Schema (19 columns; type and observed range per column):

column               type            observed range
repo_name            string          length 5–114
repo_url             string          length 24–133
snapshot_id          string          length 40 (fixed)
revision_id          string          length 40 (fixed)
directory_id         string          length 40 (fixed)
branch_name          string          209 distinct values
visit_date           timestamp[ns]
revision_date        timestamp[ns]
committer_date       timestamp[ns]
github_id            int64           9.83k–683M
star_events_count    int64           0–22.6k
fork_events_count    int64           0–4.15k
gha_license_id       string          17 distinct values
gha_created_at       timestamp[ns]
gha_updated_at       timestamp[ns]
gha_pushed_at        timestamp[ns]
gha_language         string          115 distinct values
files                list            length 1–13.2k
num_files            int64           1–13.2k
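The per-column statistics above follow the summary format of a Hugging Face-style dataset viewer, so a minimal loading sketch is included below. The dataset ID "user/code-snapshots" is a hypothetical placeholder (this card does not name the dataset); the sketch assumes only the 19 columns listed above.

    # Minimal sketch: load the dataset and inspect the columns listed above.
    # "user/code-snapshots" is a hypothetical placeholder ID, not the real one.
    from datasets import load_dataset

    ds = load_dataset("user/code-snapshots", split="train")
    print(ds.features)  # column names and types, matching the schema table
    row = ds[0]
    print(row["repo_name"], row["branch_name"], row["num_files"])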
Sample rows:

repo_name: sarosamurai/Catalan
repo_url: https://github.com/sarosamurai/Catalan
snapshot_id: 5eec0ccdb1492d6143591ab3a8b378efb7c7e894
revision_id: 066df020e6493ea563ee77050637c2d0790cbdf2
directory_id: 97e5fdf401d34b4b8c1d5cadf678471fd2c10e75
branch_name: refs/heads/master
visit_date: 2016-09-05T08:49:51.205527
revision_date: 2012-08-01T02:25:46
committer_date: 2012-08-01T02:25:46
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.48591142892837524, "alphanum_fraction": 0.514663577079773, "avg_line_length": 24.202898025512695, "blob_id": "e23fab7fa03182260311330c0609f9a92dc5a661", "content_id": "00bc051c6803cab06252514f7f51380b511a1078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1739, "license_type": "no_license", "max_line_length": 72, "num_lines": 69, "path": "/catalan.py", "repo_name": "sarosamurai/Catalan", "src_encoding": "UTF-8", "text": "def catalan(n):\n if n == 0:\n return 1\n return sum(catalan(i)*catalan(n-1-i) for i in range(0,n))\n\nclass Memoize:\n def __init__(self, func):\n self.prevs = {}\n self.func = func\n\n def __call__(self, *args):\n if not args in self.prevs:\n self.prevs[args] = self.func(*args)\n return self.prevs[args]\n\n@Memoize\ndef catalan_mem(n=0):\n if n == 0:\n return 1\n return sum(catalan_mem(i)*catalan_mem(n-1-i) for i in range(0,n))\n\ndef test(n):\n from time import time\n cats = {}\n i = 0\n runtime = 0\n while runtime < 1000:\n start = time()\n cats[i] = catalan_mem(i)\n end = time()\n runtime = (end-start)*1000\n i += 1\n print \"%20s%d\" % (\"Final i:\", i)\n print \"%20s%f\" % (\"Runtime:\", runtime)\n #return cats\n\ndef plot_cat(n):\n import matplotlib.pyplot as plt\n from time import time\n runtime = {}\n for i in range(0,n):\n start = time()\n catalan_mem(i)\n end = time()\n runtime[i] = (end-start)*1000\n eyes, times = runtime.keys(), runtime.values()\n plt.scatter(eyes,times)\n plt.show()\n\nif __name__ == \"__main__\":\n from time import clock\n non_mem_times = {}\n mem_times = {}\n for n in range(15,21):\n print \"Non-memoized function for n=%d...\" % n\n cats = {0:1}\n start = clock()\n catalan(n)\n end = clock()\n non_mem_times[n] = (end-start)\n print \"Memoized function for n=%d...\" % n\n start = clock()\n catalan_mem(n)\n end = clock()\n mem_times[n] = (end-start)\n\n print \" %-10s%-10s\" % ('non', 'mem')\n for n in range(15,21):\n print \"%-4d%-10.2f%-10.2f\" % (n, non_mem_times[n], mem_times[n])\n" } ]
num_files: 1
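Each row's `files` entry is a list of per-file records whose keys (`path`, `language`, `length_bytes`, `text`, and so on) are visible in the sample above. Below is a minimal sketch of pulling source text back out of one such record; the JSON string is a stand-in with abbreviated contents, not the full row.

    import json

    # Stand-in for the `files` value of a row; field names are taken from the
    # sample record above, and the file contents are abbreviated.
    files_json = '[{"path": "/catalan.py", "language": "Python", "text": "def catalan(n): ..."}]'

    for f in json.loads(files_json):
        print(f["path"], f["language"], len(f["text"]))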

repo_name: devnev39/tracker
repo_url: https://github.com/devnev39/tracker
snapshot_id: 3af11f3dd9a55c616a33baf2b2512ee168e03662
revision_id: fb8b520ef21235cf7e1abf96779b38097ec73109
directory_id: 973376aef1a240035c4169325332ca69aefa8395
branch_name: refs/heads/main
visit_date: 2023-07-17T17:39:57.579701
revision_date: 2021-08-28T05:43:55
committer_date: 2021-08-28T05:43:55
github_id: 397041308
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.571152925491333, "alphanum_fraction": 0.5783888101577759, "avg_line_length": 29.04347801208496, "blob_id": "34072b066bcb5817f711a81243c9eb1ab3cfc4f2", "content_id": "c9ddfd1a3e41f7711f912ad2f9c0e76f3d7dd7fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2073, "license_type": "no_license", "max_line_length": 78, "num_lines": 69, "path": "/Java/listener.java", "repo_name": "devnev39/tracker", "src_encoding": "UTF-8", "text": "package Java;\n\nimport com.example.app_tst1.Sender;\nimport java.io.ByteArrayInputStream;\nimport java.io.DataInputStream;\nimport java.io.ObjectInput;\nimport java.io.ObjectInputStream;\nimport java.net.InetAddress;\nimport java.net.ServerSocket;\nimport java.net.Socket;\n\npublic class listener {\n public static void main(String[] args) {\n try {\n start();\n } catch (Exception e) {\n System.out.println(e);\n }\n }\n\n public static Sender getSender(byte[] buffer) throws Exception{\n ByteArrayInputStream bis = new ByteArrayInputStream(buffer);\n ObjectInput in = new ObjectInputStream(bis);\n Object o = in.readObject();\n return (Sender) o;\n }\n\n public static String arr2Str(float[] arr){\n if(arr==null){\n return \"/./\";\n }\n String str = \"\";\n for(int i=0;i<arr.length;i++){\n str+=String.valueOf(arr[i]) + \" \";\n }\n return str;\n }\n\n public static void print(Sender s){\n s.getValues().forEach((key,value)->{\n System.out.println(key+\" == \"+arr2Str((float[])value));\n });\n System.out.println();\n }\n\n public static void start() throws Exception{\n System.out.println(InetAddress.getLocalHost().getHostName());\n ServerSocket ss = new ServerSocket(9000);\n System.out.println(ss.getInetAddress());\n System.out.println(\"Server listening....\");\n DataInputStream dis = null;\n Socket accepted = ss.accept();\n dis = new DataInputStream(accepted.getInputStream());\n System.out.println(\"Connected to \"+accepted.getRemoteSocketAddress());\n while(true){\n byte[] reader = new byte[4096];\n int read = dis.read(reader);\n if(read<0){\n continue;\n }\n byte[] buffer = new byte[read];\n System.out.println(buffer.length);\n java.lang.System.arraycopy(reader,0,buffer,0,read);\n Sender s = getSender(buffer);\n print(s);\n }\n \n }\n}\n" }, { "alpha_fraction": 0.510862410068512, "alphanum_fraction": 0.5319288969039917, "avg_line_length": 25.42608642578125, "blob_id": "e661cc5288377e54c285d72a906f343f0e83e6d6", "content_id": "8170be5161c0d1863663a25caf8f3bebf11d3a1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3038, "license_type": "no_license", "max_line_length": 73, "num_lines": 115, "path": "/tracker_working/analyser.py", "repo_name": "devnev39/tracker", "src_encoding": "UTF-8", "text": "import sys\nimport csv\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport numpy as np\n\n\nlim = float(sys.argv[1] if len(sys.argv) else 1)\ncount = 0\ndef set3D():\n fig = plt.figure()\n for each in range(0,count):\n ax = fig.add_subplot(1,count,each+1,projection='3d')\n\n ax.set_xlim3d([-lim,lim])\n ax.set_xlabel('Est')\n\n ax.set_ylim3d([-lim,lim])\n ax.set_ylabel('Nrth')\n\n ax.set_zlim3d([-lim,lim])\n ax.set_zlabel('UP')\n if(count>0):\n ax.set_title(titles[each])\n else:\n ax.set_title(\"3D Test\")\n\n ax.scatter([0],[0],[0],c=\"b\")\n ax.scatter(data[each][:,0],data[each][:,1],data[each][:,2],c=\"r\")\n ax.plot(data[each][:,0],data[each][:,1],data[each][:,2])\n print(type(ax))\n print(data)\n \n 
plt.show()\n\ndef isEven(n):\n return True if not n%2 else False\n\ndef set2D():\n grid = int(count/2 if isEven(count) else ((count+1)/2))\n fig,axis = plt.subplots(grid,grid,figsize=(5,5))\n if(count==0):\n axis.set_xlabel(\"Est\")\n axis.set_ylabel(\"Nrth\")\n axis.set_title(\"2D Test\")\n axis.scatter(data[count][:,0],data[count][:,1],c=\"r\")\n axis.plot(data[count][:,0],data[count][:,1])\n plt.show()\n return \n print(data[0][0,0])\n gridx = 0\n gridy = 0\n for each in range(0,count):\n axis[gridy,gridx].set_xlabel(\"Est\")\n axis[gridy,gridx].set_ylabel(\"Nrth\")\n axis[gridy,gridx].set_title(titles[each])\n axis[gridy,gridx].scatter(data[each][:,0],data[each][:,1],c=\"r\")\n axis[gridy,gridx].plot(data[each][:,0],data[each][:,1])\n\n if(not gridx%grid):\n gridx+=1\n else:\n gridx=0\n gridy+=1\n\n plt.show()\n\nfields = []\ndata = []\ntitles = []\ntest_case_found = False\ntest_case_count = False\ndef read():\n global data,count,test_case_count,test_case_found\n file = open(\"path.csv\",\"r\")\n creader = csv.reader(file)\n for each in creader:\n if(test_case_found):\n test_case_found = False\n test_case_count = True\n continue\n if(each[0].find(\"TST\")!=-1):\n test_case_found = True\n data.append([])\n titles.append(each[0])\n count += 1\n continue\n if(each==['x', 'y', 'z']):\n if(not count):\n data.append([])\n continue\n if(test_case_count): \n data[count-1].append(np.array([float(i) for i in each]))\n continue\n data[count-1].append(np.array([float(i) for i in each]))\n \n for i in range(len(data)):\n data[i] = np.array(data[i])\n data = np.array(data)\n if(len(data) and not count):\n titles.append(\"Normal\")\n count+=1\n\nread()\n# with open(\"path.csv\",\"r\") as file:\n# creader = csv.reader(file)\n# fields = next(creader)\n# print(fields)\n# for each in creader:\n# rows.append([float(i) for i in each])\n\nif(lim==0.111):\n set2D()\nelse:\n set3D()" }, { "alpha_fraction": 0.5744751691818237, "alphanum_fraction": 0.601466178894043, "avg_line_length": 21.862594604492188, "blob_id": "3ef21fea977499e8fa7a31ff30657355dc7b5996", "content_id": "9de20a79618ea65019189088fa4e32ed6f0a7f1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3001, "license_type": "no_license", "max_line_length": 71, "num_lines": 131, "path": "/tracker_working/listener.py", "repo_name": "devnev39/tracker", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport socket\nimport javaobj\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport time\nimport csv\nfrom Corrections import *\n\nxa = [0]\nay = [0]\naz = [0]\n\nlim = 5\n\naddr = 0\nacc = 0\nprev = 0.0\nfile = 0\ncwriter = 0\nfile_acc = 0\nawriter = 0\nplot_title = \"\"\n\nif len(sys.argv)>1: plot_title = sys.argv[1]\n\nthresh_hold = 0.8\n\nserver = socket.socket()\nserver.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n\n\ndef animate(i,line):\n start = time.perf_counter()\n global prev\n data = acc.recv(1024)\n print(f'Received : {len(data)}')\n\n data = (javaobj.loads(data)).values\n delta = round(time.perf_counter() - prev,2)\n\n ac = (np.array(data[\"ACC\"]) - np.array(data[\"GRV\"]))\n prev = time.perf_counter()\n\n truths = check(ac[0],ac[1],ac[2],thresh_hold)\n print(truths)\n if(not truths):return line,\n\n ac = corrections(ac,data[\"ORI\"])\n\n print(delta)\n # awriter.writerow([ac[0],ac[1],ac[2]])\n vals =[]\n for each in truths:\n if(each):\n 
vals.append(round(ac[truths.index(each)]*(delta**2)/2,2))\n else:\n vals.append(0)\n corrected = dist_correction(vals,data[\"ORI\"])\n xa.append(xa[-1]+corrected[0])\n ay.append(ay[-1]+corrected[1])\n az.append(az[-1]+vals[2]) \n # az.append(0)\n cwriter.writerow([xa[-1],ay[-1],az[-1]])\n \n if(len(xa)>20):\n xa.pop(0)\n ay.pop(0)\n az.pop(0)\n\n line.set_data(np.array(xa),np.array(ay))\n line.set_3d_properties(np.array(az))\n\n print(f'Time spent : {time.perf_counter()-start}')\n return line,\n\ndef getLocalIp():\n sck = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n sck.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n sck.connect((\"8.8.8.8\",1))\n return str(sck.getsockname()[0])\n\ntry:\n print(getLocalIp())\n server.bind((getLocalIp(),9000))\n server.listen()\n print(\"server started...\")\n acc,addr = server.accept()\n print(f'Connection from {addr}')\n\n file = open(\"path.csv\",\"a\")\n file_acc = open(\"acc.csv\",\"a\")\n cwriter = csv.writer(file)\n awriter = csv.writer(file_acc)\n if plot_title:\n cwriter.writerow([f\"TST {plot_title}\"])\n awriter.writerow([f\"TST {plot_title}\"])\n cwriter.writerow([\"x\",\"y\",\"z\"])\n cwriter.writerow([0,0,0])\n awriter.writerow([\"x\",\"y\",\"z\"])\n awriter.writerow([0,0,0])\n\n\n prev = time.perf_counter()\n fig = plt.figure()\n ax = p3.Axes3D(fig)\n\n ax.set_xlim3d([-lim, lim])\n ax.set_xlabel('Est')\n\n ax.set_ylim3d([-lim, lim])\n ax.set_ylabel('Nrth')\n\n ax.set_zlim3d([-lim, lim])\n ax.set_zlabel('UP')\n # //ax.set_autoscale_on(True)\n ax.set_title('3D Test')\n\n line = ax.plot([0],[0],[0])[0]\n\n ani = FuncAnimation(fig,animate,frames=100,blit=True,fargs=(line,))\n plt.show()\n file.close()\nexcept Exception as e:\n print(e)\n _,_,tb = sys.exc_info()\n print(tb.tb_lineno)\n server.detach()\n server.close()\n \n\n" }, { "alpha_fraction": 0.4562780261039734, "alphanum_fraction": 0.5156950950622559, "avg_line_length": 20.7560977935791, "blob_id": "bb75be867f081212b7529ffce5c667058db16d7c", "content_id": "fec186b50a207eca596a2cdda31b2f6d9919d718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "no_license", "max_line_length": 51, "num_lines": 41, "path": "/tracker_working/Corrections.py", "repo_name": "devnev39/tracker", "src_encoding": "UTF-8", "text": "import math\n\ndef corrections(dists,ori):\n # Dists is acceleration list\n dists[0] -= 9.8*math.sin(math.radians(ori[2])) \n dists[1] -= 9.8*math.sin(math.radians(ori[1]))\n\n return dists\n\ndef dist_correction(dists,ori):\n x = dists[0]\n y = dists[1]\n if(ori[0]<45 or ori[0]>315):\n return dists\n\n if(ori[0]>45 or ori[0]<135):\n # East side\n dists[0] = y\n dists[1] = -x\n if(ori[0]>135 and ori[0]<225):\n # North side\n dists[0] = -x\n dists[1] = -y\n if(ori[0]>225 and ori[0]<315):\n # West side\n dists[0] = -y\n dists[1] = x\n \n return dists\n\n\ndef check(x,y,z,r):\n truths = [True,True,True]\n if(x<r and x>-r):\n truths[0] = False\n if(y<r and y>-r):\n truths[1] = False\n if(z<r+0.5 and z>-r-0.5):\n truths[2] = False\n if(not sum(truths)):return False\n return truths\n" } ]
num_files: 4

repo_name: hoanganhquang/Django
repo_url: https://github.com/hoanganhquang/Django
snapshot_id: ba52a75bb13f813bf84cf8b5b7b5824b7f675935
revision_id: 79237d121191d49681b6f9e63fc5e6c33177eddc
directory_id: 659046e598989fb29d841f2931e05de03c497c1d
branch_name: refs/heads/master
visit_date: 2023-07-09T03:42:44.672786
revision_date: 2021-08-18T09:13:12
committer_date: 2021-08-18T09:13:12
github_id: 390395094
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7027027010917664, "alphanum_fraction": 0.7094594836235046, "avg_line_length": 31.375, "blob_id": "af7627f899e5adf3954cf4f2589e5a39175e601d", "content_id": "441734324da7f4b0457ce86f722c867625369a89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 95, "num_lines": 32, "path": "/commerce/auctions/models.py", "repo_name": "hoanganhquang/Django", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass User(AbstractUser):\n pass\n\n\nclass AuctionListing(models.Model):\n item_name = models.CharField(max_length=20)\n price = models.FloatField()\n seller = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"seller_item\")\n date = models.DateTimeField()\n describe = models.CharField(max_length=50)\n\n def __str__(self):\n return f\"{self.id}: {self.item_name}\"\n\n\nclass Bid(models.Model):\n price = models.FloatField()\n user_bids = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"bid_user\")\n item = models.ForeignKey(AuctionListing, on_delete=models.CASCADE, related_name=\"bid_item\")\n\n def __str__(self):\n return f\"{self.price}\"\n\n\nclass Comment(models.Model):\n body = models.CharField(max_length=100)\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"cmt_user\")\n item = models.ForeignKey(AuctionListing, on_delete=models.CASCADE, related_name=\"cmt_item\")\n" }, { "alpha_fraction": 0.6278924345970154, "alphanum_fraction": 0.6278924345970154, "avg_line_length": 30.04854393005371, "blob_id": "9b9ad1100145d07faf6544f4776f8b2512769b4a", "content_id": "ded951fb1faff9ec5c88cf52406202da89c75eec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3198, "license_type": "no_license", "max_line_length": 127, "num_lines": 103, "path": "/commerce/auctions/views.py", "repo_name": "hoanganhquang/Django", "src_encoding": "UTF-8", "text": "from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom datetime import datetime\nfrom .models import *\n\n\ndef index(request):\n item_list = AuctionListing.objects.all()\n return render(request, \"auctions/index.html\", {\n \"items\": item_list\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must 
match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n\ndef create_item(request):\n user = request.user\n if user.id is None:\n return redirect(\"login_view\")\n\n if request.method == \"POST\":\n seller = user\n item_name = request.POST[\"item-name\"]\n price = request.POST[\"price\"]\n describe = request.POST[\"describe\"]\n date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n new_item = AuctionListing.objects.create(item_name=item_name, price=price, seller=seller, date=date, describe=describe)\n new_item.save()\n\n return redirect('index')\n\n return render(request, 'auctions/create-item.html')\n\n\ndef item_detail(request, item_id):\n item = AuctionListing.objects.get(pk=int(item_id))\n current_user_id = request.user.id\n current_user = User.objects.get(pk=int(current_user_id))\n\n if request.method == \"POST\":\n price = request.POST[\"bid\"]\n new_bid = Bid.objects.create(price=price, user_bids=request.user, item=item)\n new_bid.save()\n\n context = {\n \"item\": item,\n # \"current_bid\": current_bid\n }\n\n return render(request, 'auctions/item.html', context)\n" }, { "alpha_fraction": 0.46845123171806335, "alphanum_fraction": 0.47609943151474, "avg_line_length": 29.41176414489746, "blob_id": "3dc2ff2d4b682fa70b7b9623fbf7d08d904995ae", "content_id": "3847f258e19e7c6244187868f8a21585110ebeed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 523, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/commerce/auctions/templates/auctions/item.html", "repo_name": "hoanganhquang/Django", "src_encoding": "UTF-8", "text": "\n\n <h3>Detail</h3>\n <ul>\n <li>{{item.item_name}}</li>\n <li>{{item.price}}</li>\n <li>{{item.describe}}</li>\n <li>{{item.date}}</li>\n <p>Sell by {{item.seller.username}}</p>\n </ul>\n\n <h4>Your current bid</h4>\n <p>{{current_bid}}</p>\n\n <form action=\"{% url 'item_detail' item_id=item.id %}\" method=\"post\">\n {% csrf_token %}\n <input type=\"text\" placeholder=\"Bid\" name=\"bid\"> <br>\n <input type=\"submit\" value=\"Place bid\">\n </form>\n" }, { "alpha_fraction": 0.7196261882781982, "alphanum_fraction": 0.7196261882781982, "avg_line_length": 24.176469802856445, "blob_id": "b67239de6c369d87962fa7a4dfd9cf327294bc66", "content_id": "65d8f2109ca681270c3f312955d86a14ba1fa35c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 77, "num_lines": 17, "path": "/commerce/auctions/admin.py", "repo_name": "hoanganhquang/Django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n# Register your models here.\n\n\nclass AucAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"item_name\", \"price\", \"seller\", \"date\", \"describe\")\n\n\nclass BidAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"price\", \"user_bids\", \"item\")\n\n\nadmin.site.register(User)\nadmin.site.register(AuctionListing, AucAdmin)\nadmin.site.register(Bid, BidAdmin)\nadmin.site.register(Comment)\n" } ]
num_files: 4

repo_name: ItzmeSwapy/python-tumblpy
repo_url: https://github.com/ItzmeSwapy/python-tumblpy
snapshot_id: b3fae624dba3bf0844b5464e69fc42645542efbb
revision_id: 21490bd6cd03d159a440b2c13a6b4641c789c954
directory_id: d326efadbb397f9332a62aa12aad53c1aae184fc
branch_name: refs/heads/master
visit_date: 2023-03-19T08:29:02.419851
revision_date: 2017-02-08T22:01:33
committer_date: 2017-02-08T22:01:33
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5849077105522156, "alphanum_fraction": 0.5885782241821289, "avg_line_length": 38.084388732910156, "blob_id": "cd77cd62da90f0b450debdf06190a17d6139094e", "content_id": "e493dba85cdefe2baf5496ac9c30b00039360323", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9263, "license_type": "permissive", "max_line_length": 163, "num_lines": 237, "path": "/tumblpy/api.py", "repo_name": "ItzmeSwapy/python-tumblpy", "src_encoding": "UTF-8", "text": "import requests\nfrom requests_oauthlib import OAuth1\n\nfrom . import __version__\nfrom .compat import json, parse_qsl, urlencode, urlparse\nfrom .exceptions import TumblpyAuthError, TumblpyError\nfrom .helpers import _split_params_and_files\n\n\nclass Tumblpy(object):\n def __init__(self, app_key=None, app_secret=None, oauth_token=None,\n oauth_token_secret=None, headers=None, proxies=None):\n\n # Define some API URLs real quick\n self.base_api_url = 'https://api.tumblr.com'\n self.api_version = 'v2'\n self.api_url = '%s/%s/' % (self.base_api_url, self.api_version)\n\n # Authentication URLs\n self.request_token_url = 'https://www.tumblr.com/oauth/request_token'\n self.access_token_url = 'https://www.tumblr.com/oauth/access_token'\n self.authorize_url = 'https://www.tumblr.com/oauth/authorize'\n self.authenticate_url = 'https://www.tumblr.com/oauth/authorize'\n\n self.default_params = {'api_key': app_key}\n\n req_headers = {'User-Agent': 'Tumblpy v' + __version__}\n if headers:\n req_headers.update(headers)\n\n self.app_key = app_key\n self.app_secret = app_secret\n self.oauth_token = oauth_token\n self.oauth_token_secret = oauth_token_secret\n\n auth = None\n if self.app_key and self.app_secret:\n if not self.oauth_token and not self.oauth_token_secret:\n auth = OAuth1(self.app_key, self.app_secret)\n else:\n auth = OAuth1(self.app_key, self.app_secret,\n self.oauth_token, self.oauth_token_secret)\n\n self.client = requests.Session()\n self.client.proxies = proxies\n self.client.headers = req_headers\n self.client.auth = auth\n\n def get_authentication_tokens(self, callback_url=None):\n \"\"\"Returns a dict including an authorization URL (auth_url) to direct a user to\n\n :param callback_url: (optional) Url the user is returned to after they authorize your app (web clients only)\n \"\"\"\n\n request_args = {}\n if callback_url:\n request_args['oauth_callback'] = callback_url\n\n response = self.client.get(self.request_token_url, params=request_args)\n\n if response.status_code != 200:\n raise TumblpyAuthError('Seems something couldn\\'t be verified with your OAuth junk. Error: %s, Message: %s' % (response.status_code, response.content))\n\n res = response.content\n if isinstance( response.content, bytes ):\n res = res.decode()\n\n request_tokens = dict(parse_qsl(res))\n\n if not request_tokens:\n raise TumblpyError('Unable to decode request tokens.')\n\n auth_url_params = {\n 'oauth_token': request_tokens['oauth_token'],\n }\n if callback_url:\n auth_url_params['oauth_callback'] = callback_url\n\n request_tokens['auth_url'] = self.authenticate_url + '?' 
+ urlencode(auth_url_params)\n\n return request_tokens\n\n def get_authorized_tokens(self, oauth_verifier):\n \"\"\"Returns authorized tokens after they go through the auth_url phase.\n \"\"\"\n response = self.client.get(self.access_token_url,\n params={'oauth_verifier': oauth_verifier})\n\n res = response.content\n if isinstance( response.content, bytes ):\n res = res.decode()\n\n authorized_tokens = dict(parse_qsl(res))\n if not authorized_tokens:\n raise TumblpyError('Unable to decode authorized tokens.')\n\n return authorized_tokens\n\n def request(self, endpoint, method='GET', blog_url=None,\n extra_endpoints=None, params=None):\n params = params or {}\n method = method.lower()\n\n if not method in ('get', 'post'):\n raise TumblpyError('Method must be of GET or POST')\n\n url = self.api_url # http://api.tumblr.com/v2/\n\n if blog_url is not None:\n # http://api.tumblr.com/v2/blog/blogname.tumblr.com/\n blog_url = urlparse(blog_url)\n\n url = '%sblog/%s/' % (\n self.api_url,\n blog_url.hostname if blog_url.hostname is not None else blog_url.path\n )\n\n url = '%s%s' % (url, endpoint)\n if extra_endpoints is not None:\n # In cases like:\n # http://api.tumblr.com/v2/blog/blogname.tumblr.com/posts/type/\n # 'type' is extra in the url & thought this was the best way\n # Docs: http://www.tumblr.com/docs/en/api/v2#posts\n\n url = '%s/%s' % (url, '/'.join(extra_endpoints))\n\n params, files = _split_params_and_files(params)\n params.update(self.default_params)\n\n func = getattr(self.client, method)\n try:\n if method == 'get':\n response = func(url, params=params, allow_redirects=False)\n else:\n kwargs = {'data': params, 'files': files, 'allow_redirects': False}\n if files:\n kwargs['params'] = params\n response = func(url, **kwargs)\n except requests.exceptions.RequestException:\n raise TumblpyError('An unknown error occurred.')\n\n if response.status_code == 401:\n raise TumblpyAuthError('Error: %s, Message: %s' % (response.status_code, response.content))\n\n content = response.content.decode('utf-8')\n try:\n if endpoint == 'avatar':\n content = {\n 'response': {\n 'url': response.headers.get('location')\n }\n }\n else:\n content = json.loads(content)\n except ValueError:\n raise TumblpyError('Unable to parse response, invalid JSON.')\n\n try:\n content = content.get('response', {})\n except AttributeError:\n raise TumblpyError('Unable to parse response, invalid content returned: %s' % content)\n\n if response.status_code < 200 or response.status_code > 301:\n error_message = ''\n if content and (content.get('errors') or content.get('error')):\n if 'errors' in content:\n for error in content['errors']:\n error_message = '%s ' % error\n elif 'error' in content:\n error_message = content['error']\n\n error_message = (error_message or\n 'There was an error making your request.')\n raise TumblpyError(error_message, error_code=response.status_code)\n\n return content\n\n def get(self, endpoint, blog_url=None, extra_endpoints=None, params=None):\n return self.request(endpoint, blog_url=blog_url,\n extra_endpoints=extra_endpoints, params=params)\n\n def post(self, endpoint, blog_url=None, extra_endpoints=None, params=None):\n return self.request(endpoint, method='POST', blog_url=blog_url,\n extra_endpoints=extra_endpoints, params=params)\n\n def get_avatar_url(self, blog_url, size=64):\n size = [str(size)] or ['64']\n return self.get('avatar', blog_url=blog_url, extra_endpoints=size)\n\n def following(self, kwargs=None):\n \"\"\"\n Gets the blogs that the current user is following.\n :param 
limit: an int, the number of likes you want returned\n :param offset: an int, the blog you want to start at, for pagination.\n\n # Start at the 20th blog and get 20 more blogs.\n client.following({'offset': 20, 'limit': 20})\n\n :returns: A dict created from the JSON response\n \"\"\"\n return self.get('user/following', params=kwargs)\n\n def dashboard(self, kwargs=None):\n \"\"\"\n Gets the dashboard of the current user\n example: dashboard = client.dashboard({'limit': '3'})\n\n\n :param limit: an int, the number of posts you want returned\n :param offset: an int, the posts you want to start at, for pagination.\n :param type: the type of post you want to return\n :param since_id: return only posts that have appeared after this ID\n :param reblog_info: return reblog information about posts\n :param notes_info: return notes information about the posts\n\n :returns: A dict created from the JSON response\n \"\"\"\n return self.get('user/dashboard', params=kwargs)\n\n def posts(self, blog_url, post_type=None, kwargs=None):\n \"\"\"\n Gets a list of posts from a particular blog\n :param blog_url: a string, the blogname you want to look up posts\n for. eg: codingjester.tumblr.com\n :param post_type: the type of posts you want returned, e.g. video. If omitted returns all post types.\n :param limit: an int, the number of likes you want returned\n :param offset: an int, the blog you want to start at, for pagination.\n :returns: A dict created from the JSON response\n \"\"\"\n url = 'posts'\n if post_type:\n url = '%s/%s' % (url, post_type)\n\n return self.get(url, blog_url=blog_url, params=kwargs)\n\n def __repr__(self):\n return u'<TumblrAPI: %s>' % self.app_key\n" }, { "alpha_fraction": 0.6398158669471741, "alphanum_fraction": 0.6478711366653442, "avg_line_length": 30.035715103149414, "blob_id": "9a52763e3ad177b519d8ce8acaf22512643fae8c", "content_id": "a5f6a296f740ddcfe2c2666b55a42224d4e831c4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "permissive", "max_line_length": 77, "num_lines": 28, "path": "/tumblpy/exceptions.py", "repo_name": "ItzmeSwapy/python-tumblpy", "src_encoding": "UTF-8", "text": "class TumblpyError(Exception):\n \"\"\"Generic error class, catch-all for most Tumblpy issues.\n from tumblpy import TumblpyError, TumblpyRateLimitError, TumblpyAuthError\n \"\"\"\n def __init__(self, msg, error_code=None):\n self.error_code = error_code\n if error_code is not None:\n if error_code == 503:\n raise TumblpyRateLimitError(msg, error_code)\n elif error_code == 401:\n raise TumblpyAuthError(msg, error_code)\n\n super(TumblpyError, self).__init__(msg)\n\n @property\n def msg(self):\n return self.args[0]\n\n\nclass TumblpyRateLimitError(TumblpyError):\n \"\"\"Raised when you've hit an API limit.\"\"\"\n pass\n\n\nclass TumblpyAuthError(TumblpyError):\n \"\"\"Raised when you try to access a protected resource and it fails due to\n some issue with your authentication.\"\"\"\n pass\n" }, { "alpha_fraction": 0.5671332478523254, "alphanum_fraction": 0.6329704523086548, "avg_line_length": 23.730770111083984, "blob_id": "6a0498b8176323d24331424469f98f38d4422b77", "content_id": "0cfd803d07c4ff983739bd5c2d7db01eac857fc8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1929, "license_type": "permissive", "max_line_length": 121, "num_lines": 78, "path": "/HISTORY.rst", "repo_name": "ItzmeSwapy/python-tumblpy", 
"src_encoding": "UTF-8", "text": ".. :changelog:\n\nHistory\n-------\n\n1.1.4 (2016-02-08)\n++++++++++++++++++\n\n- Remove old api url string formatting.\n- Added ``posts`` method to Tumblpy, see README for example.\n\n1.1.3 (2016-01-17)\n++++++++++++++++++\n\n- Fix missing import\n\n1.1.2 (2016-12-22)\n++++++++++++++++++\n\n- Fix missing import\n\n1.1.1 (2016-05-12)\n++++++++++++++++++\n\n- Fix issue where blogs using https:// were being parsed wrong\n\n\n1.1.0 (2016-30-04)\n++++++++++++++++++\n\n- Add following and dashboard API methods\n\n\n1.0.5 (2015-08-13)\n++++++++++++++++++\n\n- Add support for ``proxies`` keyword for requests\n\n\n1.0.4 (2015-01-15)\n++++++++++++++++++\n\n- Fix request token decode issue in Python 3\n\n\n1.0.3 (2014-10-17)\n++++++++++++++++++\n\n- Unpin ``requests`` and ``requests-oauthlib`` versions in ``setup.py``\n\n\n1.0.2 (2013-05-31)\n++++++++++++++++++\n\n- Made the hotfix for posting photos a little more hotfixy... fixed posting just regular posts (as well as photos)\n\n1.0.1 (2013-05-29)\n++++++++++++++++++\n\n- Hotfix image uploading (not sure why we have to pass ``params`` AND ``data`` to the POST, hotfix for the time being...)\n- Allow for ints and floats (and longs in Python 2) to be passed as parameters to Tumblpy Tumblr API functions\n\n\n1.0.0 (2013-05-23)\n++++++++++++++++++\n\n- Changed internal Tumblpy API structure, but Tumblpy functions should still work as they did before\n- Updated README with more clear examples\n- Added LICENSE\n- ``_split_params_and_files`` has been moved to ``helpers.py``\n- All ``Tumblpy`` exceptions are found in ``exceptions.py``\n- Removed ``pool_maxsize`` from ``Tumblpy.__init__`` because it wasn't being used\n- Removed ``timeout`` parameter from all request methods for the time being\n- Removed ``TumblpyTimeout`` Exception\n- Moved ``callback_url`` parameter from ``Tumblpy.__init__`` to ``get_authentication_tokens``\n- All authentication and API calls over HTTPS\n- Dropped Python 2.5 support\n- Full, transparent Python 3.3 support\n" }, { "alpha_fraction": 0.6784996390342712, "alphanum_fraction": 0.6791694760322571, "avg_line_length": 30.76595687866211, "blob_id": "92263c3bb22a5d5f251e36915840ed2da48b5809", "content_id": "04a407418e56909b6d029114d30d5d212ab758d5", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1493, "license_type": "permissive", "max_line_length": 106, "num_lines": 47, "path": "/examples/example.py", "repo_name": "ItzmeSwapy/python-tumblpy", "src_encoding": "UTF-8", "text": "import sys\n\nfrom tumblpy import Tumblpy\n\nkey = raw_input('App Consumer Key: ')\nsecret = raw_input('App Consumer Secret: ')\n\nif not 'skip-auth' in sys.argv:\n t = Tumblpy(key, secret)\n\n callback_url = raw_input('Callback URL: ')\n\n auth_props = t.get_authentication_tokens(callback_url=callback_url)\n auth_url = auth_props['auth_url']\n\n OAUTH_TOKEN_SECRET = auth_props['oauth_token_secret']\n\n print('Connect with Tumblr via: {}'.format(auth_url))\n\n oauth_token = raw_input('OAuth Token (from callback url): ')\n oauth_verifier = raw_input('OAuth Verifier (from callback url): ')\n\n t = Tumblpy(key, secret, oauth_token, OAUTH_TOKEN_SECRET)\n\n authorized_tokens = t.get_authorized_tokens(oauth_verifier)\n\n final_oauth_token = authorized_tokens['oauth_token']\n final_oauth_token_secret = authorized_tokens['oauth_token_secret']\n\n print('OAuth Token: {}'.format(final_oauth_token))\n print('OAuth Token Secret: 
{}'.format(final_oauth_token_secret))\nelse:\n final_oauth_token = raw_input('OAuth Token: ')\n final_oauth_token_secret = raw_input('OAuth Token Secret: ')\n\nt = Tumblpy(key, secret, final_oauth_token, final_oauth_token_secret)\n\nblog_url = t.post('user/info')\nblog_url = blog_url['user']['blogs'][0]['url']\n\nprint('Your blog url is: {}'.format(blog_url))\n\nposts = t.posts(blog_url)\n\nprint('Here are some posts this blog has made:', posts)\n\n# print t.post('post', blog_url=blog_url, params={'type':'text', 'title': 'Test', 'body': 'Lorem ipsum.'})\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.33888888359069824, "avg_line_length": 24.714284896850586, "blob_id": "6e0d5b1c1cb9e6b64d265606a4fd425fe51bc337", "content_id": "f15c9a43dfd40eda00d0db06dbf7c73ba197a297", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "permissive", "max_line_length": 75, "num_lines": 21, "path": "/tumblpy/__init__.py", "repo_name": "ItzmeSwapy/python-tumblpy", "src_encoding": "UTF-8", "text": "# ______ __ __\n# /_ __/_ ______ ___ / /_ / /___ __ __\n# / / / / / / __ `__ \\/ __ \\/ / __ \\/ / / /\n# / / / /_/ / / / / / / /_/ / / /_/ / /_/ /\n# /_/ \\__,_/_/ /_/ /_/_.___/_/ .___/\\__, /\n# /_/ /____/\n\n\"\"\"\nTumblpy\n-------\n\nTumblpy is a Python library to help interface with the Tumblr API and OAuth\n\"\"\"\n\n__author__ = 'Mike Helmick <[email protected]>'\n__version__ = '1.1.4'\n\nfrom .api import Tumblpy\nfrom .exceptions import (\n TumblpyError, TumblpyAuthError, TumblpyRateLimitError\n)\n" }, { "alpha_fraction": 0.5750577449798584, "alphanum_fraction": 0.5750577449798584, "avg_line_length": 29.928571701049805, "blob_id": "ea91e400d038d870f774ec69d90c32600b685db1", "content_id": "62b191019dc980c13fd936dc869dc85cf9d91bf8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "permissive", "max_line_length": 71, "num_lines": 14, "path": "/tumblpy/helpers.py", "repo_name": "ItzmeSwapy/python-tumblpy", "src_encoding": "UTF-8", "text": "from .compat import basestring, numeric_types\n\n\ndef _split_params_and_files(params_):\n params = {}\n files = {}\n for k, v in params_.items():\n if hasattr(v, 'read') and callable(v.read):\n files[k] = v\n elif isinstance(v, basestring) or isinstance(v, numeric_types):\n params[k] = v\n elif isinstance(v, bool):\n params[k] = 'true' if v else 'false'\n return params, files\n" } ]
num_files: 6

repo_name: maxvonhippel/Stellar-Visualizer
repo_url: https://github.com/maxvonhippel/Stellar-Visualizer
snapshot_id: 5064ce8750c95e35b2df65abf58642cdd084d216
revision_id: 42868196ddca6a753c46f9c029bd029976f3875d
directory_id: c767d0f3f2ed3dca2fd07b79bc6a8663e6eb4588
branch_name: refs/heads/master
visit_date: 2021-06-14T08:12:44.631613
revision_date: 2020-10-18T20:54:26
committer_date: 2020-10-18T20:54:26
github_id: 101367793
star_events_count: 1
fork_events_count: 0
gha_license_id: BSD-3-Clause
gha_created_at: 2017-08-25T05:02:13
gha_updated_at: 2020-09-25T04:28:08
gha_pushed_at: 2020-10-18T20:54:26
gha_language: Python
files:
[ { "alpha_fraction": 0.7551970481872559, "alphanum_fraction": 0.7629537582397461, "avg_line_length": 50.17460250854492, "blob_id": "b4cc34bfaa58fa1d0a008282602f65e2f498d1a6", "content_id": "a52a5176b3290f1027397f021677248d16e5566b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3223, "license_type": "permissive", "max_line_length": 586, "num_lines": 63, "path": "/README.md", "repo_name": "maxvonhippel/Stellar-Visualizer", "src_encoding": "UTF-8", "text": "# Stellar Visualizer\n\n## Introduction\n\nStellar Visualizer:\n\n1. Generates synthetic data in the style of Gaia data.\n2. Visualizes data (synthetic or real) given in CSV format in 3D, with various interactive options for data exploration.\n\nThe folder structure is as follows:\n\n`Generate`: Code relating to the generation of synthetic data\n\n* `________/generate_synthetic_data.py`: `Python3` script which generates synthetic data\n\n`Visualize`: Code relating to the visualization of data (synthetic or real)\n\n* `________/index.html`: `HTML` boilerplate for visualization output\n* `________/index.js`: `Javascript` code for interactivity of visualization output\n\n## Getting started\n\nInstall the [Node Package Manager](http://npmjs.com/) (NPM) if you don't already have it. Then, in the working directory, run:\n\n````\nnpm install\n````\n\nFrom the working directory, in your Python interpreter of choice, to generate a synthetic data file, do the following:\n\n````\nfrom Generate.generate_synthetic_data import *\nsuccess = generate_n_data(1000, 'Visualize/data.csv')\nprint(success)\n````\n\nYou can replace the path to write the file to (`Visualize/data.csv`) with whatever you want. Likewise, you can change the `1000` parameter in the function call to be whatever integer `n` you want, in order to get back `n` many synthetically generated stars.\n\nTo use the Visualize tool, open the Visualize folder and then open [index.html](Visualize/index.html) in your browser of choice. It is tested on and developed for Chrome, but should work on most modern browsers. It's a bit bare-bones at the moment, but more functionality is coming. You can find a sample CSV file at [`Visualize/data.csv`](Visualize/data.csv), which you can select with the \"choose file\" button, or you can make your own CSV file with the Generate code and use that. Feel free to over-write the demo CSV file I provide you with; it is not a dependency for anything.\n\nTo use the Generate tool in MATLAB, `cd` in MATLAB into the Generate directory and then access the Generate functions by name using the MATLAb `py` module. For example:\n\n````\nstar_tuple = py.generate_synthetic_data.generate_uvw(py.generate_synthetic_data.thin_disk);\n````\n\nNote that for now this will remain a Pythonic variable. I'll figure out later how to case these Python lists and tuples into native MATLAB types, and update the docs accordingly.\n\n## Status\n\nThe synthetic data generation tool is basically complete, although I plan to improve it over time. The only major missing feature at this point is MATLAB integration, which I'm working on.\n\nThe visualization tool is in progress and looks pretty cool, but a lot of work remains, in particular for:\n\n1. Labeling of datapoints, with a table that will show information about a specific datapoint when you hover over or select it\n2. Conic filtering of datapoints\n3. 
More (as well as more sensible/deliberate) coordinate system options\n\n## Citations\n\nThis project is made open-source and freely available under [BSD-3-Clause license](LICENSE).\n\nThis section will get cleaned up and made more science-y and formal and whatnot later. For now, we only have one reference, [which is this](http://adsabs.harvard.edu/full/1987AJ.....93..864J)." }, { "alpha_fraction": 0.5758891105651855, "alphanum_fraction": 0.5996383428573608, "avg_line_length": 34.913421630859375, "blob_id": "c1da4192ee38b944eb7b6b8401dacd24792661ae", "content_id": "0fc71dac92faa534a5c5c81ec5c043b85958f020", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8295, "license_type": "permissive", "max_line_length": 78, "num_lines": 231, "path": "/Generate/generate_synthetic_data.py", "repo_name": "maxvonhippel/Stellar-Visualizer", "src_encoding": "UTF-8", "text": "# generate_synthetic_data.py\n# \n# Generates synthetic data files which are used to prototype a\n# data visualization system. The synthetic files are rough\n# estimates of the sort of data which we will get from Gaia.\n#\n# Authored August 24 2017 by Max von Hippel\n# Edited December 22 2017 by Max & Ted von Hippel\n# Edited December 25 2017 by Max von Hippel\n\nimport random\nimport csv\nimport uuid\nimport numpy as np\nimport scipy.linalg as slin\nfrom math import cos, sin, radians\n\n# ------------------------- Assumptions -------------------------\n\n# These are the hypothetical parameters of the distributions\n# of the data (See generate_uvw).\nthin_disk = {'u': [0, 20],\n\t\t\t 'v': [0, 20],\n\t\t\t 'w': [0, 10],\n\t\t\t 't': [0, 10]}\nthick_disk = {'u': [0, 30],\n\t\t\t 'v': [-20, 30],\n\t\t\t 'w': [0, 30],\n\t\t\t 't': [9, 11]}\nhalo \t = {'u': [0, 50],\n\t\t\t 'v': [-200, 100],\n\t\t\t 'w': [0, 60],\n\t\t\t 't': [11, 13]}\n\n# These are the hypothetical populations of the data.\npopulations = [thin_disk, thick_disk, halo]\n\n# Constants employed in conversion furmulae\n# Hypothetically, we will be using these to determine the T matrix, but that\n# formulation has still not been brought into the actual code-base\n# (as you can see, T constants are all still hard-valued.)\nk = 4.74057\n# Right Ascension of North Galactic Pole, in degrees\nRA_NGP = 192.25\n# Declination of North Galactic Pole, in degrees\nDec_NGP = 27.4\n# Position angle of North Galactic Pole, in degrees, relative to the great\n# semicircle passing through the North Galactic Pole and the zero Galactic\n# longitude\ntheta_o = 123.0\n\ncos_theta_o = cos(radians(theta_o))\nsin_theta_o = sin(radians(theta_o))\ncos_Dec_NGP = cos(radians(Dec_NGP))\nsin_Dec_NGP = sin(radians(Dec_NGP))\nsin_RA_NGP = sin(radians(RA_NGP))\ncos_RA_NGP = cos(radians(RA_NGP))\n\nT = (np.matrix([[cos_theta_o, sin_theta_o, 0], \n\t \t\t [ sin_theta_o, -cos_theta_o, 0],\n\t \t\t [ 0, \t\t\t 0, 1]]) * \n\tnp.matrix([[-sin_Dec_NGP, 0, cos_Dec_NGP], \n\t\t\t [ 0, -1, 0],\n\t\t\t [ cos_Dec_NGP, 0, sin_Dec_NGP]]) * \n\tnp.matrix([[ cos_RA_NGP, sin_RA_NGP, 0], \n\t\t\t [ sin_RA_NGP, -cos_RA_NGP, 0],\n\t\t\t [\t\t 0, \t\t\t 0, \t\t 1]]))\n\n# T should yield: np.matrix([[-0.06699, -0.87276, -0.48354],\n# \t\t\t \t\t\t\t [ 0.49273, -0.45035, 0.74458],\n# \t\t\t \t\t\t [-0.86760, -0.18837, 0.46020]])\n\n\n# Second transformation matrix, based on RA and Dec, used in equatorial_to_uvw\n# as well as in uvw_to_equatorial\ndef A(RA, Dec):\n\treturn np.matrix([[cos(RA) * cos(Dec), -sin(RA), -cos(RA) * sin(Dec)],\n\t\t\t\t [sin(RA) * 
cos(Dec), cos(RA), -sin(RA) * sin(Dec)],\n\t\t\t\t [ sin(Dec), \t\t 0, \t\t cos(Dec)]])\n\n\n# ------------------------- Methods -------------------------\n\n# uvw_to_equatorial\n# ---------------------------------------------------\n# Transforms a star in uvw to equatorial, filling in\n# unknown values when necesarry based on assumptions\n# explained in the code. (Not entirely deterministic.)\n#\n# Input:\n# \tstar = (i, u, v, w, t)\n# Output:\n#\tstar = i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t)\ndef uvw_to_equatorial(star):\n\t# input star is 5-tuple of form (i, u, v, w, t)\n\t(i, u, v, w, t) = star\n\t# RA - Right Ascension, the longitude-like coordinate on the celestial sphere\n\t# RA is uniform [0, 360] with modulo 360\n\tRA = random.uniform(0, 360)\n\tRA = RA if RA != 360 else 0\n\t# Dec - Declination, the latitude-like coordinate on the celestial sphere\n\t# Dec is uniform [-90, 90]\n\t# When I get a chance need to update this to be cos dec distribution\n\tDec = random.uniform(-90, 90)\n\t# From u, v, w we will get:\n\tconversion = slin.inv(T) * slin.inv(A(RA,Dec)) * np.matrix([[u],[v],[w]])\n\t# pm_RA - proper motion in RA, corrected for Dec, in arcsec/yr\n\tpm_RA_over_prlx = conversion.item(1) / k\n\t# pm_Dec - proper motion in Dec, in arcsec/yr\n\tpm_Dec_over_prlx = conversion.item(2) / k\n\t# prlx - Parallax in arcsec. This is a nuisance variable for this\n\t# visualization, therefore we generate an arbitrary/nominal value here.\n\tprlx = 100\n\t# Now that we have a prlx value, we can get pm_RA and pm_Dec\n\tpm_RA = pm_RA_over_prlx * prlx\n\tpm_Dec = pm_Dec_over_prlx * prlx\n\t# V_rad - Gaussian distribution is good enough for now. This value is in\n\t# km / sec. We will likely update with better approximation in the future.\n\tV_rad = random.gauss(0, 50)\n\treturn (i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t)\n\n# equatorial_to_uvw\n# ---------------------------------------------------\n# Transforms a star in equatorial to uvw, filling in\n# unknown values when necesarry based on assumptions\n# explained in the code. (Not entirely deterministic.)\n#\n# Input:\n# \tstar = (i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t)\n# Output:\n# star = (i, u, v, w, t)\ndef equatorial_to_uvw(star):\n\t(i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t) = star\n\tB = T * A(RA, Dec)\n\t# Need to determine RV ------ TODO ------\n\t# For now use throwaway value of 100\n\tRV = 100\n\t(u, v, w) = B * np.matrix([[RV],\n\t\t\t\t\t\t\t [k * pm_RA / prlx],\n\t\t\t\t\t\t\t [k * pm_Dec / prlx]])\n\t# Unpack singleton matrices to scalars\n\tu = u.item(0)\n\tv = v.item(0)\n\tw = w.item(0)\n\treturn (i, u, v, w, t)\n\n# generate_uvw\n# ---------------------------------------------------\n# Generates a random star in uvw based on rough assumptions\n# regarding general distribution of the galaxy.\n# See Assumptions section of code near top for more clarity\n# RE how this method works.\n#\n# Input:\n# \ttype = thin_disk, thick_disk, or halo\n# Output:\n#\tstar = (i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t)\ndef generate_uvw(type):\n\t# i - UUID. Simulates the steller id in the Gaia data. 
(Unpacked)\n\ti = int(uuid.uuid4())\n\t# u, v, and w are galactic velocities\n\tu = random.gauss(type['u'][0], type['u'][1])\n\tv = random.gauss(type['v'][0], type['v'][1])\n\tw = random.gauss(type['w'][0], type['w'][1])\n\t# t - Galactic age\n\tt = random.uniform(type['t'][0], type['t'][1])\n\treturn (i, u, v, w, t)\n\n# write_lines\n# ---------------------------------------------------\n# Writes a long list of generated stars in CSV format to\n# a file, which is the input to the visualization code.\n# Formatted as follows:\n# i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t, u, v, w\n#\n# Input:\n# \tstars \t - a list of stars. Each star is a fairly complicated\n#\t\t\t tuple of the form:\n#\t\t\t (i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t, u, v, w)\n#\t\t\t Essentially, a star is a concatenation of uvw and\n#\t\t\t equatorial variables, because we might as well\n#\t\t\t include everything in our CSV pre-processed so we can\n#\t\t\t visualize whatever we want on-the-fly without any hefty\n#\t\t\t intermediary computation.\n#\tfilename - the filepath to write to. Either absolute (begin with ~)\n#\t\t\t or relative to this file (generate_synthetic_data.py) \n#\t\t\t should work.\n# Output:\n#\tsuccess - True if file was successfully written, else Error object.\ndef write_lines(stars, filename):\n\ttry:\n\t\twith open(filename, 'w') as file:\n\t\t\tout = csv.writer(file)\n\t\t\t# Write the header of the CSV file\n\t\t\tout.writerow(['i', 'RA', 'Dec', 'pm_RA', 'pm_Dec', 'prlx', 'V_rad', \\\n\t\t\t\t\t\t 't', 'u', 'v', 'w'])\n\t\t\tfor star in stars:\n\t\t\t\tout.writerow(star)\n\texcept Exception as ex:\n\t\treturn ex\n\t# Presumably we are successful if we make it this far.\n\treturn True\n\n# generate_n_data\n# ---------------------------------------------------\n# Generates n-many synthetic stars and writes all the data\n# to a file.\n#\n# Input:\n# \tn \t\t - how many synthetic stars to generate.\n#\tfilename - the filepath to write to. 
Either absolute (begin with ~)\n#\t\t\t or relative to this file (generate_synthetic_data.py) \n#\t\t\t should work.\n# Output:\n#\tsuccess - True if file was successfully written, else Error object.\ndef generate_n_data(n, filename):\n\ttry:\n\t\tstars = []\n\t\tfor i in range(n):\n\t\t\tpopulation = random.choice(populations)\n\t\t\tuvw_star = generate_uvw(population)\n\t\t\tequatorial_star = uvw_to_equatorial(uvw_star)\n\t\t\t(i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t) = equatorial_star\n\t\t\t(i, u, v, w, t) = uvw_star\n\t\t\tstar = (i, RA, Dec, pm_RA, pm_Dec, prlx, V_rad, t, u, v, w)\n\t\t\tstars.append(star)\n\t\treturn write_lines(stars, filename) # (*)\n\texcept Exception as ex:\n\t\treturn ex # (**)\n\t# No need to return True here, because no matter what we returned\n\t# already at locations (*) or (**) in the code above" }, { "alpha_fraction": 0.6117444038391113, "alphanum_fraction": 0.6200345158576965, "avg_line_length": 22.544715881347656, "blob_id": "f2d9c87ce6eb2853d926eb105ba9d67933cd3f82", "content_id": "ae7b7bd1e40f6e78e10dda772bf81a6f16c88611", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2895, "license_type": "permissive", "max_line_length": 76, "num_lines": 123, "path": "/Visualize/index.js", "repo_name": "maxvonhippel/Stellar-Visualizer", "src_encoding": "UTF-8", "text": "// Initialize local variables to null\nvar data = null;\nvar graph3d = null;\nvar container = null;\nvar options = \n{\n width: '600px',\n height: '600px',\n style: 'dot-color',\n showPerspective: true,\n showGrid: true,\n showShadow: false,\n keepAspectRatio: true,\n verticalRatio: 0.5\n};\nvar UVW = true;\n\n// Called when the Visualization API is loaded.\nfunction draw_visualization(csv_file) \n{\n\tvar pow = Math.pow;\n\tvar sqrt = Math.sqrt;\n\t// parse the csv\n\tdata = new vis.DataSet();\n\tvar lines = Papa.parse(csv_file, \n\t{\n\t\tdownload: true,\n\t\theader: true,\n\t\tstep: function(row) \n\t\t{\n\t\t\tvar row_data = row.data[0];\n\t\t\tvar id = parseInt(row_data.i);\n\t\t\tvar x = parseInt(row_data.u);\n\t\t\tvar y = parseInt(row_data.v);\n\t\t\tvar z = parseInt(row_data.w);\n\t\t\tvar t = parseInt(row_data.t);\t// use t for style\n\t\t\tdata.add({\n\t\t\t\tid:id,\n\t\t\t\tRA:parseInt(row_data.RA),\n\t\t\t\tDec:parseInt(row_data.Dec),\n\t\t\t\tpm_RA:parseInt(row_data.pm_RA),\n\t\t\t\tpm_Dec:parseInt(row_data.pm_Dec),\n\t\t\t\tprlx:parseInt(row_data.prlx),\n\t\t\t\tV_rad:parseInt(row_data.V_rad),\n\t\t\t\tt:t,\n\t\t\t\tu:x,\n\t\t\t\tv:y,\n\t\t\t\tw:z,\n\t\t\t\tx:x,\n\t\t\t\ty:y,\n\t\t\t\tz:z,\n\t\t\t\tstyle:parseInt(t),\n\t\t\t});\n\t\t},\n\t\tcomplete: function() \n\t\t{ \n\t\t\tcontainer = document.getElementById('visualization');\n \t\tgraph3d = new vis.Graph3d(container, data, options); \n \t\t\tset_graph_options();\n \t\t\t// these options *should* only need to be set once\n \t\t\tgraph3d.tooltip = true;\n\t\t\tgraph3d.legendLabel = \"Galactic Age\";\n \t\tconsole.log('done drawing');\n \t}\n\t});\n}\n\nfunction handle_file_select(evt) \n{\n \tvar files = evt.target.files;\n // only select first one\n draw_visualization(files[0]);\n}\n\ndocument.getElementById('file')\n\t\t.addEventListener('change',\n\t\t\t\t\t\t handle_file_select,\n\t\t\t\t\t\t false);\n\n// ------------ TODO ------------------\n/* 1. Need to make it so data contains all data not just x, y, z\n * 2. 
Would be great to make this more elegant, so that maybe we have 1\n * dataset for all the data and a second for just the x, y, z or something?\n * (Is that scaleable???) Then we could just change the second based on the\n * first.\n * 3. Then comes all the UI stuff like showing a buffering indicator of some\n * sort, including axis labels, etc.\n */\n\nfunction set_graph_options()\n{\n\tgraph3d.xLabel = UVW ? \"u\" : \"RA\";\n\tgraph3d.yLabel = UVW ? \"v\" : \"Dec\";\n\tgraph3d.zLabel = UVW ? \"w\" : \"V_rad\";\n}\n\nfunction change_coordinates() \n{\n\tif (data == null || data._data == null) return;\n\tUVW = !UVW;\n\tfor (var line in data._data) {\n\t\tvar cur_line = data._data[line];\n\t\tif (UVW == true)\n\t\t{\n\t\t\tcur_line.x = cur_line.u;\n\t\t\tcur_line.y = cur_line.v;\n\t\t\tcur_line.z = cur_line.w;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tcur_line.x = cur_line.RA;\n\t\t\tcur_line.y = cur_line.Dec;\n\t\t\tcur_line.z = cur_line.V_rad;\n\t\t}\n\t}\n\tgraph3d.setData(data);\n\tset_graph_options();\n\tgraph3d.redraw();\n}\n\ndocument.getElementById('coordinatePicker')\n\t\t.addEventListener('change',\n\t\t\t\t\t\t change_coordinates);" } ]
num_files: 3

repo_name: vonpupp/fbcontacts
repo_url: https://github.com/vonpupp/fbcontacts
snapshot_id: d73bf49ad6be619408966091ef5164d0d14606a7
revision_id: 4eafd72a10ce866d95917d234c6996182cf4fc24
directory_id: 3696c1d5b734dbbb335ddea51d2cecbf9f7cd0b2
branch_name: refs/heads/master
visit_date: 2020-05-29T20:57:24.030282
revision_date: 2012-09-21T19:39:08
committer_date: 2012-09-21T19:39:08
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5588809847831726, "alphanum_fraction": 0.5657193660736084, "avg_line_length": 41.81368637084961, "blob_id": "35a529d3e4fabf5cea345cc36f395cafcad1a588", "content_id": "184b7c2e6e5acc9a7013b2db2a353c045be3e7f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11260, "license_type": "no_license", "max_line_length": 476, "num_lines": 263, "path": "/fbcontacts.py", "repo_name": "vonpupp/fbcontacts", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/python\n\n# Project:\t\t\tfbcontacts\n# Language:\t\t\tPython\n#\n# License: \t\t\tGNU Public License\n# This file is part of the project.\n#\tThis is free software: you can redistribute it and/or modify\n#\tit under the terms of the GNU General Public License as published by\n#\tthe Free Software Foundation, either version 3 of the License, or\n#\t(at your option) any later version.\n#\n#\tDistributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n# without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n# <http://www.gnu.org/licenses/>\n#\n# Author:\t\t\tAlbert De La Fuente (www.albertdelafuente.com)\n# E-Mail:\t\t\thttp://www.google.com/recaptcha/mailhide/d?k=01eb_9W_IYJ4Pm_Y9ALRIPug==&c=L15IEH_kstH8WRWfqnRyeW4IDQuZPzNDRB0KCzMTbHQ=\n#\n# Description:\t\tCreates a CSV file with facebook contacts data (name and email)\n# from yahoo TrueSwitch\n#\n# Limitations:\t\tI've done this in about an hour, so don't expect too much!\n# - Unicode characters are not correctly handled, patches are welcome!\n# Database tables used:\tNone \n# Thread Safe:\t No\n# Extendable:\t\t\tNo\n# Platform Dependencies:\tLinux (openSUSE used)\n# Compiler Options:\t\t\n\"\"\"\n Creates a CSV file with facebook contacts data (name and email) from yahoo TrueSwitch\n\n PROCEDURE:\n * Requirement: You need a yahoo & hotmail mail account, create them if needed.\n \n AT HOTMAIL:\n * At hotmail go to people (up left corner)\n * Click on the Facebook icon\n * Click on okay, to authorize facebook to share contacts data with hotmail\n \n AT YAHOO:\n * At yahoo mail go to contacts\n * Click on Import contacts\n * Click on the hotmail icon\n * Put your credentials and check \"I give Yahoo! permission...\"\n * The \"Step 2\" window will show you a list of your contacts. 
Save the page with save page as\n * It will create a /transfercontacts_files folder with an index.html file within\n This is going to be the program input!\n\n Command Line Usage:\n python fbcontacts.py ./transfercontacts_files/index.html\n \n Author:\t\t\tAlbert De La Fuente (www.albertdelafuente.com)\n E-Mail:\t\t\thttp://www.google.com/recaptcha/mailhide/d?k=01eb_9W_IYJ4Pm_Y9ALRIPug==&c=L15IEH_kstH8WRWfqnRyeW4IDQuZPzNDRB0KCzMTbHQ=\n \n Why I've done this: I've done this because I believe that I own my fb account data,\n and I should be able to export it if I want.\n\"\"\"\n\n#import argparse\nimport codecs, sys, mmap, re, csv\n#import os\nfrom vlog import vlogger\n\nVERB_NON = 0\nVERB_MIN = 1\nVERB_MED = 2\nVERB_MAX = 3\n\ndef split_on_caps(str):\n rs = re.findall('[A-Z][^A-Z]*',str)\n fs = \"\"\n for word in rs:\n fs += \" \"+word\n return fs\n\ndef conv(s):\n if isinstance(s, unicode):\n s = s.encode('iso-8859-1')\n return s.decode('string-escape').decode('utf-8')\n\n\nclass fbcontactdata():\n def __init__(self, fullname, first, middle, last, email):\n # Public\n self.fullname = fullname\n self.first = first\n self.middle = middle\n self.last = last\n self.email = email\n\nclass fbcontactsparser():\n def __init__(self, filename):\n # Public\n self.filename = filename\n self.outfile = 'out.csv'\n self.clist = []\n \n # Init vlogger\n self.__verbosity = VERB_MAX\n self.vlog = vlogger(self.__verbosity, sys.stdout)\n #self.vlog = self.__log()\n \n # Init mmap\n self.__file = codecs.open(filename, encoding='utf-8', mode='r') # open(filename, 'r')\n self.vlog(VERB_MIN, \"opening file: %s\" % filename)\n self.__f = mmap.mmap(self.__file.fileno(), 0, access=mmap.ACCESS_READ)\n self.__f.seek(0) # rewind\n pass\n \n def parse(self):\n \"\"\"\n Fills the funlist list with all the parsed functionalities based on the index.\n \"\"\"\n self.vlog(VERB_MED, \"-> %s\" % __name__)\n self.__f.seek(0)\n self.__f.readline()\n loc = self.__f.tell()\n endloc = self.__f.size() - 1\n \n #self.vlog(VERB_MAX, \"beginloc = %d\" % beginloc)\n #self.vlog(VERB_MAX, \"endloc = %d\" % endloc)\n \n self.clist = []\n count = 0\n biggestline = ''\n while (loc < endloc):\n line = self.__f.readline()\n self.vlog(VERB_MAX, \"reading line '%s' bytes = %d\" % (line, loc))\n if len(line) > len(biggestline):\n biggestline = line\n loc = self.__f.tell()\n \n #self.vlog(VERB_MED, \"<- getfunlist()\")\n #self.vlog(VERB_MED, \"<- %s\" % __name__)\n \n #self.vlog(VERB_MAX, \"biggestline = %s\" % (biggestline))\n \n if self.outfile is not '':\n fh = open(self.outfile, 'wb')\n #fh = codecs.open(self.outfile, \"wb\", \"utf-8\")\n\n #fh = codecs.open(self.outfile, 'wb', encoding=\"utf-8\")\n else:\n fh = sys.stdout\n\n# csvhdlr = csv.writer(fh, quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n #csvhdlr.writerow(\"Name,Given Name,Additional Name,Family Name,Yomi Name,Given Name Yomi,Additional Name Yomi,Family Name Yomi,Name Prefix,Name Suffix,Initials,Nickname,Short Name,Maiden Name,Birthday,Gender,Location,Billing Information,Directory Server,Mileage,Occupation,Hobby,Sensitivity,Priority,Subject,Notes,Group Membership,E-mail 1 - Type,E-mail 1 - Value\".split(',')) \n \n #clist = biggestline.split('&quot;}]},')\n clist = biggestline.split(b'</label></div></li>')\n for contact in clist:\n fullname = ''\n first = ''\n middle = ''\n last = ''\n email = ''\n \n # RECORD\n #<li><div class=\"clearfix\"><label><input checked=\"checked\" name=\"contact_553\" value=\"{&quot;name&quot;:&quot;Vinicius Bufoni &quot;,&quot;email&quot;:&quot;[email 
protected]&quot;,&quot;fields&quot;:[{&quot;type&quot;:&quot;name&quot;,&quot;first&quot;:&quot;Vinicius&quot;,&quot;middle&quot;:&quot;Bufoni&quot;},{&quot;type&quot;:&quot;email&quot;,&quot;data&quot;:&quot;[email protected]&quot;}],&quot;category&quot;:&quot;Msn&quot;}\" type=\"checkbox\">Vinicius Bufoni <b>[email protected]</b></label></div></li>\n \n m = re.compile(b'<li><div class=\"clearfix\"><label><input checked=\"checked\" name=\"contact_(.*?)\" value=').search(contact)\n if m:\n cid = m.group(1)\n \n #&quot;name&quot;:&quot;\\\\u00c1tila Ocanha &quot;\n m = re.compile(b'&quot;name&quot;:&quot;(.*?)&quot;').search(contact)\n if m:\n fullname = m.group(1)\n \n m = re.compile(b'&quot;email&quot;:&quot;(.*?)&quot;').search(contact)\n if m:\n email = m.group(1)\n \n m = re.compile(b'&quot;name&quot;,&quot;first&quot;:&quot;(.*?)&quot;').search(contact)\n #m = re.compile(r'&quot;first&quot;:&quot;(.*?)&quot;').search(contact)\n if m:\n first = m.group(1)\n \n m = re.compile(b'&quot;last&quot;:&quot;(.*?)&quot;').search(contact)\n #m = re.compile(r'&quot;last&quot;:&quot;(.*?)&quot;').search(contact)\n if m:\n last = m.group(1)\n \n #if last is not '':\n m = re.compile(b'&quot;middle&quot;:&quot;(.*?)&quot;').search(contact)\n #m = re.compile(r'&quot;middle&quot;:&quot;(.*?)&quot;').search(contact)\n #else:\n #m = re.compile(r'&quot;middle&quot;:&quot;(.*?)&quot;},').search(contact)\n if m:\n middle = m.group(1)\n \n #print(fullname)\n fullname = re.sub('\\s+', ' ', split_on_caps(fullname)).strip()\n #print(fullname)\n #print(conv(fullname))\n fullname = fullname.decode('unicode_escape')\n #first = first.decode('unicode_escape')\n #middle = middle.decode('unicode_escape')\n #newlast = last.decode('unicode_escape')\n #print(fullname)\n \n #fullname = unicode(fullname, \"iso-8859-1\")\n #fullname = fullname.decode('latin9').encode('utf8')\n #fullname = fullname.decode('Latin-1').encode('utf8')\n #print(fullname)\n \n contactobject = fbcontactdata(fullname, first, middle, last, email)\n self.clist.append(contactobject)\n \n #self.vlog(VERB_MAX, \"--- %s\" % (cid))\n #self.vlog(VERB_MAX, \"raw data = %s\" % (contact))\n print(fullname)\n #self.vlog(VERB_MAX, \"fullname = %s\" % (fullname))\n #self.vlog(VERB_MAX, \"first = %s\" % (first))\n #self.vlog(VERB_MAX, \"middle = %s\" % (middle))\n #self.vlog(VERB_MAX, \"last = %s\" % (newlast))\n #self.vlog(VERB_MAX, \"email = %s\" % (email))\n \n #row = fullname + ',,,,,,,,,,,,,,,,,,,,,,,,,,fbcontacts ::: * My Contacts,* Home,' + email\n #csvhdlr.writerow(row.split(','))\n #,Yasmin,L\\u00f3pez,L\\u00f3pez,,,,,,,,,,,,,,,,,,,,,,,fbcontacts ::: * My Contacts,* Home,[email protected]\n \n #print('first = 'first + \"|\" + middle + \"|\" + last + \"|\" + email)\n #print(contact)\n #print clist\n \n #self.vlog(VERB_MAX, \"result = %s\" % (self.clist))\n #self.funlist += [result]\n #return self.funlist\n \n def writecsv(self):\n fh = sys.stdout\n if self.outfile is not '':\n fh = open(self.outfile, 'wb')\n #fh = codecs.open(self.outfile, \"wb\", \"utf-8\")\n #fh = codecs.open(self.outfile, 'wb', encoding=\"unicode_escape\")\n \n csvhdlr = csv.writer(fh, quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csvhdlr.writerow(\"Name,Given Name,Additional Name,Family Name,Yomi Name,Given Name Yomi,Additional Name Yomi,Family Name Yomi,Name Prefix,Name Suffix,Initials,Nickname,Short Name,Maiden Name,Birthday,Gender,Location,Billing Information,Directory Server,Mileage,Occupation,Hobby,Sensitivity,Priority,Subject,Notes,Group Membership,E-mail 1 - Type,E-mail 1 - 
Value\".split(',')) \n for contact in self.clist:\n #csvhdlr.writerow(dict((vname, vtype, vnotes, vstereotype, vauthor, valias, vgenfile.encode('utf-8')) for vname, vtype, vnotes, vstereotype, vauthor, valias, vgenfile in row.iteritems()))\n row = contact.fullname + b',,,,,,,,,,,,,,,,,,,,,,,,,,fbcontacts ::: * My Contacts,* Home,' + contact.email\n row = row.encode('utf8')\n values = row.split(',')\n print(values[0])\n csvhdlr.writerow(values)\n\ndef main(args):\n #parser = fbcontactsparser(args.filename)\n parser = fbcontactsparser(args[1])\n parser.parse()\n parser.writecsv()\n\nif __name__ == '__main__':\n #parser = argparse.ArgumentParser(description = __doc__) #\"Creates a CSV file with facebook contacts data (name and email) from yahoo TrueSwitch\")\n #parser.add_argument(\"filename\")\n #args = parser.parse_args()\n #main(args)\n sys.exit(main(sys.argv))\n" }, { "alpha_fraction": 0.5425220131874084, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 25.30769157409668, "blob_id": "7bb333288f979bed93b2245b54b722258b46b5e2", "content_id": "27b89011fccee92b7306307b7f5c763882b47303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/vlog.py", "repo_name": "vonpupp/fbcontacts", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nimport sys #as __sys\n\nclass vlogger:\n def __init__(self, verbosity = 0, log = sys.stderr):\n self.__verbosity = verbosity\n self.__log = log\n \n def __call__(self, verbosity, msg):\n if verbosity <= self.__verbosity:\n print(self.__log, '*' * verbosity, msg)" }, { "alpha_fraction": 0.7034834027290344, "alphanum_fraction": 0.7128292322158813, "avg_line_length": 42.592594146728516, "blob_id": "c81d5b35b6bb62694c75e6ec3337d9f0de88e1da", "content_id": "7f02189cb059d324e540496d5c8d31aca815038d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 126, "num_lines": 27, "path": "/readme.txt", "repo_name": "vonpupp/fbcontacts", "src_encoding": "UTF-8", "text": "Creates a CSV file with facebook contacts data (name and email) from yahoo TrueSwitch\n\nPROCEDURE:\n * Requirement: You need a yahoo & hotmail mail account, create them if needed.\n \nAT HOTMAIL:\n * At hotmail go to people (up left corner)\n * Click on the Facebook icon\n * Click on okay, to authorize facebook to share contacts data with hotmail\n \nAT YAHOO:\n * At yahoo mail go to contacts\n * Click on Import contacts\n * Click on the hotmail icon\n * Put your credentials and check \"I give Yahoo! permission...\"\n * The \"Step 2\" window will show you a list of your contacts. Save the page with save page as\n * It will create a /transfercontacts_files folder with an index.html file within\n This is going to be the program input!\n\nCommand Line Usage:\n python fbcontacts.py ./transfercontacts_files/index.html\n \nAuthor:\tAlbert De La Fuente (www.albertdelafuente.com)\nE-Mail:\thttp://www.google.com/recaptcha/mailhide/d?k=01eb_9W_IYJ4Pm_Y9ALRIPug==&c=L15IEH_kstH8WRWfqnRyeW4IDQuZPzNDRB0KCzMTbHQ=\n \nWhy I've done this:\tI've done this because I believe that I own my fb account\n data and I should be able to export it if I want.\n" } ]
3
hyhplus/LeetCodeByPython
https://github.com/hyhplus/LeetCodeByPython
76775ba8c967dcddbc8655aa9ae5fc11bd3d502c
ebc0a59610e4c8b72660a9c96f408cc3319a7c28
27fbab4791fbbf5e5eacd362bc213932dd24c969
refs/heads/master
2021-06-08T11:13:15.660261
2021-05-25T16:19:39
2021-05-25T16:19:39
163,955,567
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5010941028594971, "alphanum_fraction": 0.5908096432685852, "avg_line_length": 21.700000762939453, "blob_id": "4c60b4c740a58594e10daa853c005b2289ed87fa", "content_id": "efa9b5b05c6a01899ee0b9472c4a4b62b8073f76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "permissive", "max_line_length": 68, "num_lines": 20, "path": "/interviews/003EasyListSort.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n简洁的代码实现: int list1大于100的元素存入list2, list2逆序排序\n\"\"\"\n\n\ndef find_bigger_reserve(list1):\n list2 = list()\n for i in range(len(list1)):\n if list1[i] > 100:\n list2.append(list1[i])\n # or list2 = sorted(list2, reverse=True)\n list2.sort(reverse=True)\n return list2\n\n\nif __name__ == '__main__':\n result = find_bigger_reserve([123, 23, 233, 34, 121, 122, 1333])\n print(result)\n\n\n\n" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.5579364895820618, "avg_line_length": 24.714284896850586, "blob_id": "47f8537cac4cbaaaf9cc8bdbcd759a51ffc6d5d4", "content_id": "0e4fe481e5319009ba37b4fe08bfeaa495596c7e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3276, "license_type": "permissive", "max_line_length": 90, "num_lines": 98, "path": "/001~100/008.StringToInteger(atoi).py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n8. 字符串转为整型\nhttps://leetcode.com/problems/string-to-integer-atoi/\n\n实现atoi将字符串转换为整数。\n该函数首先丢弃所需数量的空白字符,直到找到第一个非空白字符。然后,从该字符开始,采用可选的初始加号或减号,后跟尽可能多的数字,并将它们解释为数值。\n字符串可以包含在形成整数之后的其他字符,这些字符将被忽略并且对此函数的行为没有影响。\n如果str中的第一个非空白字符序列不是有效的整数,或者由于str是空的或者只包含空白字符而不存在这样的序列,则不执行转换。\n如果无法执行有效转换,则返回零值。\n\n注意:\n只有空格字符' '被视为空格字符。\n假设我们正在处理一个只能在32位有符号整数范围内存储整数的环境:[ - 2^31, 2^31 - 1]。如果数值超出可表示值的范围,\n则返回INT_MAX(2^31 - 1)或INT_MIN(-2^31)。\n\n例1:\n输入: “42”\n输出: 42\n\n例2:\n输入: “ - 42”\n输出: -42\n说明:第一个非空白字符是' - ',这是减号。然后取尽可能多的数字,得到42。\n\nExample 3:\nInput: \"4193 with words\"\nOutput: 4193\nExplanation: Conversion stops at digit '3' as the next character is not a numerical digit.\n\nExample 4:\nInput: \"words and 987\"\nOutput: 0\nExplanation: The first non-whitespace character is 'w', which is not a numerical\n digit or a +/- sign. 
Therefore no valid conversion could be performed.\n\"\"\"\nimport re\n\nfrom timeDecorator import clock\n\nmatcher = re.compile(r'^ *([-\\+]?\\d+)')\n\n\n@clock\nclass Solution:\n def myAtoi(self, s):\n \"\"\"\n Using the stupid stub that the site provided, overriding `str`\n [-<-]利用正则表达式截取字符串\n \"\"\"\n match = matcher.match(s)\n\n if not match:\n return 0\n else:\n num = int(match.group(1))\n if num >= 0:\n return min(num, 2**31 - 1)\n elif num < 0:\n return max(num, -2**31)\n\n\n@clock\nclass Solution2:\n def myAtoi(self, strings):\n number = ''\n meet_num = False # 判断首字符的标志位, False\n for word in strings:\n if word == ' ' and not meet_num:\n continue\n if not word.isdigit() and word != '-' and word != '+':\n break\n\n if not meet_num:\n if word.isdigit() or word == '-' or word == '+':\n number += word\n meet_num = True # 读取了首字符就置为True\n elif meet_num:\n if word.isdigit(): # 除了首字符可以为-,+;其余位只能是数字。\n number += word\n else:\n break\n\n if number == '' or number == '+' or number == '-':\n return 0\n elif int(number) >= 0:\n return min(int(number), 2**31 - 1)\n elif int(number) < 0:\n return max(int(number), -2**31)\n\n\nif __name__ == '__main__':\n string = '234 3+2 a -12131 and string'\n result1 = Solution().myAtoi(string)\n result2 = Solution2().myAtoi(string)\n print(result1, result2)\n" }, { "alpha_fraction": 0.48103079199790955, "alphanum_fraction": 0.5032212138175964, "avg_line_length": 22.67796516418457, "blob_id": "41b82434490f657a738c578c5c452d4423429be4", "content_id": "9c94ac929fd964bbae087d3fca3f402aaa9c02e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1397, "license_type": "permissive", "max_line_length": 72, "num_lines": 59, "path": "/001~100/017LetterCombinationsOfNumber.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n17\n\nGiven a string containing digits from 2-9 inclusive, return all possible\nletter combinations that the number could represent.\n\nA mapping of digit to letters (just like on the telephone buttons)\nis given below. 
Note that 1 does not map to any letters.\n\n2: abc\n3: def\n4: ghi\n5: jkl\n6: mno\n7: pqrs\n8: tuv\n9: wxyz\n\"\"\"\n\n\nclass Solution:\n def letterCombinations(self, digits):\n \"\"\"\n\n :param digits: str\n :return: List[str]\n \"\"\"\n # Edge-case\n if not digits:\n return []\n\n import itertools\n # Create dictionary of lists representing each number\n num_dict = dict()\n num_dict['0'] = []\n num_dict['1'] = []\n num_dict['1'] = []\n num_dict['2'] = ['a', 'b', 'c']\n num_dict['3'] = ['d', 'e', 'f']\n num_dict['4'] = ['g', 'h', 'i']\n num_dict['5'] = ['j', 'k', 'l']\n num_dict['6'] = ['m', 'n', 'o']\n num_dict['7'] = ['p', 'q', 'r', 's']\n num_dict['8'] = ['t', 'u', 'v']\n num_dict['9'] = ['w', 'x', 'y', 'z']\n\n num_list = [num_dict[c] for c in digits]\n soln_list = []\n for r in itertools.product(*num_list):\n soln_list.append(''.join(r))\n return soln_list\n\n\nif __name__ == '__main__':\n sl_1 = '345'\n result = Solution().letterCombinations(sl_1)\n print(result)\n" }, { "alpha_fraction": 0.4406392574310303, "alphanum_fraction": 0.4596651494503021, "avg_line_length": 23.314815521240234, "blob_id": "a192d526d333b2736a5955e7ecfcb2383b900ba0", "content_id": "74b277a162b1b7e06ce07e47a9e694364497c6d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "permissive", "max_line_length": 62, "num_lines": 54, "path": "/testing/demo1_3_325.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "\nfrom typing import List\n\n\nclass Solution:\n def maxSubArrayLen(self, nums: List[int], k: int) -> int:\n s = 0\n d = {0: -1}\n res = 0\n\n for i in range(len(nums)):\n s += nums[i]\n if s - k in d:\n res = max(res, i-d[s-k])\n if s not in d:\n d[s] = i\n \n return res\n\n\nclass Solution1:\n def maxSubArrayLen(self, nums: List[int], k: int) -> int:\n dic = {0: -1}\n res = sdt = 0\n for i in range(len(nums)):\n sdt += nums[i]\n if sdt - k in dic:\n res = max(res, i-dic[sdt-k])\n if sdt not in dic:\n dic[sdt] = i\n return res\n\n\nfrom collections import defaultdict\n\nclass Solution2:\n def maxSubArrayLen(self, nums: List[int], k: int) -> int:\n n = len(nums)\n presum_idx = defaultdict(int)\n res = 0\n presum = 0\n presum_idx[0] = 0 # 前0个,和为0 也决定了必须用虚指\n for i in range(n):\n presum += nums[i]\n if presum not in presum_idx:\n presum_idx[presum] = i + 1\n if (presum - k) in presum_idx:\n res = max(res, i - presum_idx[presum - k] + 1)\n \n return res\n\n\ns = Solution1()\na = s.maxSubArrayLen([1, 2, 3, 1, 0,8], 7)\nprint(a)\n" }, { "alpha_fraction": 0.5496535897254944, "alphanum_fraction": 0.5848729610443115, "avg_line_length": 27.799999237060547, "blob_id": "d8628fc281f6369ca1658c538df31f588471a6a5", "content_id": "505583e98fed9da51dcbc15637737aa12939fb9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1982, "license_type": "permissive", "max_line_length": 118, "num_lines": 60, "path": "/001~100/002.AddTwoNumbers.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nYou are given two non-empty linked lists representing two non-negative integers.\nThe digits are stored in reverse order and each of their nodes contain a single digit.\nAdd the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 
itself.\n\n\n您将获得两个非空链表,表示两个非负整数。数字以相反的顺序存储,每个节点包含一个数字。\n添加两个数字并将其作为链接列表返回。\n您可以假设这两个数字不包含任何前导零,除了数字0本身。\n\n例:\n输入:(2 - > 4 - > 3)+(5 - > 6 - > 4)\n 输出: 7 - > 0 - > 8\n 说明: 342 + 465 = 807。\n\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n\n :param l1: ListNode\n :param l2: ListNode\n :return: ListNode\n \"\"\"\n carry = 0\n head = node = ListNode('#')\n while l1 or l2:\n val1 = l1.val if l1 else 0\n val2 = l2.val if l2 else 0\n carry, rem = divmod(carry + val1 + val2, 10)\n node.next = ListNode(rem)\n node = node.next\n l1 = l1.next if l1 else None\n l2 = l2.next if l2 else None\n if carry: # 进一位\n node.next = ListNode(carry)\n\n return head.next\n\n\nif __name__ == '__main__':\n a, a.next, a.next.next = ListNode(2), ListNode(4), ListNode(3)\n b, b.next, b.next.next = ListNode(5), ListNode(6), ListNode(9)\n result = Solution().addTwoNumbers(a, b)\n assert '{0}->{1}->{2}->{3}'.format(result.val, result.next.val, result.next.next.val, result.next.next.next.val) \\\n == '7->0->3->1' # assert 断言:判断结果是否正确;不正确则程序异常\n\n print(result.val, result.next.val, result.next.next.val, result.next.next.next.val)\n\n\n\n\n" }, { "alpha_fraction": 0.547890841960907, "alphanum_fraction": 0.566253125667572, "avg_line_length": 21.098901748657227, "blob_id": "354ec6697f88aa7ae2a926ec0dda5fcd11091fcc", "content_id": "b56290d793b07b780a056fad832a0b220909bb7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2435, "license_type": "permissive", "max_line_length": 117, "num_lines": 91, "path": "/001~100/010.RegularExpressionMatching.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n10. 正则表达式匹配\nhard\n\n给定输入字符串和模式(P),实现与“.”和“*”支持匹配的正则表达式。\n“.”匹配任何单个字符。\n“*”与前面的单个元素零个或多个匹配。\n匹配应该覆盖整个输入字符串(而不是部分)。\n\n注:\ns可以为空,并且只包含小写字母a-z。\np可以为空,只包含小写字母a-z和类似的字符。或者*。\n\n例1:\n输入:S=“AA”, P=“A”\n输出:假\n说明:“a”与整个字符串“a a”不匹配。\n\n例2:\n输入:S=“AA”, P=“a*”\n输出:真\n解释:“*”表示前面元素“a”的零个或多个。因此,重复“a”一次,它就变成“aa”。\n\nGiven an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'.\n\n'.' Matches any single character.\n'*' Matches zero or more of the preceding element.\nThe matching should cover the entire input string (not partial).\n\nNote:\n\ns could be empty and contains only lowercase letters a-z.\np could be empty and contains only lowercase letters a-z, and characters like . or *.\n\nExample 1:\nInput:\ns = \"aa\"\np = \"a\"\nOutput: false\nExplanation: \"a\" does not match the entire string \"aa\".\n\nExample 2:\nInput:\ns = \"aa\"\np = \"a*\"\nOutput: true\nExplanation: '*' means zero or more of the preceding element, 'a'. 
Therefore, by repeating 'a' once, it becomes \"aa\".\n\"\"\"\nfrom timeDecorator import clock\n\n\n@clock\nclass Solution:\n    def isMatch(self, s, p):\n        \"\"\" s:str, p:str\"\"\"\n        def dfs(s_idx, p_idx, memo):\n            if (s_idx, p_idx) in memo:\n                return memo[(s_idx, p_idx)]\n\n            if p_idx >= len(p):\n                return s_idx == len(s)\n\n            cur_match = s_idx < len(s) and (\n                s[s_idx] == p[p_idx] or p[p_idx] == \".\"\n            )\n\n            if p_idx + 1 < len(p) and p[p_idx+1] == \"*\":\n                match = dfs(s_idx, p_idx+2, memo) or \\\n                        (cur_match and dfs(s_idx+1, p_idx, memo))\n\n            else:\n                match = cur_match and dfs(s_idx+1, p_idx+1, memo)\n\n            memo[(s_idx, p_idx)] = match\n\n            return match\n        return dfs(0, 0, {})\n\n\nif __name__ == '__main__':\n    str1 = '123321312s'\n    p1 = '123.*s'\n\n    result = Solution().isMatch(str1, p1)\n    print(result)\n\n    # result2 = Solution2().isMatch(str1, p1)\n    # print(result2)\n\n\n\n\n" }, { "alpha_fraction": 0.4242081344127655, "alphanum_fraction": 0.4281674325466156, "avg_line_length": 23.55555534362793, "blob_id": "6b3891464c28260fc09a1c26ce9f5c4281798258", "content_id": "09e68061b98ae84fc9ee6e08aa6d843ece5338e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1964, "license_type": "permissive", "max_line_length": 77, "num_lines": 72, "path": "/001~100/020ValidParentheses.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ns = '( )'\n\nlen_ = len(s.split())\ns = (s.split())\n\nif len_ % 2 != 0:\n    print(False)\n\nleft_l = ['(', '{', '[']\nright_l = [')', '}', ']']\nfor i_ in range(len_):\n    pass\n\n\nclass Solution(object):\n    def isValid(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n\n        # 堆栈以跟踪开放的括号\n        stack = []\n\n        # 用于跟踪映射的哈希映射,这使代码非常简洁\n        # 还可以更轻松地添加更多类型的括号\n        mapping = {\")\": \"(\", \"}\": \"{\", \"]\": \"[\"}\n\n        # 对于表达式中的每个括号\n        for char in s:\n\n            # 如果该字符是结束括号\n            if char in mapping:\n\n                # 如果非空,则弹出堆栈中最顶层的元素\n                # 否则为top_element变量指定一个虚拟值‘#’\n                top_element = stack.pop() if stack else '#'\n\n                # The mapping for the opening bracket in our hash and the top\n                # element of the stack don't match, return False\n                if mapping[char] != top_element:\n                    return False\n            else:\n                # We have an opening bracket, simply push it onto the stack.\n                stack.append(char)\n\n        # In the end, if the stack is empty, then we have a valid expression.\n        # The stack won't be empty for cases like ((()\n        return not stack\n\n\nclass Solution2:\n    def isValid(self, s: 'str') -> 'bool':\n        v = []\n        d = {']': '[', ')': '(', '}': '{'}\n        for i in range(len(s)):\n            if s[i] in d.values():\n                v.append(s[i])\n            elif s[i] in d.keys():\n                if len(v) == 0:\n                    return False\n                elif v[-1] == d[s[i]]:\n                    v.pop()\n                else:\n                    return False\n        if not v:\n            return 
True\n else:\n return False\n" }, { "alpha_fraction": 0.3797321021556854, "alphanum_fraction": 0.4047757685184479, "avg_line_length": 25.41538429260254, "blob_id": "c367ae0322be8fa61fbfa3ccfe07a1e7cf113af5", "content_id": "175a93921f407d8db32d751e00fd040353297cb8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1879, "license_type": "permissive", "max_line_length": 91, "num_lines": 65, "path": "/001~100/011ContainerWithMostWater.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nclass Solution:\n def maxArea(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n\n \"\"\" \n 方法一:暴力法,可运行。但是提交超时,不可通过。\n \"\"\"\n # max_ = 0\n # for i in range(len(height)):\n #\n # for j in range(i+1, len(height)):\n # max_ = max(max_, min(height[i], height[j]) * (j - i))\n #\n # return max_\n\n # # 双指针法解1\n # res, l, r = 0, 0, len(height)-1\n # while l < r:\n # h = min(height[l], height[r])\n # res, l, r = max(res, h * (r - l)), l + (height[l] == h), r - (height[r] == h)\n # return res\n\n # # 双指针法解2\n # max_area = 0,\n # left = 0,\n # right = len(height) - left,\n #\n # while left < right:\n # max_area = max(max_area, min(height[left], height[right]) * (right - 1))\n # if height[left] < height[right]:\n # left += 1\n # else:\n # right -= 1\n # return max_area\n\n \"\"\"\n 方法二:双指针法(3),分别从列表左右两端遍历(while),面积=短边*距离,哪边短遍历时+1,因为短的已达最大值。\n \"\"\"\n max_area = 0\n i = 0\n j = len(height) - 1\n while i < j:\n width = j - i\n length = min(height[i], height[j])\n volume = width * length\n if max_area < volume:\n max_area = volume\n if length == height[i]:\n i += 1\n else:\n j -= 1\n return max_area\n\n\nif __name__ == '__main__':\n integer_list = [1, 3, 3, 9, 12, 1, 2, 43, 2, 23, 2, 2, 23, 23, 23, 2]\n result = Solution().maxArea(integer_list)\n print(result)\n" }, { "alpha_fraction": 0.8101266026496887, "alphanum_fraction": 0.8227847814559937, "avg_line_length": 24.66666603088379, "blob_id": "a7896deea3003f03b44f7d96f06d716f99f6ed75", "content_id": "bc46770fb87df38dbab660e80990a0bf01aa33a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 157, "license_type": "permissive", "max_line_length": 29, "num_lines": 3, "path": "/README.md", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "# LeetCodeByPython\n使用Python3完成LeetCode的算法题目。 \n尽量使用性能较优的算法,在时间和空间(内存)之间取得平衡。 \n" }, { "alpha_fraction": 0.5704894065856934, "alphanum_fraction": 0.586559534072876, "avg_line_length": 25.269229888916016, "blob_id": "f9e9453ced4f9496c95a80af29589c113b8aa21c", "content_id": "30acbbd20c19f6b1bb3fcb71e74071b23c242a0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1661, "license_type": "permissive", "max_line_length": 95, "num_lines": 52, "path": "/001~100/006.ZigZagConversion.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n字符串\"PAYPALISHIRING\"在给定行数上以Z字形图案写入,然后逐行阅读: \"PAHNAPLSIIGYIR\"\n编写将采用字符串的代码并在给定多行的情况下进行此转换:\nstring convert(string s,int numRows);\n\n----\n\nThe string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this:\n(you may want to display this pattern in a fixed font for better legibility)\n\nP A H N\nA P L S I I G\nY I R\nAnd then read line by line: \"PAHNAPLSIIGYIR\"\nWrite the code that 
will take a string and make this conversion given a number of rows:\nstring convert(string s, int numRows);\n\"\"\"\n\n\nclass Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n 如果numRows是5,我们看到这样的锯齿形图案:(here Algorithmic thinking)\n\n 我们可以看到数字0~7在这里是一个小模式,如果我们除以8,我们可以在其他小模式中获得相同的数字。如\n 0%8 = 0; 8%8 = 0\n 1%8 = 1; 9%8 = 1\n 所以我们可以使用此功能并将它们过滤到我们存储的行中。\n\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n if numRows == 1:\n return s\n rows = [''] * numRows\n num = (numRows-1) * 2\n for i, item in enumerate(s):\n if i % num >= numRows:\n rows[(num - i % num)] += item\n else:\n rows[i % num] += item\n return ''.join(rows)\n\n\nif __name__ == '__main__':\n string = 'my path'\n row = 2\n result = Solution().convert(string, row)\n print(result)\n\n\n\n" }, { "alpha_fraction": 0.4767441749572754, "alphanum_fraction": 0.510465145111084, "avg_line_length": 22.216217041015625, "blob_id": "dfcfde9db694412cb6bdd99ca66efac24f2cdad0", "content_id": "5b8e98810a800871b3a6a5cdf050b7358259af82", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1144, "license_type": "permissive", "max_line_length": 86, "num_lines": 37, "path": "/001~100/001.TwoSum.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n给定一个整数数组,返回两个数字的索引,使它们相加到特定目标。\n可以假设每个输入只有一个解决方案,并且可能不会两次使用相同的元素。\n\nExample:\nGiven nums = [2, 7, 11, 15], target = 9,\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\"\"\"\n\n\nclass Solution:\n def twoSum(self, nums, target):\n \"\"\"\n 算法思路:\n 新建一个字典,字典的key: 列表的元素(值); 字典的value: 列表的值的索引。\n 遍历这个列表的时候,获取字典的key存在:target-nums[x]的时候,则返回此索引和当前索引。\n :param nums:\n :param target:\n :return:\n \"\"\"\n new_dic = dict()\n for x in range(len(nums)):\n sec = new_dic.get(target - nums[x], -1) # dict.get(key): key是要的值,不存在则返回-1\n print(sec)\n if sec >= 0:\n return [sec, x], new_dic\n else:\n new_dic[nums[x]] = x\n\n\na = Solution()\nb = a.twoSum([3, 11, 0, 1, 7, 8, 1, 7], 9)\nprint(b)\n\n" }, { "alpha_fraction": 0.4446570873260498, "alphanum_fraction": 0.4695374667644501, "avg_line_length": 18.96815299987793, "blob_id": "c3724454c28353eb0bb49e6f90f3186655dbf602", "content_id": "8447e58757b88ec4868506b777a6b1fc5dc3cddc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3625, "license_type": "permissive", "max_line_length": 67, "num_lines": 157, "path": "/Algorithm/sort/baseSort.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n排序算法:\n\nO(n^2)\n冒泡排序\n插入排序\n选择排序\n\nQ(n log n) 分而治之\n快速排序\n归并排序\n\"\"\"\nimport random\n\n\ndef bubble(sl):\n \"\"\"\n 冒泡排序,O(n^2)\n 相邻的两个元素对比,大的后推,遍历整个列表一次后,将最大项以冒泡的方式排列到列表末尾\n :param sl: list\n :return:\n \"\"\"\n for i in range(len(sl)-1):\n for j in range(i+1, len(sl)):\n if sl[i] > sl[j]:\n sl[i], sl[j] = sl[j], sl[i]\n return sl\n\n\ndef bubble_sorted(items):\n \"\"\"\n 优化的冒泡排序,\n 最好 O(n),\n 最坏 O(n^2)\n 平均 O(n^2)\n \"\"\"\n n = len(items)\n while n > 1:\n swapped = False\n i = 1\n while i < n:\n if items[i] < items[i-1]:\n items[i], items[i-1] = items[i-1], items[i]\n swapped = True\n i += 1\n if not swapped:\n return items\n n -= 1\n\n\ndef bubble_sort(items):\n \"\"\"\n 冒泡排序, 还是while循环换为for循环比较习惯\n 最好 O(n)\n 最坏 O(n^2)\n \"\"\"\n items_len = len(items)\n for i in range(1, items_len):\n has_swap = False\n for j in range(1, items_len):\n if items[j - 1] > items[j]:\n 
has_swap = True\n items[j - 1], items[j] = items[j], items[j - 1]\n if not has_swap:\n break\n return items\n\n\ndef select_sort(items):\n \"\"\"\n 选择排序, 搜索整个列表,找到最小项位置\n \"\"\"\n for i in range(len(items)-1):\n min_index = i\n for j in range(i+1, len(items)):\n if items[j] < items[min_index]:\n min_index = j\n\n if min_index != i:\n items[min_index], items[i] = items[i], items[min_index]\n\n return items\n\n\ndef insert_sort(items):\n \"\"\"\n 插入排序\n 从第二个数开始找前面对应顺序的位置,像插入一张扑克牌顺序一样排好序\n :param items: list\n :return:\n \"\"\"\n i = 1\n while i < len(items):\n item_insert = items[i]\n j = i - 1\n while j >= 0:\n if item_insert < items[j]:\n items[j+1] = items[j]\n j -= 1\n else:\n break\n items[j+1] = item_insert\n i += 1\n return items\n\n\ndef insert_sort_for(items):\n \"\"\"\n 插入排序,for循环, 中间还是while容易理解:\n 比插入的值 大的数挪后,直到不需要挪动为止即为插入的位置。\n :param items:\n :return:\n \"\"\"\n for i in range(1, len(items)):\n item_insert = items[i]\n j = i - 1\n while j >= 0:\n if item_insert < items[j]:\n items[j + 1] = items[j] # 比插入值大的元素,向后移动一位\n j -= 1\n else: # 不需要挪动时,跳出循环\n break\n items[j + 1] = item_insert # 找到了插入的位置\n\n return items\n\n\n# if __name__ == '__main__':\n# my_sl = [1, 3, 23, 21, 12, 22, 234]\n# result1 = bubble(my_sl)\n# print(result1)\n#\n# result2 = bubble_sorted(my_sl)\n# print(result2)\n#\n# result3 = select_sort(my_sl)\n# print(result3)\n#\n# my_s2 = [1, 3, 23, 21, 12, 22, 234]\n# result5 = insert_sort_for(my_s2)\n# print(result5)\n\n\ndef main(size=10000, sort=select_sort):\n my_items = []\n for _ in range(size):\n my_items.append(random.randint(1, _ + size))\n\n print(my_items)\n sort(my_items)\n print(my_items)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.4704663157463074, "alphanum_fraction": 0.5015544295310974, "avg_line_length": 31.149999618530273, "blob_id": "e84c253d613931b600db345ad7b7c750eb5ea0b6", "content_id": "b78dd92ca019040623de210f7d11315a5e295277", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2082, "license_type": "permissive", "max_line_length": 111, "num_lines": 60, "path": "/001~100/018-4sum.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Given an array nums of n integers and an integer targets,\nare there elements a, b, c, and d in nums such that a + b + c + d = targets?\nFind all unique quadruplets in the array which gives the sum of targets.\n\nNote:\nThe solution set must not contain duplicate quadruplets.\n\nExample:\nGiven array nums = [1, 0, -1, 0, -2, 2], and targets = 0.\nA solution set is:\n[\n [-1, 0, 0, 1],\n [-2, -1, 1, 2],\n [-2, 0, 0, 2]\n]\"\"\"\n\"\"\"\n参考思路:\n\nhttps://leetcode.com/problems/4sum/discuss/8545/Python-140ms-beats-100-and-works-for-N-sum-(Ngreater2)\n核心是实现一个快速的2指针来解决2和,以及递归以将N和减少到2和.\n知道列表已排序,进行了一些优化.\n传递指针,而不是切片列表.\n\"\"\"\n\n\nclass Solution(object):\n def fourSum(self, nums, target):\n def findNsum(ls, r, targets, N, result, results):\n if r-ls+1 < N or N < 2 or targets < nums[ls]*N or targets > nums[r]*N: # early termination <提前终止>\n return\n\n if N == 2: # two pointers solve sorted 2-sum problem <双指针解决2数之和>\n while ls < r:\n s = nums[ls] + nums[r]\n if s == targets:\n results.append(result + [nums[ls], nums[r]])\n ls += 1\n while ls < r and nums[ls] == nums[ls-1]:\n ls += 1\n elif s < targets:\n ls += 1\n else:\n r -= 1\n\n else: # recursively reduce N <递归减少N>\n for i in range(ls, r+1):\n if i == ls or (i > ls and nums[i-1] != nums[i]):\n findNsum(i+1, 
r, targets-nums[i], N-1, result+[nums[i]], results)\n nums.sort()\n result_list = []\n findNsum(0, len(nums)-1, target, 4, [], result_list)\n \n return result_list\n\n\nif __name__ == '__main__':\n rst_list = Solution().fourSum([1, 0, -1, 0, -2, 2], 0)\n print(rst_list)\n\n" }, { "alpha_fraction": 0.5205254554748535, "alphanum_fraction": 0.5385878682136536, "avg_line_length": 31.052631378173828, "blob_id": "126d96237fa27fb13e3e3ebc628b6fb680302955", "content_id": "e446ac61b7639b993d2b90240ba6f562c197fac4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 61, "num_lines": 19, "path": "/testing/demo1_2_820.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "class Solution:\n def minimumLengthEncoding(self, words: List[str]) -> int:\n words = sorted(x[::-1] for x in words)\n repeat = 0\n for i in range(1,len(words)):\n if not words[i].startswith(words[i-1]):\n repeat = repeat + 1 + len( words[i-1])\n return repeat + 1 + len(words[-1]) \n \n\n\nclass Solution2:\n def minimumLengthEncoding(self, words: List[str]) -> int:\n good = set(words)\n for word in words:\n for k in range(1, len(word)):\n good.discard(word[k:])\n\n return sum(len(word) + 1 for word in good)\n" }, { "alpha_fraction": 0.4531722068786621, "alphanum_fraction": 0.46525681018829346, "avg_line_length": 10.413793563842773, "blob_id": "8d4221b8f022ac3644b8d06f6b38b91294af6258", "content_id": "a91262e3160a0b3d0e941b270508e3edaec55312", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "permissive", "max_line_length": 35, "num_lines": 29, "path": "/interviews/004testListAppend.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef func(t=[], s=''):\n t.append('x')\n s += 'test'\n print(t)\n print(s)\n\n\n\"\"\"\n使用默认参数,列表在原来地址,赋值递增; 字符串是重新赋予新的地址和值\n['x']\n['x', 'x']\n['x', 'x', 'x']\n\"\"\"\nfor _ in range(3):\n func()\n\n\n\"\"\"\n使用自己的参数,列表,字符串都是重新赋予新的地址和值\n['x']\n['x']\n['x']\n\"\"\"\nfor _ in range(3):\n func([], '')\n" }, { "alpha_fraction": 0.4707317054271698, "alphanum_fraction": 0.49593496322631836, "avg_line_length": 24.47916603088379, "blob_id": "03498646f61aff191f5b95e9de2cc9ee9ff180ea", "content_id": "b3ed572e49d6920fbb66c2b6c7a8474e823ca342", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1358, "license_type": "permissive", "max_line_length": 62, "num_lines": 48, "path": "/001~100/005.LongestPalindromicSubstring.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n给定一个字符串s,找出s中最长的回文子字符串。可以假设s的最大长度为1000。\n\nGiven a string s, find the longest palindromic substring in s.\nYou may assume that the maximum length of s is 1000.\n\nExample 1:\nInput: \"babad\"\nOutput: \"bab\"\nNote: \"aba\" is also a valid answer.\n\nExample 2:\nInput: \"cbbd\"\nOutput: \"bb\"\n\"\"\"\n\n\nclass Solution:\n def longestPalindrome(self, s):\n \"\"\"\n 这是这个出色的C ++解决方案的Python版本。\n while k < lenS - 1 and s[k] == s[k + 1]: k += 1是非常有效的,\n 可以处理odd-length(abbba)和even-length(abbbba)。\n \"\"\"\n len_s = len(s)\n if len_s <= 1: \n return s\n min_start, max_len, i = 0, 1, 0\n while i < len_s:\n if len_s - i <= max_len/2: \n break\n j, k = i, i\n while k < len_s-1 and s[k] == s[k+1]:\n k += 1\n i = k + 
1\n while k < len_s-1 and j and s[k+1] == s[j-1]:\n k, j = k+1, j-1\n if k-j+1 > max_len:\n min_start, max_len = j, k-j+1\n return s[min_start: min_start + max_len]\n \n \nif __name__ == '__main__':\n string = 'test sub aba '\n result = Solution().longestPalindrome(string)\n print(result)\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.38843587040901184, "alphanum_fraction": 0.4410674571990967, "avg_line_length": 23.490909576416016, "blob_id": "3093c8985e5adf27862d1ced981d6ed6d4cd6ec6", "content_id": "76c0f62fdc1cf82773c215504e4df0d327cf6f41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1419, "license_type": "permissive", "max_line_length": 92, "num_lines": 55, "path": "/001~100/013RomanToInteger.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n13、罗马数字转为整型数字\n\"\"\"\n\n\nclass Solution:\n def romanToInt(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n d = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}\n res, p = 0, 'I'\n for c in s[::-1]:\n res, p = res - d[c] if d[c] < d[p] else res + d[c], c\n\n return res\n\n\nclass Solution2:\n def romanToInt(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n roman = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n res, i = 0, 0\n for i in range(len(s)):\n curr, nxt = s[i], s[i + 1:i + 2]\n print(nxt)\n if nxt and roman[curr] < roman[nxt]:\n res -= roman[curr]\n else:\n res += roman[curr]\n return res\n\n\nif __name__ == '__main__':\n str1 = 'LII'\n ret = Solution2().romanToInt(str1)\n print(ret)\n\n list1 = [0, 1, 2, 3]\n for index in range(len(list1)):\n current, next_ = list1[index], list1[index+1:index+2] # list1[index+1:index+2]是列表类型\n print(current, next_)\n\n s1 = '123'\n for index in range(len(s1)):\n current, next_ = s1[index], s1[index+1:index+2] # 字符串的索引可以越界,列表不行。\n print(current, next_)\n # print('溢出?', s1[3:4])\n\n\n" }, { "alpha_fraction": 0.4965675175189972, "alphanum_fraction": 0.5225018858909607, "avg_line_length": 17.714284896850586, "blob_id": "93987534292ab9f735f891f04b0da6ce4a0a9054", "content_id": "42f83c51c0e3ee3cfb1557bb7f5cbc190a0d113f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "permissive", "max_line_length": 51, "num_lines": 70, "path": "/Algorithm/search/baseSearch.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n查询算法:\n\n最小项搜索\n顺序搜索\n二分(叉)搜索\nhash搜索\n\n\"\"\"\n\n\ndef min_search(items):\n \"\"\"\n 最小项搜索\n :param items:\n :return:\n \"\"\"\n min_index = 0\n for i in range(len(items)):\n if items[min_index] > items[i]:\n min_index = i\n return 'min index:{}'.format(min_index)\n\n\ndef order_search(target, items):\n \"\"\"\n 顺序搜索,常用遍历方法, O(n)\n :param target:\n :param items:\n :return:\n \"\"\"\n position = 0\n while position < len(items):\n if target == items[position]:\n return position\n position += 1\n return '404 Not FOUND!:('\n\n\ndef binary_search(target, items):\n \"\"\"\n 二分查找(二叉搜索),O(log2n)\n 前置条件:列表已排好序\n :param target:\n :param items:\n :return:\n \"\"\"\n left = 0\n right = len(items) - 1\n while left <= right:\n middle = (left + right) // 2\n if target == items[middle]:\n return 'where index: {}'.format(middle)\n elif target > items[middle]:\n left = middle + 1\n else:\n right = middle - 1\n return -1\n\n\nif __name__ == 
'__main__':\n items1 = [1, 3, 6, 23, 0]\n min_search_res = min_search(items1)\n print(min_search_res)\n\n items2 = [1, 3, 6, 23, 66]\n min_search_res = binary_search(66, items2)\n print(min_search_res)\n\n" }, { "alpha_fraction": 0.39765459299087524, "alphanum_fraction": 0.4227078855037689, "avg_line_length": 27.363636016845703, "blob_id": "a4d9fcee71e52166ab3e490bd8986fae6e2a37e1", "content_id": "8d218c8ff3aaab9ac68773e4c730f92b8c88fb7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1922, "license_type": "permissive", "max_line_length": 73, "num_lines": 66, "path": "/001~100/016-3SumClosest.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n16. 最接近的三数之和\n\neg:\ntarget=3, list=[1,2,3,1]\n最接近的值:1+1+2=4,返回整型数值:4\n\"\"\"\n\n\nclass Solution:\n def three_sum_closest(self, nums, target):\n \"\"\"\n :param nums: list[int]\n :param target: int\n :return: res's type is int\n \"\"\"\n nums.sort()\n res = sum(nums[:3])\n for i in range(len(nums)):\n ls, r = i+1, len(nums)-1\n while ls < r:\n s = sum((nums[i], nums[ls], nums[r]))\n if abs(s-target) < abs(res-target):\n res = s\n if s < target:\n ls += 1\n elif s > target:\n r -= 1\n else:\n return res\n return res\n\n\nclass Solution2:\n def threeSumClosest(self, nums: 'List[int]', target: 'int') -> 'int':\n nums.sort()\n # result = nums[0]+nums[1]+nums[2]\n close = []\n for i in range(0, nums.__len__()-2):\n j, k = i+1, nums.__len__()-1\n if nums[i]+nums[k-1]+nums[k] < target:\n close.append(nums[i]+nums[k-1]+nums[k])\n elif nums[i]+nums[j]+nums[j+1] > target:\n close.append(nums[i]+nums[j]+nums[j+1])\n else:\n while j < k:\n temp = nums[i]+nums[j]+nums[k]\n if temp == target:\n return temp\n close.append(temp)\n if temp < target:\n j += 1\n else:\n k -= 1\n closest = sorted(close, key=lambda x: abs(target-x))\n return closest[0]\n\n\nif __name__ == '__main__':\n sl_1 = [-1, 3, 3, 2, 1]\n result = Solution().three_sum_closest(sl_1, 3)\n print(result)\n result2 = Solution2().threeSumClosest(sl_1, 3)\n print(result2)\n\n\n\n\n" }, { "alpha_fraction": 0.3253886103630066, "alphanum_fraction": 0.35440415143966675, "avg_line_length": 20.863636016845703, "blob_id": "14869529b018740edb35f07522219f948f705b29", "content_id": "cb9734216feca333540d6c9cf53b7edaf79e7b55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "permissive", "max_line_length": 45, "num_lines": 44, "path": "/001~100/015-3Sum.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n3Sum 三数之和\neg: -1+0+1=0\n\n\"\"\"\n\n\nclass Solution:\n\n def threeSum(self, nums):\n \"\"\"\n @param nums: list[int]\n @return: List[List[int]]\n \"\"\"\n d = {}\n for val in nums:\n d[val] = d.get(val, 0) + 1\n\n pos = [x for x in d if x > 0]\n neg = [x for x in d if x < 0]\n\n res = []\n if d.get(0, 0) > 2:\n res.append([0, 0, 0])\n\n for x in pos:\n for y in neg:\n s = -(x + y)\n if s in d:\n if s == x and d[x] > 1:\n res.append([x, x, y])\n elif s == y and d[y] > 1:\n res.append([x, y, y])\n elif y < s < x:\n res.append([x, y, s])\n return res\n\n\nif __name__ == '__main__':\n sl_1 = [-1, 0, 1, -2, 2, -3, 3]\n result = Solution().threeSum(sl_1)\n print(result)\n\n\n\n" }, { "alpha_fraction": 0.3901234567165375, "alphanum_fraction": 0.4009876549243927, "avg_line_length": 23.373493194580078, 
"blob_id": "714c974989541a04324c8cffe28cc2f72ebee3fe", "content_id": "46c8ae888669cab902175aa7635a5bca6cab8a3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2039, "license_type": "permissive", "max_line_length": 65, "num_lines": 83, "path": "/001~100/014LongestCommonPerfix.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n14、最长公共前缀\n\"\"\"\n\n\n# class Solution:\n# def longestCommonPrefix(self, sl):\n# \"\"\"\n# :type sl: List[str]\n# :rtype: str\n# \"\"\"\n# if '' in sl:\n# return ''\n# n = len(sl)\n# if n > 1:\n# pr = ''\n# for index, st in enumerate(sl[0]):\n# pr += st\n# for j in range(1, n):\n# if pr not in sl[j][:index+1]:\n# break\n# else:\n# continue\n# break\n# else:\n# return pr\n# return pr[:-1]\n# else:\n# return '' if not n else sl[0]\n\n\n# class Solution:\n# def longestCommonPrefix(self, strs):\n# \"\"\"\n# :type strs: List[str]\n# :rtype: str\n# \"\"\"\n# common = None\n# for s in strs:\n# if common is None:\n# common = list(s)\n# else:\n# for i, c in enumerate(common):\n# if i >= len(s) or c != s[i]:\n# common = common[:i]\n# break\n# return ''.join(common) if common else ''\n\n\n# class Solution:\n# def longestCommonPrefix(self, m):\n# if not m:\n# return ''\n# s1 = min(m)\n# print(s1)\n# s2 = max(m)\n# print(s2)\n#\n# for i, c in enumerate(s1):\n# if c != s2[i]:\n# return s1[:i] # stop until hit the split index\n# return s1\n\n\nclass Solution:\n # @return a string\n def longestCommonPrefix(self, strs):\n if not strs:\n return \"\"\n\n for i, letter_group in enumerate(zip(*strs)):\n if len(set(letter_group)) > 1:\n return strs[0][:i]\n else:\n return min(strs)\n\n\nif __name__ == '__main__':\n sl_1 = ['flow', 'fawer', 'flower']\n result = Solution().longestCommonPrefix(sl_1)\n print(result)\n\n\n" }, { "alpha_fraction": 0.3459162712097168, "alphanum_fraction": 0.41455045342445374, "avg_line_length": 24.543859481811523, "blob_id": "47f2c25e7ec426d80a9150b674efaca495c11508", "content_id": "7fa39f5cd75d0bffe323936521aa464ba4b7a840", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1557, "license_type": "permissive", "max_line_length": 87, "num_lines": 57, "path": "/001~100/012IntegerToRoman.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n12. 
整型转为罗马数字\n\"\"\"\n\n\nclass Solution:\n def intToRoman(self, num):\n \"\"\"\n 解法一:通过枚举+遍历列表的限制\n :type num: int\n :rtype: str\n \"\"\"\n stl = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']\n nums = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\n\n ret = \"\"\n\n for i, j in enumerate(nums):\n while num >= j:\n ret += stl[i]\n num -= j\n if num == 0:\n return ret\n\n\nclass Solution2:\n def intToRoman(self, num):\n \"\"\"\n 解法二:数据库存储方法(列表索引对应0-9),相当于从数据库取值\n :type num: int\n :rtype: str\n \"\"\"\n M = [\"\", \"M\", \"MM\", \"MMM\"]\n C = [\"\", \"C\", \"CC\", \"CCC\", \"CD\", \"D\", \"DC\", \"DCC\", \"DCCC\", \"CM\"]\n X = [\"\", \"X\", \"XX\", \"XXX\", \"XL\", \"L\", \"LX\", \"LXX\", \"LXXX\", \"XC\"]\n I = [\"\", \"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\", \"VIII\", \"IX\"]\n # or return M[num//1000] + C[(num//100) % 10] + X[(num//10) % 10] + I[num % 10]\n return M[num//1000] + C[(num % 1000)//100] + X[(num % 100)//10] + I[num % 10]\n\n\nif __name__ == '__main__':\n integer = 1775\n result = Solution().intToRoman(integer)\n print(result)\n result2 = Solution2().intToRoman(integer)\n print(result2)\n\n a = 4321\n print((a % 10)//10)\n print(a % 100)\n\n # print((a // 10) % 10)\n # print((a // 100) % 10)\n # print(a // 1000)\n\n" }, { "alpha_fraction": 0.5850515365600586, "alphanum_fraction": 0.6013745665550232, "avg_line_length": 24.2608699798584, "blob_id": "e1e2851bfe709b9ade0ea7778d4dd286389dc4af", "content_id": "eda84a4279d80b77e63103b7b4113d156ea2ff40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1292, "license_type": "permissive", "max_line_length": 99, "num_lines": 46, "path": "/001~100/003.LongestSubstringNoRepeating.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n给定一个字符串,在不重复字符的情况下查找最长子字符串的长度。\n\nGiven a string, find the length of the longest substring without repeating characters.\n\nExample 1:\n\nInput: \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\nExample 2:\n\nInput: \"bbbbb\"\nOutput: 1\nExplanation: The answer is \"b\", with the length of 1.\nExample 3:\n\nInput: \"pwwkew\"\nOutput: 3\nExplanation: The answer is \"wke\", with the length of 3.\n Note that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n\"\"\"\n\n\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n 算法思路:遍历字符串,获取最长子串的首尾索引即可\n :param s: str\n :return: int\n \"\"\"\n idx, n, start, res = [0] * 128, len(s), 0, 0\n for i in range(n):\n start = max(start, idx[ord(s[i])]) # 子串起始索引\n res = max(res, i - start + 1) # 子串长度\n idx[ord(s[i])] = i + 1\n return res\n\n\nif __name__ == '__main__':\n _s = 'Longest Substring Without Repeating Characters'\n s_ = 'test my string.'\n result = Solution().lengthOfLongestSubstring(s_)\n print(result)\n\n\n" }, { "alpha_fraction": 0.4161735773086548, "alphanum_fraction": 0.4299802780151367, "avg_line_length": 18.882352828979492, "blob_id": "48c6a3d45bd0df90af51809945f1718e85cc135b", "content_id": "16ba5d571d7aa62c06803e1a4b0c9d52d3cc9734", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "permissive", "max_line_length": 62, "num_lines": 51, "path": "/interviews/001abcstring.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\n\"\"\"\n给你一个字符串,比如“abc”,请打印出该字符串的所有排列组合,\n如abc: abc,acb,bac,bca,cab,cba, 实际的字符长度超过100个字符串。\n\"\"\"\n\n\nclass Solution:\n\n def all_string_play(self, s):\n \"\"\"\n :param s: str\n :return str_list: list\n \"\"\"\n if len(s) <= 1:\n return [s]\n else:\n str_list = []\n for i in range(len(s)):\n for j in self.all_string_play(s[0:i]+s[i+1:]):\n str_list.append(s[i] + j)\n print(str_list)\n return str_list\n\n\nif __name__ == '__main__':\n string = 'my_'\n result = Solution().all_string_play(string)\n print(result)\n\n\n# def fun1(s):\n# if len(s) <= 1:\n# return [s]\n# else:\n# sl = []\n# for i in range(len(s)):\n# for j in fun1(s[0:i] + s[i + 1:]):\n# sl.append(s[i] + j)\n# return sl\n# \n# \n# def main():\n# a = fun1('abc')\n# print(a)\n# \n# \n# if __name__ == '__main__':\n# main()\n" }, { "alpha_fraction": 0.3992740511894226, "alphanum_fraction": 0.40925589203834534, "avg_line_length": 27.28205108642578, "blob_id": "5db35eb803e954965171a41600b5d4203587777e", "content_id": "cc8f4cd9075758d972c064a2f5e05c0027f4064d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "permissive", "max_line_length": 62, "num_lines": 39, "path": "/testing/demo1_1_875.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "class Solution1:\n def minEatingSpeed(self, piles: List[int], h: int) -> int:\n n, maxp = len(piles), max(piles)\n sump = sum(piles)\n if n == h: return maxp\n left, right = (sump-1)//h+1, min(maxp, sump//(h-n))\n while left < right:\n mid = (left + right) // 2\n time = 0\n for pile in piles:\n time += (pile-1)//mid+1\n if time > h: break\n if time > h:\n left = mid + 1\n else:\n right = mid\n return left\n\n\nclass Solution:\n def minEatingSpeed(self, piles: List[int], h: int) -> int:\n sdt = 0\n ms = float(\"-inf\")\n for c in piles:\n sdt += c\n if c > ms:\n ms = c\n nxt = len(piles)\n if nxt == h:\n return ms\n res = ceil(sdt / h)\n rst = min(ms,sdt//(h-nxt))\n while res < rst:\n mid = (res+rst)>>1\n if sum(ceil(p / mid) for p in piles) <= h:\n rst = mid\n else:\n res = mid + 1\n return res" }, { "alpha_fraction": 0.4186440706253052, "alphanum_fraction": 0.503389835357666, "avg_line_length": 26.59375, "blob_id": "d2b5d0130c099197e04e3aa37f50f0df6560bb03", "content_id": "7d9b6ec23d2365fb991fa5af225ec583ac4ac59d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1984, "license_type": "permissive", "max_line_length": 89, "num_lines": 64, "path": "/interviews/002SortToFindSame.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n两个排好序的数组(从小到大)找出重复的元素,例如:\n数组A: [1,2,3,4]\n数组B: [2,4,5,6]\n输出: [2,4]\n\n\"\"\"\nfrom timeDecorator import clock\n\n\nclass Solution:\n @clock\n def sort_find_same(self, list_a, list_b):\n \"\"\"\n 解法一:暴力法, O(n^2)\n :param list_a: list\n :param list_b: list\n :return list_same: list\n \"\"\"\n list_same = []\n for i in range(len(list_a)):\n for j in range(len(list_b)):\n if list_a[i] == list_b[j]:\n list_same.append(list_a[i])\n return list_same\n\n @clock\n def sort_common(self, list_a, list_b):\n \"\"\"\n 解法二:归并排序,遍历一次两个列表中的最大长度,通过指针递增获取相同元素并保存到common_list中\n 归并排序是对几个有序表有序表的排序,合并成一个新的有序表\n https://blog.csdn.net/RedSun528/article/details/82930117\n https://blog.csdn.net/csdn_564174144/article/details/77150346\n :param list_a:\n :param list_b:\n :return: common_list\n \"\"\"\n common_list, a, b = list(), 0, 
0\n        # walk both sorted lists with two pointers, stopping at either end\n        while a < len(list_a) and b < len(list_b):\n            if list_a[a] > list_b[b]:\n                b += 1\n            elif list_a[a] < list_b[b]:\n                a += 1\n            elif list_a[a] == list_b[b]:\n                common_list.append(list_a[a])\n                a += 1\n                b += 1\n\n        return common_list\n\n\nif __name__ == '__main__':\n    list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 17, 18, 19,\n             20, 21, 22, 23, 24, 25, 26, 27, 28, 112, 123, 124, 1256, 1344, 12345, 23456]\n    list2 = [2, 4, 6, 12, 14, 16, 122, 1234, 12223, 124144]\n\n    result = Solution().sort_find_same(list1, list2)\n    print(result)\n\n    result2 = Solution().sort_common(list1, list2)\n    print(result2)\n" },
{ "alpha_fraction": 0.5180723071098328, "alphanum_fraction": 0.5348370671272278, "avg_line_length": 19.15151596069336, "blob_id": "5bd758f4c59c5e233b3392615b0fa88dc105a4cc", "content_id": "c42ca2598575fde0dec9e7a66f93052ffaf49e80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2505, "license_type": "permissive", "max_line_length": 98, "num_lines": 99, "path": "/001~100/023merge-k-sorted-lists.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMerge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.\n\nExample:\n\nInput:\n[\n  1->4->5,\n  1->3->4,\n  2->6\n]\nOutput: 1->1->2->3->4->4->5->6\n\"\"\"\n\n\n\"\"\"\nApproach 1: brute force\nAlgorithm\n\nTraverse all the linked lists and collect the node values into an array.\nSort the array and iterate over it to get the node values in the right order.\nCreate a new sorted linked list and extend it with new nodes.\n\nComplexity analysis\nTime complexity: O(N log N)\nSpace complexity: O(N)\n\"\"\"\n\n\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution(object):\n    def mergeKLists(self, lists):\n        \"\"\"\n        :type lists: List[ListNode]\n        :rtype: ListNode\n        \"\"\"\n        self.nodes = []\n        head = point = ListNode(0)\n        for l in lists:\n            while l:\n                self.nodes.append(l.val)\n                l = l.next\n        for x in sorted(self.nodes):\n            point.next = ListNode(x)\n            point = point.next\n        return head.next\n\n\n\"\"\"\nApproach 2: divide and conquer\nThe benefit of this algorithm is that we do not need to traverse most nodes many times.\n\nPair up the k lists and merge each pair.\nAfter the first pairing, the k lists are merged into k/2 lists with an average length of 2n/k, then k/4, k/8, and so on.\nRepeat this process until we get the final sorted linked list.\n\nThus we traverse almost n nodes per pairing and merging, and repeat the process about log2(k) times.\n\"\"\"\n\n\nclass Solution2(object):\n    def mergeKLists(self, lists):\n        \"\"\"\n        :type lists: List[ListNode]\n        :rtype: ListNode\n        \"\"\"\n        amount = len(lists)\n        interval = 1\n        while interval < amount:\n            for i in range(0, amount - interval, interval * 2):\n                lists[i] = self.merge2Lists(lists[i], lists[i + interval])\n            interval *= 2\n        return lists[0] if amount > 0 else lists\n\n    def merge2Lists(self, l1, l2):\n        head = point = ListNode(0)\n        while l1 and l2:\n            if l1.val <= l2.val:\n                point.next = l1\n                l1 = l1.next\n            else:\n                point.next = l2\n                l2 = l1\n                l1 = point.next.next\n            point = point.next\n        if not l1:\n            point.next = l2\n        else:\n            point.next = l1\n        return head.next\n" },
{ "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 8.11111068725586, "blob_id": "9d8ab134be1a370efbe789d7bf5845e330c67de6", "content_id": "35d11179ddb444ebf4cee712e6b203e00fafa10f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "permissive", "max_line_length": 23, "num_lines": 9, "path": "/Algorithm/search/__init__.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSearch algorithms:\n\nSequential search\nBinary search\nHash search\n\"\"\"\n\n" },
{ "alpha_fraction": 0.46183204650878906, "alphanum_fraction": 0.494656503200531, "avg_line_length": 20.83333396911621, "blob_id": "28dc7c6e8e2fe7ae434a3592fb9205832dec8bda", "content_id": "a0f106fd08db74e77479d94c44864417cdb70bf9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1608, "license_type": "permissive", "max_line_length": 85, "num_lines": 60, "path": "/001~100/009.PalindromeNumber.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDetermine whether an integer is a palindrome. An integer is a palindrome when it reads the same backward as forward.\n\nExample 1:\nInput: 121\nOutput: true\n\nExample 2:\nInput: -121\nOutput: false\nExplanation: From left to right, it reads -121. From right to left, it becomes 121-. Therefore it is not a palindrome.\n\nFollow up:\nCould you solve it without converting the integer to a string?\n\"\"\"\nfrom timeDecorator import clock\n\n\n@clock\nclass Solution:\n    def isPalindrome(self, x):\n        \"\"\"\n        Solution 1 (not recommended by the official answer): convert the `int` to a `str`\n        : type x: int\n        : rtype: bool\n        \"\"\"\n        return str(x) == str(x)[::-1]\n\n\n@clock\nclass Solution2:\n    def isPalindrome(self, x):\n        \"\"\"\n        Solution 2: loop over the integer itself and build the reversed half\n        : type x: int\n        : rtype: bool\n        \"\"\"\n        if x < 0 or (x % 10 == 0 and x != 0):\n            return False\n        else:\n            reverse_num = 0\n            while x > reverse_num:  # loop until half of the digits are consumed\n                reverse_num = reverse_num * 10 + x % 10  # build up the reversed right half\n                x //= 10  # drop one digit per iteration\n            print(reverse_num)\n            print(x)\n\n        return (x == reverse_num) or (x == reverse_num//10)  # odd lengths leave the middle digit in reverse_num, hence //10\n\n\nif __name__ == '__main__':\n    integer = 1221\n    result = Solution().isPalindrome(integer)\n    print(result)\n\n    result2 = Solution2().isPalindrome(integer)\n    print(result2)\n" },
{ "alpha_fraction": 0.5835567712783813, "alphanum_fraction": 0.5987488627433777, "avg_line_length": 20.921567916870117, "blob_id": "435a55704357d795f8cd3b197a19f0b5861d3e64", "content_id": "c99587d0d4d0bfc446ad3efcff1839522f8bd258", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1161, "license_type": "permissive", "max_line_length": 73, "num_lines": 51, "path": "/Algorithm/sort/quickSort.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nQuicksort implementation\nTime: best O(n log2 n) ~ worst O(n^2)\nSpace: best O(log2 n) ~ worst O(n)\n\"\"\"\nimport random\n\n\ndef quick_sort(items):\n    quick_sort_helper(items, 0, len(items) - 1)\n\n\ndef quick_sort_helper(items, left, right):\n    if left < right:\n        pivot_location = partition(items, left, right)\n        quick_sort_helper(items, left, pivot_location - 1)\n        quick_sort_helper(items, pivot_location + 1, right)\n\n\ndef partition(items, left, right):\n    middle = (left + right) // 2\n    pivot = items[middle]\n    items[middle] = items[right]\n    items[right] = pivot\n\n    boundary = left\n\n    for index in range(left, right):\n        if items[index] < pivot:\n            items[index], items[boundary] = items[boundary], items[index]\n            boundary += 1\n\n    items[right], items[boundary] = items[boundary], items[right]\n\n    return boundary\n\n\ndef main(size=10000, sort=quick_sort):\n    my_items = []\n    for _ in range(size):\n        my_items.append(random.randint(1, _ + size))\n\n    print(my_items)\n    sort(my_items)\n    print(my_items)\n\n\nif __name__ == '__main__':\n    main()\n" },
{ "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5526315569877625, "avg_line_length": 7.9285712242126465, "blob_id": "08b9e3269b0849f6149f1938aa344888a02c1ece", "content_id": "f7060f8c4698c96e5529ffec9c6be7b4e1582361", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "permissive", "max_line_length": 23, "num_lines": 14, "path": "/Algorithm/sort/__init__.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSorting algorithms:\n\nO(n^2)\nBubble sort\nInsertion sort\nSelection sort\n\nO(n log n), divide and conquer\nQuicksort\nMerge sort\n\"\"\"\n" },
{ "alpha_fraction": 0.4966711103916168, "alphanum_fraction": 0.515312910079956, "avg_line_length": 18.774999618530273, "blob_id": "3bf8a62738e7f705dcd3e60a30a5beb0a86533df", "content_id": "e65c530b1c210bf140cfa431c085d78e668e5c06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "permissive", "max_line_length": 75, "num_lines": 40, "path": "/timeDecorator.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time\nimport timeit\n\"\"\"\nA decorator that adds a ⏲ timer to program functions\n\"\"\"\n\n\ndef clock(func):\n    \"\"\" Timing decorator \"\"\"\n    def clocked(*args):\n        t0 = timeit.default_timer()\n        result = func(*args)\n        elapsed = (timeit.default_timer() - t0) * 1000  # run time of the wrapped call\n        name = func.__name__  # function name\n        arg_str = ', '.join(repr(arg) for arg in args)  # function arguments\n        print('[%0.8f ms] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n        return result\n\n    return clocked\n\n\n@clock\ndef run(seconds):\n    \"\"\" Function used to test the timer \"\"\"\n    time.sleep(seconds)\n    return time\n\n\n@clock\ndef time_test():\n    \"\"\" Function used to test the timer \"\"\"\n    time.sleep(2)\n\n\nif __name__ == '__main__':\n    run(1.22)\n    time_test()\n" },
{ "alpha_fraction": 0.5174094438552856, "alphanum_fraction": 0.5724233984947205, "avg_line_length": 26.09433937072754, "blob_id": "1aa1b0715c439c7be5eba488bb631fce6a9ea389", "content_id": "aaa28a038ac550d9261e46b7ebe7df21fd7598a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "permissive", "max_line_length": 97, "num_lines": 53, "path": "/001~100/004.MedianOfTwoSortedArrays.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n4. Median of Two Sorted Arrays\nHard\n\nThere are two sorted arrays nums1 and nums2 of size m and n respectively.\nFind the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).\nYou may assume nums1 and nums2 cannot be both empty.\n\nExample 1:\nnums1 = [1, 3]\nnums2 = [2]\nThe median is 2.0\n\nExample 2:\nnums1 = [1, 2]\nnums2 = [3, 4]\nThe median is (2 + 3)/2 = 2.5\n\"\"\"\n\n\nclass Solution:\n    def findMedianSortedArrays(self, x, y):\n        z = len(x) + len(y)\n        return self.findKth(x, y, z//2) if z % 2 == 1 \\\n            else (self.findKth(x, y, z//2-1)+self.findKth(x, y, z//2))/2.0\n\n    def findKth(self, list1, list2, k):\n        if len(list1) > len(list2):\n            list1, list2 = list2, list1\n        if not list1:\n            return list2[k]\n        if k == len(list1) + len(list2) - 1:\n            return max(list1[-1], list2[-1])\n        i = len(list1) // 2\n        j = k - i\n        if list1[i] > list2[j]:\n            # NB: list1[:i] and list2[j:] are copies, so each slice costs O(k);\n            # the recursion depth is O(log(m+n)), but the total time is not.\n            return self.findKth(list1[:i], list2[j:], i)\n        else:\n            return self.findKth(list1[i:], list2[:j], j)\n\n\nif __name__ == '__main__':\n    nums1 = (1, 2, 4, 9)\n    nums2 = (1, 2, 4, 8)\n    result = Solution().findMedianSortedArrays(nums1, nums2)\n    print(result)\n" },
{ "alpha_fraction": 0.4091639816761017, "alphanum_fraction": 0.46382635831832886, "avg_line_length": 16.77142906188965, "blob_id": "d0e0448e0bf1022780f5af80015769caac46486b", "content_id": "6f6c5500657512dfbe1517addc6f8d1f8896a7d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "permissive", "max_line_length": 67, "num_lines": 70, "path": "/001~100/007.ReverseInteger.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n7. Reverse Integer\nGiven a 32-bit signed integer, reverse the digits of the integer.\n\nExample 1:\nInput: 123\nOutput: 321\n\nExample 2:\nInput: -123\nOutput: -321\n\nExample 3:\nInput: 120\nOutput: 21\n\"\"\"\nfrom timeDecorator import clock\n\n\n@clock\nclass Solution:\n    def reverse(self, x):\n        \"\"\"\n        : type x: int\n        : rtype: int\n        \"\"\"\n        if x < 0:\n            number = int(\"-\" + str(abs(x))[::-1])  # [::-1] reverses the string\n        else:\n            number = int(str(abs(x))[::-1])\n\n        if number > (2 ** 31 - 1) or number < -(2 ** 31):  # return 0 on overflow\n            return 0\n        return number\n\n\nclass Solution2:\n    def reverse(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        sign = lambda x: x and (1, -1)[x < 0]\n        r = int(str(sign(x)*x)[::-1])\n        return (sign(x)*r, 0)[r > 2**31 - 1]\n\n\n@clock\nclass Solution3(object):\n    def reverse(self, x):\n        s = (x > 0) - (x < 0)\n        print(s)\n        r = int(str(x*s)[::-1])\n        return s*r * (r < 2**31)\n\n\n@clock\nclass Solution4:\n    def reverse(self, x):\n        sign = -1 if x <= 0 else 1\n        ans = int(str(sign*x)[::-1])\n        return ans*sign if ans < 2**31 else 0\n\n\nif __name__ == '__main__':\n    num = -120\n    result = Solution4().reverse(num)\n    print(result)\n" },
{ "alpha_fraction": 0.5143540501594543, "alphanum_fraction": 0.5669856667518616, "avg_line_length": 17.173913955688477, "blob_id": "e97daf49539b9a29a48b9224de2913adca278dfc", "content_id": "a3a8b3f56534c912933efb9e5f4d994cb0ee8180", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "permissive", "max_line_length": 46, "num_lines": 23, "path": "/interviews/005dictTest.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA list cannot be used as a dict key in Python:\nTypeError: unhashable type: 'list'\n\"\"\"\n# d1 = {}\n# d2 = {3: 5}\n# d3 = {[1, 2, 3]: 'user'}\n# d4 = {(1, 2, 3): 'user'}\n#\n# print(type(d4))\n\n\n# a = 10  # module-level variable; a plain assignment inside a function creates a new local instead\n# def set_a():\n#     a = 100  # function-local variable, only valid inside the function\n# set_a()\n# print(a)  # a=10\n# global a  # declare a as global; rebinding a global inside a function requires this declaration\n\n# HTTP protocol header fields, and the main purpose of each field\n# Difference between heap memory and stack memory\n" },
{ "alpha_fraction": 0.5541125535964966, "alphanum_fraction": 0.5822510719299316, "avg_line_length": 27.8125, "blob_id": "1669c48b2e359817eaad208ff4c2ee64f5f9e013", "content_id": "2a3e7a304aa3fc3cddbca4fcd4e5551445052b80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "permissive", "max_line_length": 106, "num_lines": 16, "path": "/001~100/019RemoveNthFromEndOfList.py", "repo_name": "hyhplus/LeetCodeByPython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n19. Given a linked list, remove the n-th node from the end of the list and return its head\n\"\"\"\n\n\nclass Solution:\n    # https://leetcode.com/problems/remove-nth-node-from-end-of-list/discuss/8802/3-short-Python-solutions\n    def removeNthFromEnd(self, head_, n):\n        def remove(head):\n            if not head:\n                return 0, head\n            i, head.next = remove(head.next)\n            return i+1, (head, head.next)[i+1 == n]\n        return remove(head_)[1]\n" } ]
37
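Complementing the two approaches embedded in 001~100/023merge-k-sorted-lists.py above (brute-force collect-and-sort, and pairwise divide-and-conquer merging), a third standard variant keeps a min-heap of the k current list heads, giving O(N log k) time and O(k) extra space. This is a minimal sketch under the same ListNode definition used in that file; merge_k_lists is a hypothetical name, and none of this code is part of the repository:

import heapq


class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


def merge_k_lists(lists):
    """Merge k sorted linked lists by repeatedly popping the smallest head."""
    # (value, index, node) tuples: the unique index breaks ties, so ListNode
    # instances themselves are never compared by the heap.
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    head = point = ListNode(0)
    while heap:
        val, i, node = heapq.heappop(heap)  # smallest remaining head
        point.next = node
        point = point.next
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return head.next

Each of the N nodes is pushed and popped once against a heap of at most k entries, which is where the O(N log k) bound comes from; the same tie-breaking trick is needed on Python 3, where bare ListNode objects are not orderable.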
simon-kuzin/mopidy
https://github.com/simon-kuzin/mopidy
929b097f75a78e4778275852ec873cd5d4d84567
28cf3228b2a2eaae78b91c561d0f824d32b4ef8f
61a06047956a2438770d04f50362af9acbf6dfed
HEAD
2019-03-19T11:51:11.484057
2014-01-09T07:32:06
2014-01-09T07:32:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.605979323387146, "alphanum_fraction": 0.6078957319259644, "avg_line_length": 27.670330047607422, "blob_id": "71da9a0f89423122f529b6f2df561c9d0e3aa993", "content_id": "7bccf101f1095ce8dab42cecdb38fffde8a40b27", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2609, "license_type": "permissive", "max_line_length": 78, "num_lines": 91, "path": "/mopidy/backends/local/json.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, unicode_literals\n\nimport gzip\nimport json\nimport logging\nimport os\nimport tempfile\n\nimport mopidy\nfrom mopidy import models\nfrom mopidy.backends import local\nfrom mopidy.backends.local import search\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: move to load and dump in models?\ndef load_library(json_file):\n try:\n with gzip.open(json_file, 'rb') as fp:\n return json.load(fp, object_hook=models.model_json_decoder)\n except (IOError, ValueError) as e:\n logger.warning('Loading JSON local library failed: %s', e)\n return {}\n\n\ndef write_library(json_file, data):\n data['version'] = mopidy.__version__\n directory, basename = os.path.split(json_file)\n\n # TODO: cleanup directory/basename.* files.\n tmp = tempfile.NamedTemporaryFile(\n prefix=basename + '.', dir=directory, delete=False)\n\n try:\n with gzip.GzipFile(fileobj=tmp, mode='wb') as fp:\n json.dump(data, fp, cls=models.ModelJSONEncoder,\n indent=2, separators=(',', ': '))\n os.rename(tmp.name, json_file)\n finally:\n if os.path.exists(tmp.name):\n os.remove(tmp.name)\n\n\nclass JsonLibrary(local.Library):\n name = b'json'\n\n def __init__(self, config):\n self._tracks = {}\n self._media_dir = config['local']['media_dir']\n self._json_file = os.path.join(\n config['local']['data_dir'], b'library.json.gz')\n\n def load(self):\n logger.debug('Loading json library from %s', self._json_file)\n library = load_library(self._json_file)\n self._tracks = dict((t.uri, t) for t in library.get('tracks', []))\n return len(self._tracks)\n\n def lookup(self, uri):\n try:\n return self._tracks[uri]\n except KeyError:\n return None\n\n def search(self, query=None, limit=100, offset=0, uris=None, exact=False):\n tracks = self._tracks.values()\n # TODO: pass limit and offset into search helpers\n if exact:\n return search.find_exact(tracks, query=query, uris=uris)\n else:\n return search.search(tracks, query=query, uris=uris)\n\n def begin(self):\n return self._tracks.itervalues()\n\n def add(self, track):\n self._tracks[track.uri] = track\n\n def remove(self, uri):\n self._tracks.pop(uri, None)\n\n def close(self):\n write_library(self._json_file, {'tracks': self._tracks.values()})\n\n def clear(self):\n try:\n os.remove(self._json_file)\n return True\n except OSError:\n return False\n" }, { "alpha_fraction": 0.6200934648513794, "alphanum_fraction": 0.6308411359786987, "avg_line_length": 27.157894134521484, "blob_id": "257003aab72114ae6fa43558e800b7c658a89946", "content_id": "8cc3df81bb07b858da668297817154f43fbc7f12", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2140, "license_type": "permissive", "max_line_length": 72, "num_lines": 76, "path": "/mopidy/backends/local/translator.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport logging\nimport os\nimport urlparse\nimport urllib\n\nfrom mopidy.utils.encoding import 
locale_decode\nfrom mopidy.utils.path import path_to_uri, uri_to_path\n\nlogger = logging.getLogger(__name__)\n\n\ndef local_track_uri_to_file_uri(uri, media_dir):\n    return path_to_uri(local_track_uri_to_path(uri, media_dir))\n\n\ndef local_track_uri_to_path(uri, media_dir):\n    if not uri.startswith('local:track:'):\n        raise ValueError('Invalid URI.')\n    file_path = uri_to_path(uri).split(b':', 1)[1]\n    return os.path.join(media_dir, file_path)\n\n\ndef path_to_local_track_uri(relpath):\n    \"\"\"Convert a path relative to media_dir to a local track URI.\"\"\"\n    if isinstance(relpath, unicode):\n        relpath = relpath.encode('utf-8')\n    return b'local:track:%s' % urllib.quote(relpath)\n\n\ndef parse_m3u(file_path, media_dir):\n    r\"\"\"\n    Convert an M3U file to a list of URIs\n\n    Example M3U data::\n\n        # This is a comment\n        Alternative\\Band - Song.mp3\n        Classical\\Other Band - New Song.mp3\n        Stuff.mp3\n        D:\\More Music\\Foo.mp3\n        http://www.example.com:8000/Listen.pls\n        http://www.example.com/~user/Mine.mp3\n\n    - Relative paths of songs should be with respect to the location of the M3U.\n    - Paths are normally platform specific.\n    - Lines starting with # should be ignored.\n    - m3u files are latin-1.\n    - This function does not bother with Extended M3U directives.\n    \"\"\"\n    # TODO: uris as bytes\n    uris = []\n    try:\n        with open(file_path) as m3u:\n            contents = m3u.readlines()\n    except IOError as error:\n        logger.warning('Couldn\\'t open m3u: %s', locale_decode(error))\n        return uris\n\n    for line in contents:\n        line = line.strip().decode('latin1')\n\n        if line.startswith('#'):\n            continue\n\n        if urlparse.urlsplit(line).scheme:\n            uris.append(line)\n        elif os.path.normpath(line) == os.path.abspath(line):\n            path = path_to_uri(line)\n            uris.append(path)\n        else:\n            path = path_to_uri(os.path.join(media_dir, line))\n            uris.append(path)\n\n    return uris\n" }, { "alpha_fraction": 0.7227304577827454, "alphanum_fraction": 0.7248416543006897, "avg_line_length": 25.836362838745117, "blob_id": "be60b31f6ac1ddab32bb0451209353faa8077a99", "content_id": "22e7d99ed27c692a33e9ccdc302ebda45eb55308", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1421, "license_type": "permissive", "max_line_length": 79, "num_lines": 55, "path": "/docs/ext/stream.rst", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": ".. _ext-stream:\n\n*************\nMopidy-Stream\n*************\n\nExtension for playing streaming music.\n\nThe stream backend will handle streaming of URIs matching the\n:confval:`stream/protocols` config value, assuming the needed GStreamer plugins\nare installed.\n\n\nDependencies\n============\n\nNone. The extension just needs Mopidy.\n\n\nDefault configuration\n=====================\n\n.. literalinclude:: ../../mopidy/backends/stream/ext.conf\n   :language: ini\n\n\nConfiguration values\n====================\n\n.. confval:: stream/enabled\n\n   If the stream extension should be enabled or not.\n\n.. confval:: stream/protocols\n\n   Whitelist of URI schemes to allow streaming from. Values should be\n   separated by either comma or newline.\n\n.. confval:: stream/timeout\n\n   Number of milliseconds before giving up looking up stream metadata.\n\n\nUsage\n=====\n\nThis backend does not provide a library or similar. It simply takes any URI\nadded to Mopidy's tracklist that matches any of the protocols in the\n:confval:`stream/protocols` setting and tries to play back the URI using\nGStreamer. E.g. 
if you're using an MPD client, you'll just have to find your\nclients \"add URI\" interface, and provide it with the direct URI of the stream.\n\nCurrently the stream backend can only work with URIs pointing direcly at\nstreams, and not intermediate playlists which is often used. See :issue:`303`\nto track the development of playlist expansion support.\n" }, { "alpha_fraction": 0.6333568096160889, "alphanum_fraction": 0.6461998820304871, "avg_line_length": 33.87116622924805, "blob_id": "17dbc896cf97f97ebed6aa7cd9c4125f52598e1d", "content_id": "5d646840efa7efe813741a680fb3a8c238a92e9f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5684, "license_type": "permissive", "max_line_length": 79, "num_lines": 163, "path": "/tests/core/events_test.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport mock\nimport unittest\n\nimport pykka\n\nfrom mopidy import core\nfrom mopidy.backends import dummy\nfrom mopidy.models import Track\n\n\[email protected](core.CoreListener, 'send')\nclass BackendEventsTest(unittest.TestCase):\n def setUp(self):\n self.backend = dummy.create_dummy_backend_proxy()\n self.core = core.Core.start(backends=[self.backend]).proxy()\n\n def tearDown(self):\n pykka.ActorRegistry.stop_all()\n\n def test_backends_playlists_loaded_forwards_event_to_frontends(self, send):\n self.core.playlists_loaded().get()\n\n self.assertEqual(send.call_args[0][0], 'playlists_loaded')\n\n def test_pause_sends_track_playback_paused_event(self, send):\n tl_tracks = self.core.tracklist.add([Track(uri='dummy:a')]).get()\n self.core.playback.play().get()\n send.reset_mock()\n\n self.core.playback.pause().get()\n\n self.assertEqual(send.call_args[0][0], 'track_playback_paused')\n self.assertEqual(send.call_args[1]['tl_track'], tl_tracks[0])\n self.assertEqual(send.call_args[1]['time_position'], 0)\n\n def test_resume_sends_track_playback_resumed(self, send):\n tl_tracks = self.core.tracklist.add([Track(uri='dummy:a')]).get()\n self.core.playback.play()\n self.core.playback.pause().get()\n send.reset_mock()\n\n self.core.playback.resume().get()\n\n self.assertEqual(send.call_args[0][0], 'track_playback_resumed')\n self.assertEqual(send.call_args[1]['tl_track'], tl_tracks[0])\n self.assertEqual(send.call_args[1]['time_position'], 0)\n\n def test_play_sends_track_playback_started_event(self, send):\n tl_tracks = self.core.tracklist.add([Track(uri='dummy:a')]).get()\n send.reset_mock()\n\n self.core.playback.play().get()\n\n self.assertEqual(send.call_args[0][0], 'track_playback_started')\n self.assertEqual(send.call_args[1]['tl_track'], tl_tracks[0])\n\n def test_stop_sends_track_playback_ended_event(self, send):\n tl_tracks = self.core.tracklist.add([Track(uri='dummy:a')]).get()\n self.core.playback.play().get()\n send.reset_mock()\n\n self.core.playback.stop().get()\n\n self.assertEqual(send.call_args_list[0][0][0], 'track_playback_ended')\n self.assertEqual(send.call_args_list[0][1]['tl_track'], tl_tracks[0])\n self.assertEqual(send.call_args_list[0][1]['time_position'], 0)\n\n def test_seek_sends_seeked_event(self, send):\n self.core.tracklist.add([Track(uri='dummy:a', length=40000)])\n self.core.playback.play().get()\n send.reset_mock()\n\n self.core.playback.seek(1000).get()\n\n self.assertEqual(send.call_args[0][0], 'seeked')\n self.assertEqual(send.call_args[1]['time_position'], 1000)\n\n def test_tracklist_add_sends_tracklist_changed_event(self, send):\n 
send.reset_mock()\n\n self.core.tracklist.add([Track(uri='dummy:a')]).get()\n\n self.assertEqual(send.call_args[0][0], 'tracklist_changed')\n\n def test_tracklist_clear_sends_tracklist_changed_event(self, send):\n self.core.tracklist.add([Track(uri='dummy:a')]).get()\n send.reset_mock()\n\n self.core.tracklist.clear().get()\n\n self.assertEqual(send.call_args[0][0], 'tracklist_changed')\n\n def test_tracklist_move_sends_tracklist_changed_event(self, send):\n self.core.tracklist.add(\n [Track(uri='dummy:a'), Track(uri='dummy:b')]).get()\n send.reset_mock()\n\n self.core.tracklist.move(0, 1, 1).get()\n\n self.assertEqual(send.call_args[0][0], 'tracklist_changed')\n\n def test_tracklist_remove_sends_tracklist_changed_event(self, send):\n self.core.tracklist.add([Track(uri='dummy:a')]).get()\n send.reset_mock()\n\n self.core.tracklist.remove(uri=['dummy:a']).get()\n\n self.assertEqual(send.call_args[0][0], 'tracklist_changed')\n\n def test_tracklist_shuffle_sends_tracklist_changed_event(self, send):\n self.core.tracklist.add(\n [Track(uri='dummy:a'), Track(uri='dummy:b')]).get()\n send.reset_mock()\n\n self.core.tracklist.shuffle().get()\n\n self.assertEqual(send.call_args[0][0], 'tracklist_changed')\n\n def test_playlists_refresh_sends_playlists_loaded_event(self, send):\n send.reset_mock()\n\n self.core.playlists.refresh().get()\n\n self.assertEqual(send.call_args[0][0], 'playlists_loaded')\n\n def test_playlists_refresh_uri_sends_playlists_loaded_event(self, send):\n send.reset_mock()\n\n self.core.playlists.refresh(uri_scheme='dummy').get()\n\n self.assertEqual(send.call_args[0][0], 'playlists_loaded')\n\n def test_playlists_create_sends_playlist_changed_event(self, send):\n send.reset_mock()\n\n self.core.playlists.create('foo').get()\n\n self.assertEqual(send.call_args[0][0], 'playlist_changed')\n\n @unittest.SkipTest\n def test_playlists_delete_sends_playlist_deleted_event(self, send):\n # TODO We should probably add a playlist_deleted event\n pass\n\n def test_playlists_save_sends_playlist_changed_event(self, send):\n playlist = self.core.playlists.create('foo').get()\n playlist = playlist.copy(name='bar')\n send.reset_mock()\n\n self.core.playlists.save(playlist).get()\n\n self.assertEqual(send.call_args[0][0], 'playlist_changed')\n\n def test_set_volume_sends_volume_changed_event(self, send):\n self.core.playback.set_volume(10).get()\n send.reset_mock()\n\n self.core.playback.set_volume(20).get()\n\n self.assertEqual(send.call_args[0][0], 'volume_changed')\n self.assertEqual(send.call_args[1]['volume'], 20)\n" }, { "alpha_fraction": 0.6618287563323975, "alphanum_fraction": 0.6618287563323975, "avg_line_length": 29.622222900390625, "blob_id": "b19b122211c8a47cf98bef447465825a25692d62", "content_id": "a5b2a53984340a545a4fbdab40def201c6e288ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1378, "license_type": "permissive", "max_line_length": 76, "num_lines": 45, "path": "/mopidy/backends/stream/actor.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport logging\nimport urlparse\n\nimport pykka\n\nfrom mopidy import audio as audio_lib, exceptions\nfrom mopidy.audio import scan\nfrom mopidy.backends import base\nfrom mopidy.models import Track\n\nlogger = logging.getLogger(__name__)\n\n\nclass StreamBackend(pykka.ThreadingActor, base.Backend):\n def __init__(self, config, audio):\n super(StreamBackend, self).__init__()\n\n self.library = 
StreamLibraryProvider(\n backend=self, timeout=config['stream']['timeout'])\n self.playback = base.BasePlaybackProvider(audio=audio, backend=self)\n self.playlists = None\n\n self.uri_schemes = audio_lib.supported_uri_schemes(\n config['stream']['protocols'])\n\n\nclass StreamLibraryProvider(base.BaseLibraryProvider):\n def __init__(self, backend, timeout):\n super(StreamLibraryProvider, self).__init__(backend)\n self._scanner = scan.Scanner(min_duration=None, timeout=timeout)\n\n def lookup(self, uri):\n if urlparse.urlsplit(uri).scheme not in self.backend.uri_schemes:\n return []\n\n try:\n data = self._scanner.scan(uri)\n track = scan.audio_data_to_track(data)\n except exceptions.ScannerError as e:\n logger.warning('Problem looking up %s: %s', uri, e)\n track = Track(uri=uri, name=uri)\n\n return [track]\n" }, { "alpha_fraction": 0.6646568775177002, "alphanum_fraction": 0.6853287220001221, "avg_line_length": 36.45161437988281, "blob_id": "341970ef1607917a2ea88369324565e3e500d7dc", "content_id": "f3374547fb52a06fbf2af62ecc65a6033af81aeb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6966, "license_type": "permissive", "max_line_length": 73, "num_lines": 186, "path": "/tests/core/playback_test.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport mock\nimport unittest\n\nfrom mopidy.backends import base\nfrom mopidy.core import Core, PlaybackState\nfrom mopidy.models import Track\n\n\nclass CorePlaybackTest(unittest.TestCase):\n def setUp(self):\n self.backend1 = mock.Mock()\n self.backend1.uri_schemes.get.return_value = ['dummy1']\n self.playback1 = mock.Mock(spec=base.BasePlaybackProvider)\n self.backend1.playback = self.playback1\n\n self.backend2 = mock.Mock()\n self.backend2.uri_schemes.get.return_value = ['dummy2']\n self.playback2 = mock.Mock(spec=base.BasePlaybackProvider)\n self.backend2.playback = self.playback2\n\n # A backend without the optional playback provider\n self.backend3 = mock.Mock()\n self.backend3.uri_schemes.get.return_value = ['dummy3']\n self.backend3.has_playback().get.return_value = False\n\n self.tracks = [\n Track(uri='dummy1:a', length=40000),\n Track(uri='dummy2:a', length=40000),\n Track(uri='dummy3:a', length=40000), # Unplayable\n Track(uri='dummy1:b', length=40000),\n ]\n\n self.core = Core(audio=None, backends=[\n self.backend1, self.backend2, self.backend3])\n self.core.tracklist.add(self.tracks)\n\n self.tl_tracks = self.core.tracklist.tl_tracks\n self.unplayable_tl_track = self.tl_tracks[2]\n\n def test_play_selects_dummy1_backend(self):\n self.core.playback.play(self.tl_tracks[0])\n\n self.playback1.play.assert_called_once_with(self.tracks[0])\n self.assertFalse(self.playback2.play.called)\n\n def test_play_selects_dummy2_backend(self):\n self.core.playback.play(self.tl_tracks[1])\n\n self.assertFalse(self.playback1.play.called)\n self.playback2.play.assert_called_once_with(self.tracks[1])\n\n def test_play_skips_to_next_on_unplayable_track(self):\n self.core.playback.play(self.unplayable_tl_track)\n\n self.playback1.play.assert_called_once_with(self.tracks[3])\n self.assertFalse(self.playback2.play.called)\n\n self.assertEqual(\n self.core.playback.current_tl_track, self.tl_tracks[3])\n\n def test_pause_selects_dummy1_backend(self):\n self.core.playback.play(self.tl_tracks[0])\n self.core.playback.pause()\n\n self.playback1.pause.assert_called_once_with()\n 
self.assertFalse(self.playback2.pause.called)\n\n def test_pause_selects_dummy2_backend(self):\n self.core.playback.play(self.tl_tracks[1])\n self.core.playback.pause()\n\n self.assertFalse(self.playback1.pause.called)\n self.playback2.pause.assert_called_once_with()\n\n def test_pause_changes_state_even_if_track_is_unplayable(self):\n self.core.playback.current_tl_track = self.unplayable_tl_track\n self.core.playback.pause()\n\n self.assertEqual(self.core.playback.state, PlaybackState.PAUSED)\n self.assertFalse(self.playback1.pause.called)\n self.assertFalse(self.playback2.pause.called)\n\n def test_resume_selects_dummy1_backend(self):\n self.core.playback.play(self.tl_tracks[0])\n self.core.playback.pause()\n self.core.playback.resume()\n\n self.playback1.resume.assert_called_once_with()\n self.assertFalse(self.playback2.resume.called)\n\n def test_resume_selects_dummy2_backend(self):\n self.core.playback.play(self.tl_tracks[1])\n self.core.playback.pause()\n self.core.playback.resume()\n\n self.assertFalse(self.playback1.resume.called)\n self.playback2.resume.assert_called_once_with()\n\n def test_resume_does_nothing_if_track_is_unplayable(self):\n self.core.playback.current_tl_track = self.unplayable_tl_track\n self.core.playback.state = PlaybackState.PAUSED\n self.core.playback.resume()\n\n self.assertEqual(self.core.playback.state, PlaybackState.PAUSED)\n self.assertFalse(self.playback1.resume.called)\n self.assertFalse(self.playback2.resume.called)\n\n def test_stop_selects_dummy1_backend(self):\n self.core.playback.play(self.tl_tracks[0])\n self.core.playback.stop()\n\n self.playback1.stop.assert_called_once_with()\n self.assertFalse(self.playback2.stop.called)\n\n def test_stop_selects_dummy2_backend(self):\n self.core.playback.play(self.tl_tracks[1])\n self.core.playback.stop()\n\n self.assertFalse(self.playback1.stop.called)\n self.playback2.stop.assert_called_once_with()\n\n def test_stop_changes_state_even_if_track_is_unplayable(self):\n self.core.playback.current_tl_track = self.unplayable_tl_track\n self.core.playback.state = PlaybackState.PAUSED\n self.core.playback.stop()\n\n self.assertEqual(self.core.playback.state, PlaybackState.STOPPED)\n self.assertFalse(self.playback1.stop.called)\n self.assertFalse(self.playback2.stop.called)\n\n def test_seek_selects_dummy1_backend(self):\n self.core.playback.play(self.tl_tracks[0])\n self.core.playback.seek(10000)\n\n self.playback1.seek.assert_called_once_with(10000)\n self.assertFalse(self.playback2.seek.called)\n\n def test_seek_selects_dummy2_backend(self):\n self.core.playback.play(self.tl_tracks[1])\n self.core.playback.seek(10000)\n\n self.assertFalse(self.playback1.seek.called)\n self.playback2.seek.assert_called_once_with(10000)\n\n def test_seek_fails_for_unplayable_track(self):\n self.core.playback.current_tl_track = self.unplayable_tl_track\n self.core.playback.state = PlaybackState.PLAYING\n success = self.core.playback.seek(1000)\n\n self.assertFalse(success)\n self.assertFalse(self.playback1.seek.called)\n self.assertFalse(self.playback2.seek.called)\n\n def test_time_position_selects_dummy1_backend(self):\n self.core.playback.play(self.tl_tracks[0])\n self.core.playback.seek(10000)\n self.core.playback.time_position\n\n self.playback1.get_time_position.assert_called_once_with()\n self.assertFalse(self.playback2.get_time_position.called)\n\n def test_time_position_selects_dummy2_backend(self):\n self.core.playback.play(self.tl_tracks[1])\n self.core.playback.seek(10000)\n self.core.playback.time_position\n\n 
self.assertFalse(self.playback1.get_time_position.called)\n self.playback2.get_time_position.assert_called_once_with()\n\n def test_time_position_returns_0_if_track_is_unplayable(self):\n self.core.playback.current_tl_track = self.unplayable_tl_track\n\n result = self.core.playback.time_position\n\n self.assertEqual(result, 0)\n self.assertFalse(self.playback1.get_time_position.called)\n self.assertFalse(self.playback2.get_time_position.called)\n\n def test_mute(self):\n self.assertEqual(self.core.playback.mute, False)\n\n self.core.playback.mute = True\n\n self.assertEqual(self.core.playback.mute, True)\n" }, { "alpha_fraction": 0.6492094993591309, "alphanum_fraction": 0.6492094993591309, "avg_line_length": 16.75438690185547, "blob_id": "d4274c86774612d0aad3c9951174a25cbabd84b6", "content_id": "ec78f2500ae506f170c0d275f60fa49285eb8b07", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1012, "license_type": "permissive", "max_line_length": 77, "num_lines": 57, "path": "/docs/api/backends.rst", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": ".. _backend-api:\n\n***********\nBackend API\n***********\n\n.. module:: mopidy.backends.base\n :synopsis: The API implemented by backends\n\nThe backend API is the interface that must be implemented when you create a\nbackend. If you are working on a frontend and need to access the backend, see\nthe :ref:`core-api`.\n\n\nBackend class\n=============\n\n.. autoclass:: mopidy.backends.base.Backend\n :members:\n\n\nPlayback provider\n=================\n\n.. autoclass:: mopidy.backends.base.BasePlaybackProvider\n :members:\n\n\nPlaylists provider\n==================\n\n.. autoclass:: mopidy.backends.base.BasePlaylistsProvider\n :members:\n\n\nLibrary provider\n================\n\n.. autoclass:: mopidy.backends.base.BaseLibraryProvider\n :members:\n\n\nBackend listener\n================\n\n.. autoclass:: mopidy.backends.listener.BackendListener\n :members:\n\n\n.. 
_backend-implementations:\n\nBackend implementations\n=======================\n\n* :mod:`mopidy.backends.dummy`\n* :mod:`mopidy.backends.local`\n* :mod:`mopidy.backends.stream`\n" }, { "alpha_fraction": 0.5464139580726624, "alphanum_fraction": 0.5569671988487244, "avg_line_length": 29.691823959350586, "blob_id": "30b7fcd14a41c566357c9026ba51059a610f2f09", "content_id": "ed3f8e01c8f481216a702979cea3e7ab83517b16", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9760, "license_type": "permissive", "max_line_length": 77, "num_lines": 318, "path": "/tests/audio/scan_test.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport os\nimport unittest\n\nfrom mopidy import exceptions\nfrom mopidy.audio import scan\nfrom mopidy.models import Track, Artist, Album\nfrom mopidy.utils import path as path_lib\n\nfrom tests import path_to_data_dir\n\n\nclass FakeGstDate(object):\n def __init__(self, year, month, day):\n self.year = year\n self.month = month\n self.day = day\n\n\nclass TranslatorTest(unittest.TestCase):\n def setUp(self):\n self.data = {\n 'uri': 'uri',\n 'album': 'albumname',\n 'track-number': 1,\n 'artist': 'name',\n 'composer': 'composer',\n 'performer': 'performer',\n 'album-artist': 'albumartistname',\n 'title': 'trackname',\n 'track-count': 2,\n 'album-disc-number': 2,\n 'album-disc-count': 3,\n 'date': FakeGstDate(2006, 1, 1,),\n 'container-format': 'ID3 tag',\n 'genre': 'genre',\n 'duration': 4531000000,\n 'comment': 'comment',\n 'musicbrainz-trackid': 'mbtrackid',\n 'musicbrainz-albumid': 'mbalbumid',\n 'musicbrainz-artistid': 'mbartistid',\n 'musicbrainz-albumartistid': 'mbalbumartistid',\n 'mtime': 1234,\n }\n\n self.album = {\n 'name': 'albumname',\n 'num_tracks': 2,\n 'num_discs': 3,\n 'musicbrainz_id': 'mbalbumid',\n }\n\n self.artist_single = {\n 'name': 'name',\n 'musicbrainz_id': 'mbartistid',\n }\n\n self.artist_multiple = {\n 'name': ['name1', 'name2'],\n 'musicbrainz_id': 'mbartistid',\n }\n\n self.artist = self.artist_single\n\n self.composer_single = {\n 'name': 'composer',\n }\n\n self.composer_multiple = {\n 'name': ['composer1', 'composer2'],\n }\n\n self.composer = self.composer_single\n\n self.performer_single = {\n 'name': 'performer',\n }\n\n self.performer_multiple = {\n 'name': ['performer1', 'performer2'],\n }\n\n self.performer = self.performer_single\n\n self.albumartist = {\n 'name': 'albumartistname',\n 'musicbrainz_id': 'mbalbumartistid',\n }\n\n self.track = {\n 'uri': 'uri',\n 'name': 'trackname',\n 'date': '2006-01-01',\n 'genre': 'genre',\n 'track_no': 1,\n 'disc_no': 2,\n 'comment': 'comment',\n 'length': 4531,\n 'musicbrainz_id': 'mbtrackid',\n 'last_modified': 1234,\n }\n\n def build_track(self):\n if self.albumartist:\n self.album['artists'] = [Artist(**self.albumartist)]\n self.track['album'] = Album(**self.album)\n\n if ('name' in self.artist\n and not isinstance(self.artist['name'], basestring)):\n self.track['artists'] = [Artist(name=artist)\n for artist in self.artist['name']]\n else:\n self.track['artists'] = [Artist(**self.artist)]\n\n if ('name' in self.composer\n and not isinstance(self.composer['name'], basestring)):\n self.track['composers'] = [Artist(name=artist)\n for artist in self.composer['name']]\n else:\n self.track['composers'] = [Artist(**self.composer)] \\\n if self.composer else ''\n\n if ('name' in self.performer\n and not isinstance(self.performer['name'], basestring)):\n 
self.track['performers'] = [Artist(name=artist)\n for artist in self.performer['name']]\n else:\n self.track['performers'] = [Artist(**self.performer)] \\\n if self.performer else ''\n\n return Track(**self.track)\n\n def check(self):\n expected = self.build_track()\n actual = scan.audio_data_to_track(self.data)\n self.assertEqual(expected, actual)\n\n def test_basic_data(self):\n self.check()\n\n def test_missing_track_number(self):\n del self.data['track-number']\n del self.track['track_no']\n self.check()\n\n def test_missing_track_count(self):\n del self.data['track-count']\n del self.album['num_tracks']\n self.check()\n\n def test_missing_track_name(self):\n del self.data['title']\n del self.track['name']\n self.check()\n\n def test_missing_track_musicbrainz_id(self):\n del self.data['musicbrainz-trackid']\n del self.track['musicbrainz_id']\n self.check()\n\n def test_missing_album_name(self):\n del self.data['album']\n del self.album['name']\n self.check()\n\n def test_missing_album_musicbrainz_id(self):\n del self.data['musicbrainz-albumid']\n del self.album['musicbrainz_id']\n self.check()\n\n def test_missing_artist_name(self):\n del self.data['artist']\n del self.artist['name']\n self.check()\n\n def test_missing_composer_name(self):\n del self.data['composer']\n del self.composer['name']\n self.check()\n\n def test_multiple_track_composers(self):\n self.data['composer'] = ['composer1', 'composer2']\n self.composer = self.composer_multiple\n self.check()\n\n def test_multiple_track_performers(self):\n self.data['performer'] = ['performer1', 'performer2']\n self.performer = self.performer_multiple\n self.check()\n\n def test_missing_performer_name(self):\n del self.data['performer']\n del self.performer['name']\n self.check()\n\n def test_missing_artist_musicbrainz_id(self):\n del self.data['musicbrainz-artistid']\n del self.artist['musicbrainz_id']\n self.check()\n\n def test_multiple_track_artists(self):\n self.data['artist'] = ['name1', 'name2']\n self.data['musicbrainz-artistid'] = 'mbartistid'\n self.artist = self.artist_multiple\n self.check()\n\n def test_missing_album_artist(self):\n del self.data['album-artist']\n del self.albumartist['name']\n self.check()\n\n def test_missing_album_artist_musicbrainz_id(self):\n del self.data['musicbrainz-albumartistid']\n del self.albumartist['musicbrainz_id']\n self.check()\n\n def test_missing_genre(self):\n del self.data['genre']\n del self.track['genre']\n self.check()\n\n def test_missing_date(self):\n del self.data['date']\n del self.track['date']\n self.check()\n\n def test_invalid_date(self):\n self.data['date'] = FakeGstDate(65535, 1, 1)\n del self.track['date']\n self.check()\n\n def test_missing_comment(self):\n del self.data['comment']\n del self.track['comment']\n self.check()\n\n\nclass ScannerTest(unittest.TestCase):\n def setUp(self):\n self.errors = {}\n self.data = {}\n\n def find(self, path):\n media_dir = path_to_data_dir(path)\n for path in path_lib.find_files(media_dir):\n yield os.path.join(media_dir, path)\n\n def scan(self, paths):\n scanner = scan.Scanner()\n for path in paths:\n uri = path_lib.path_to_uri(path)\n key = uri[len('file://'):]\n try:\n self.data[key] = scanner.scan(uri)\n except exceptions.ScannerError as error:\n self.errors[key] = error\n\n def check(self, name, key, value):\n name = path_to_data_dir(name)\n self.assertEqual(self.data[name][key], value)\n\n def test_data_is_set(self):\n self.scan(self.find('scanner/simple'))\n self.assert_(self.data)\n\n def test_errors_is_not_set(self):\n 
self.scan(self.find('scanner/simple'))\n self.assert_(not self.errors)\n\n def test_uri_is_set(self):\n self.scan(self.find('scanner/simple'))\n self.check(\n 'scanner/simple/song1.mp3', 'uri',\n 'file://%s' % path_to_data_dir('scanner/simple/song1.mp3'))\n self.check(\n 'scanner/simple/song1.ogg', 'uri',\n 'file://%s' % path_to_data_dir('scanner/simple/song1.ogg'))\n\n def test_duration_is_set(self):\n self.scan(self.find('scanner/simple'))\n self.check('scanner/simple/song1.mp3', 'duration', 4680000000)\n self.check('scanner/simple/song1.ogg', 'duration', 4680000000)\n\n def test_artist_is_set(self):\n self.scan(self.find('scanner/simple'))\n self.check('scanner/simple/song1.mp3', 'artist', 'name')\n self.check('scanner/simple/song1.ogg', 'artist', 'name')\n\n def test_album_is_set(self):\n self.scan(self.find('scanner/simple'))\n self.check('scanner/simple/song1.mp3', 'album', 'albumname')\n self.check('scanner/simple/song1.ogg', 'album', 'albumname')\n\n def test_track_is_set(self):\n self.scan(self.find('scanner/simple'))\n self.check('scanner/simple/song1.mp3', 'title', 'trackname')\n self.check('scanner/simple/song1.ogg', 'title', 'trackname')\n\n def test_nonexistant_dir_does_not_fail(self):\n self.scan(self.find('scanner/does-not-exist'))\n self.assert_(not self.errors)\n\n def test_other_media_is_ignored(self):\n self.scan(self.find('scanner/image'))\n self.assert_(self.errors)\n\n def test_log_file_that_gst_thinks_is_mpeg_1_is_ignored(self):\n self.scan([path_to_data_dir('scanner/example.log')])\n self.assert_(self.errors)\n\n def test_empty_wav_file_is_ignored(self):\n self.scan([path_to_data_dir('scanner/empty.wav')])\n self.assert_(self.errors)\n\n @unittest.SkipTest\n def test_song_without_time_is_handeled(self):\n pass\n" }, { "alpha_fraction": 0.6008374094963074, "alphanum_fraction": 0.6158409118652344, "avg_line_length": 30.844444274902344, "blob_id": "060db972335c56239cbc8260677553af28874fb3", "content_id": "e5747f68cdef588b842b0800813cef36f039e31c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2869, "license_type": "permissive", "max_line_length": 72, "num_lines": 90, "path": "/tests/backends/local/translator_test.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport os\nimport tempfile\nimport unittest\n\nfrom mopidy.backends.local.translator import parse_m3u\nfrom mopidy.utils.path import path_to_uri\n\nfrom tests import path_to_data_dir\n\ndata_dir = path_to_data_dir('')\nsong1_path = path_to_data_dir('song1.mp3')\nsong2_path = path_to_data_dir('song2.mp3')\nencoded_path = path_to_data_dir('æøå.mp3')\nsong1_uri = path_to_uri(song1_path)\nsong2_uri = path_to_uri(song2_path)\nencoded_uri = path_to_uri(encoded_path)\n\n# FIXME use mock instead of tempfile.NamedTemporaryFile\n\n\nclass M3UToUriTest(unittest.TestCase):\n def test_empty_file(self):\n uris = parse_m3u(path_to_data_dir('empty.m3u'), data_dir)\n self.assertEqual([], uris)\n\n def test_basic_file(self):\n uris = parse_m3u(path_to_data_dir('one.m3u'), data_dir)\n self.assertEqual([song1_uri], uris)\n\n def test_file_with_comment(self):\n uris = parse_m3u(path_to_data_dir('comment.m3u'), data_dir)\n self.assertEqual([song1_uri], uris)\n\n def test_file_is_relative_to_correct_dir(self):\n with tempfile.NamedTemporaryFile(delete=False) as tmp:\n tmp.write('song1.mp3')\n try:\n uris = parse_m3u(tmp.name, data_dir)\n self.assertEqual([song1_uri], 
uris)\n finally:\n if os.path.exists(tmp.name):\n os.remove(tmp.name)\n\n def test_file_with_absolute_files(self):\n with tempfile.NamedTemporaryFile(delete=False) as tmp:\n tmp.write(song1_path)\n try:\n uris = parse_m3u(tmp.name, data_dir)\n self.assertEqual([song1_uri], uris)\n finally:\n if os.path.exists(tmp.name):\n os.remove(tmp.name)\n\n def test_file_with_multiple_absolute_files(self):\n with tempfile.NamedTemporaryFile(delete=False) as tmp:\n tmp.write(song1_path + '\\n')\n tmp.write('# comment \\n')\n tmp.write(song2_path)\n try:\n uris = parse_m3u(tmp.name, data_dir)\n self.assertEqual([song1_uri, song2_uri], uris)\n finally:\n if os.path.exists(tmp.name):\n os.remove(tmp.name)\n\n def test_file_with_uri(self):\n with tempfile.NamedTemporaryFile(delete=False) as tmp:\n tmp.write(song1_uri)\n try:\n uris = parse_m3u(tmp.name, data_dir)\n self.assertEqual([song1_uri], uris)\n finally:\n if os.path.exists(tmp.name):\n os.remove(tmp.name)\n\n def test_encoding_is_latin1(self):\n uris = parse_m3u(path_to_data_dir('encoding.m3u'), data_dir)\n self.assertEqual([encoded_uri], uris)\n\n def test_open_missing_file(self):\n uris = parse_m3u(path_to_data_dir('non-existant.m3u'), data_dir)\n self.assertEqual([], uris)\n\n\nclass URItoM3UTest(unittest.TestCase):\n pass\n" }, { "alpha_fraction": 0.6202090382575989, "alphanum_fraction": 0.6407665610313416, "avg_line_length": 37.783782958984375, "blob_id": "db028bdc5a36e735013b8a29685a87f18424bce5", "content_id": "f4028d2f253040d22df0dae43d97dbe466d7d0d9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8610, "license_type": "permissive", "max_line_length": 77, "num_lines": 222, "path": "/tests/core/library_test.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport mock\nimport unittest\n\nfrom mopidy.backends import base\nfrom mopidy.core import Core\nfrom mopidy.models import SearchResult, Track\n\n\nclass CoreLibraryTest(unittest.TestCase):\n def setUp(self):\n self.backend1 = mock.Mock()\n self.backend1.uri_schemes.get.return_value = ['dummy1']\n self.library1 = mock.Mock(spec=base.BaseLibraryProvider)\n self.backend1.library = self.library1\n\n self.backend2 = mock.Mock()\n self.backend2.uri_schemes.get.return_value = ['dummy2']\n self.library2 = mock.Mock(spec=base.BaseLibraryProvider)\n self.backend2.library = self.library2\n\n # A backend without the optional library provider\n self.backend3 = mock.Mock()\n self.backend3.uri_schemes.get.return_value = ['dummy3']\n self.backend3.has_library().get.return_value = False\n\n self.core = Core(audio=None, backends=[\n self.backend1, self.backend2, self.backend3])\n\n def test_lookup_selects_dummy1_backend(self):\n self.core.library.lookup('dummy1:a')\n\n self.library1.lookup.assert_called_once_with('dummy1:a')\n self.assertFalse(self.library2.lookup.called)\n\n def test_lookup_selects_dummy2_backend(self):\n self.core.library.lookup('dummy2:a')\n\n self.assertFalse(self.library1.lookup.called)\n self.library2.lookup.assert_called_once_with('dummy2:a')\n\n def test_lookup_returns_nothing_for_dummy3_track(self):\n result = self.core.library.lookup('dummy3:a')\n\n self.assertEqual(result, [])\n self.assertFalse(self.library1.lookup.called)\n self.assertFalse(self.library2.lookup.called)\n\n def test_refresh_with_uri_selects_dummy1_backend(self):\n self.core.library.refresh('dummy1:a')\n\n self.library1.refresh.assert_called_once_with('dummy1:a')\n 
self.assertFalse(self.library2.refresh.called)\n\n def test_refresh_with_uri_selects_dummy2_backend(self):\n self.core.library.refresh('dummy2:a')\n\n self.assertFalse(self.library1.refresh.called)\n self.library2.refresh.assert_called_once_with('dummy2:a')\n\n def test_refresh_with_uri_fails_silently_for_dummy3_uri(self):\n self.core.library.refresh('dummy3:a')\n\n self.assertFalse(self.library1.refresh.called)\n self.assertFalse(self.library2.refresh.called)\n\n def test_refresh_without_uri_calls_all_backends(self):\n self.core.library.refresh()\n\n self.library1.refresh.assert_called_once_with(None)\n self.library2.refresh.assert_called_once_with(None)\n\n def test_find_exact_combines_results_from_all_backends(self):\n track1 = Track(uri='dummy1:a')\n track2 = Track(uri='dummy2:a')\n result1 = SearchResult(tracks=[track1])\n result2 = SearchResult(tracks=[track2])\n\n self.library1.find_exact().get.return_value = result1\n self.library1.find_exact.reset_mock()\n self.library2.find_exact().get.return_value = result2\n self.library2.find_exact.reset_mock()\n\n result = self.core.library.find_exact(any=['a'])\n\n self.assertIn(result1, result)\n self.assertIn(result2, result)\n self.library1.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n self.library2.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n\n def test_find_exact_with_uris_selects_dummy1_backend(self):\n self.core.library.find_exact(\n any=['a'], uris=['dummy1:', 'dummy1:foo', 'dummy3:'])\n\n self.library1.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=['dummy1:', 'dummy1:foo'])\n self.assertFalse(self.library2.find_exact.called)\n\n def test_find_exact_with_uris_selects_both_backends(self):\n self.core.library.find_exact(\n any=['a'], uris=['dummy1:', 'dummy1:foo', 'dummy2:'])\n\n self.library1.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=['dummy1:', 'dummy1:foo'])\n self.library2.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=['dummy2:'])\n\n def test_find_exact_filters_out_none(self):\n track1 = Track(uri='dummy1:a')\n result1 = SearchResult(tracks=[track1])\n\n self.library1.find_exact().get.return_value = result1\n self.library1.find_exact.reset_mock()\n self.library2.find_exact().get.return_value = None\n self.library2.find_exact.reset_mock()\n\n result = self.core.library.find_exact(any=['a'])\n\n self.assertIn(result1, result)\n self.assertNotIn(None, result)\n self.library1.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n self.library2.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n\n def test_find_accepts_query_dict_instead_of_kwargs(self):\n track1 = Track(uri='dummy1:a')\n track2 = Track(uri='dummy2:a')\n result1 = SearchResult(tracks=[track1])\n result2 = SearchResult(tracks=[track2])\n\n self.library1.find_exact().get.return_value = result1\n self.library1.find_exact.reset_mock()\n self.library2.find_exact().get.return_value = result2\n self.library2.find_exact.reset_mock()\n\n result = self.core.library.find_exact(dict(any=['a']))\n\n self.assertIn(result1, result)\n self.assertIn(result2, result)\n self.library1.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n self.library2.find_exact.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n\n def test_search_combines_results_from_all_backends(self):\n track1 = Track(uri='dummy1:a')\n track2 = Track(uri='dummy2:a')\n result1 = SearchResult(tracks=[track1])\n result2 = 
SearchResult(tracks=[track2])\n\n self.library1.search().get.return_value = result1\n self.library1.search.reset_mock()\n self.library2.search().get.return_value = result2\n self.library2.search.reset_mock()\n\n result = self.core.library.search(any=['a'])\n\n self.assertIn(result1, result)\n self.assertIn(result2, result)\n self.library1.search.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n self.library2.search.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n\n def test_search_with_uris_selects_dummy1_backend(self):\n self.core.library.search(\n query=dict(any=['a']), uris=['dummy1:', 'dummy1:foo', 'dummy3:'])\n\n self.library1.search.assert_called_once_with(\n query=dict(any=['a']), uris=['dummy1:', 'dummy1:foo'])\n self.assertFalse(self.library2.search.called)\n\n def test_search_with_uris_selects_both_backends(self):\n self.core.library.search(\n query=dict(any=['a']), uris=['dummy1:', 'dummy1:foo', 'dummy2:'])\n\n self.library1.search.assert_called_once_with(\n query=dict(any=['a']), uris=['dummy1:', 'dummy1:foo'])\n self.library2.search.assert_called_once_with(\n query=dict(any=['a']), uris=['dummy2:'])\n\n def test_search_filters_out_none(self):\n track1 = Track(uri='dummy1:a')\n result1 = SearchResult(tracks=[track1])\n\n self.library1.search().get.return_value = result1\n self.library1.search.reset_mock()\n self.library2.search().get.return_value = None\n self.library2.search.reset_mock()\n\n result = self.core.library.search(any=['a'])\n\n self.assertIn(result1, result)\n self.assertNotIn(None, result)\n self.library1.search.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n self.library2.search.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n\n def test_search_accepts_query_dict_instead_of_kwargs(self):\n track1 = Track(uri='dummy1:a')\n track2 = Track(uri='dummy2:a')\n result1 = SearchResult(tracks=[track1])\n result2 = SearchResult(tracks=[track2])\n\n self.library1.search().get.return_value = result1\n self.library1.search.reset_mock()\n self.library2.search().get.return_value = result2\n self.library2.search.reset_mock()\n\n result = self.core.library.search(dict(any=['a']))\n\n self.assertIn(result1, result)\n self.assertIn(result2, result)\n self.library1.search.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n self.library2.search.assert_called_once_with(\n query=dict(any=['a']), uris=None)\n" }, { "alpha_fraction": 0.601107656955719, "alphanum_fraction": 0.6032972931861877, "avg_line_length": 35.45070266723633, "blob_id": "5a53a603fae26ed01abb93b32c0912ff19b02ea9", "content_id": "0c8e34780633d009ef98167b8d1c2b006dd1cf0e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7764, "license_type": "permissive", "max_line_length": 79, "num_lines": 213, "path": "/mopidy/audio/scan.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport pygst\npygst.require('0.10')\nimport gst\n\nimport datetime\nimport os\nimport time\n\nfrom mopidy import exceptions\nfrom mopidy.models import Track, Artist, Album\nfrom mopidy.utils import path\n\n\nclass Scanner(object):\n \"\"\"\n Helper to get tags and other relevant info from URIs.\n\n :param timeout: timeout for scanning a URI in ms\n :type event: int\n :param min_duration: minimum duration of scanned URI in ms, -1 for all.\n :type event: int\n \"\"\"\n\n def __init__(self, timeout=1000, min_duration=100):\n self._timeout_ms = timeout\n 
self._min_duration_ms = min_duration\n\n sink = gst.element_factory_make('fakesink')\n\n audio_caps = gst.Caps(b'audio/x-raw-int; audio/x-raw-float')\n pad_added = lambda src, pad: pad.link(sink.get_pad('sink'))\n\n self._uribin = gst.element_factory_make('uridecodebin')\n self._uribin.set_property('caps', audio_caps)\n self._uribin.connect('pad-added', pad_added)\n\n self._pipe = gst.element_factory_make('pipeline')\n self._pipe.add(self._uribin)\n self._pipe.add(sink)\n\n self._bus = self._pipe.get_bus()\n self._bus.set_flushing(True)\n\n def scan(self, uri):\n \"\"\"\n Scan the given uri collecting relevant metadata.\n\n :param uri: URI of the resource to scan.\n :type event: string\n :return: Dictionary of tags, duration, mtime and uri information.\n \"\"\"\n try:\n self._setup(uri)\n data = self._collect()\n # Make sure uri, mtime and duration does not come from tags.\n data[b'uri'] = uri\n data[b'mtime'] = self._query_mtime(uri)\n data[gst.TAG_DURATION] = self._query_duration()\n finally:\n self._reset()\n\n if self._min_duration_ms is None:\n return data\n elif data[gst.TAG_DURATION] >= self._min_duration_ms * gst.MSECOND:\n return data\n\n raise exceptions.ScannerError('Rejecting file with less than %dms '\n 'audio data.' % self._min_duration_ms)\n\n def _setup(self, uri):\n \"\"\"Primes the pipeline for collection.\"\"\"\n self._pipe.set_state(gst.STATE_READY)\n self._uribin.set_property(b'uri', uri)\n self._bus.set_flushing(False)\n result = self._pipe.set_state(gst.STATE_PAUSED)\n if result == gst.STATE_CHANGE_NO_PREROLL:\n # Live sources don't pre-roll, so set to playing to get data.\n self._pipe.set_state(gst.STATE_PLAYING)\n\n def _collect(self):\n \"\"\"Polls for messages to collect data.\"\"\"\n start = time.time()\n timeout_s = self._timeout_ms / float(1000)\n data = {}\n\n while time.time() - start < timeout_s:\n if not self._bus.have_pending():\n continue\n message = self._bus.pop()\n\n if message.type == gst.MESSAGE_ERROR:\n raise exceptions.ScannerError(message.parse_error()[0])\n elif message.type == gst.MESSAGE_EOS:\n return data\n elif message.type == gst.MESSAGE_ASYNC_DONE:\n if message.src == self._pipe:\n return data\n elif message.type == gst.MESSAGE_TAG:\n taglist = message.parse_tag()\n for key in taglist.keys():\n data[key] = taglist[key]\n\n raise exceptions.ScannerError('Timeout after %dms' % self._timeout_ms)\n\n def _reset(self):\n \"\"\"Ensures we cleanup child elements and flush the bus.\"\"\"\n self._bus.set_flushing(True)\n self._pipe.set_state(gst.STATE_NULL)\n\n def _query_duration(self):\n try:\n return self._pipe.query_duration(gst.FORMAT_TIME, None)[0]\n except gst.QueryError:\n return None\n\n def _query_mtime(self, uri):\n if not uri.startswith('file:'):\n return None\n return os.path.getmtime(path.uri_to_path(uri))\n\n\ndef audio_data_to_track(data):\n \"\"\"Convert taglist data + our extras to a track.\"\"\"\n albumartist_kwargs = {}\n album_kwargs = {}\n artist_kwargs = {}\n composer_kwargs = {}\n performer_kwargs = {}\n track_kwargs = {}\n\n def _retrieve(source_key, target_key, target):\n if source_key in data:\n target.setdefault(target_key, data[source_key])\n\n _retrieve(gst.TAG_ALBUM, 'name', album_kwargs)\n _retrieve(gst.TAG_TRACK_COUNT, 'num_tracks', album_kwargs)\n _retrieve(gst.TAG_ALBUM_VOLUME_COUNT, 'num_discs', album_kwargs)\n _retrieve(gst.TAG_ARTIST, 'name', artist_kwargs)\n _retrieve(gst.TAG_COMPOSER, 'name', composer_kwargs)\n _retrieve(gst.TAG_PERFORMER, 'name', performer_kwargs)\n _retrieve(gst.TAG_ALBUM_ARTIST, 'name', 
albumartist_kwargs)\n _retrieve(gst.TAG_TITLE, 'name', track_kwargs)\n _retrieve(gst.TAG_TRACK_NUMBER, 'track_no', track_kwargs)\n _retrieve(gst.TAG_ALBUM_VOLUME_NUMBER, 'disc_no', track_kwargs)\n _retrieve(gst.TAG_GENRE, 'genre', track_kwargs)\n _retrieve(gst.TAG_BITRATE, 'bitrate', track_kwargs)\n\n # Following keys don't seem to have TAG_* constant.\n _retrieve('comment', 'comment', track_kwargs)\n _retrieve('musicbrainz-trackid', 'musicbrainz_id', track_kwargs)\n _retrieve('musicbrainz-artistid', 'musicbrainz_id', artist_kwargs)\n _retrieve('musicbrainz-albumid', 'musicbrainz_id', album_kwargs)\n _retrieve(\n 'musicbrainz-albumartistid', 'musicbrainz_id', albumartist_kwargs)\n\n # For streams, will not override if a better value has already been set.\n _retrieve(gst.TAG_ORGANIZATION, 'name', track_kwargs)\n _retrieve(gst.TAG_LOCATION, 'comment', track_kwargs)\n _retrieve(gst.TAG_COPYRIGHT, 'comment', track_kwargs)\n\n if gst.TAG_DATE in data and data[gst.TAG_DATE]:\n date = data[gst.TAG_DATE]\n try:\n date = datetime.date(date.year, date.month, date.day)\n except ValueError:\n pass # Ignore invalid dates\n else:\n track_kwargs['date'] = date.isoformat()\n\n if albumartist_kwargs:\n album_kwargs['artists'] = [Artist(**albumartist_kwargs)]\n\n if data['mtime']:\n track_kwargs['last_modified'] = int(data['mtime'])\n\n if data[gst.TAG_DURATION]:\n track_kwargs['length'] = data[gst.TAG_DURATION] // gst.MSECOND\n\n track_kwargs['uri'] = data['uri']\n track_kwargs['album'] = Album(**album_kwargs)\n\n # TODO: this feels like a half assed workaround. we need to be sure that we\n # don't suddenly have lists in our models where we expect strings etc\n if ('genre' in track_kwargs and\n not isinstance(track_kwargs['genre'], basestring)):\n track_kwargs['genre'] = ', '.join(track_kwargs['genre'])\n\n if ('name' in artist_kwargs\n and not isinstance(artist_kwargs['name'], basestring)):\n track_kwargs['artists'] = [Artist(name=artist)\n for artist in artist_kwargs['name']]\n else:\n track_kwargs['artists'] = [Artist(**artist_kwargs)]\n\n if ('name' in composer_kwargs\n and not isinstance(composer_kwargs['name'], basestring)):\n track_kwargs['composers'] = [Artist(name=artist)\n for artist in composer_kwargs['name']]\n else:\n track_kwargs['composers'] = \\\n [Artist(**composer_kwargs)] if composer_kwargs else ''\n\n if ('name' in performer_kwargs\n and not isinstance(performer_kwargs['name'], basestring)):\n track_kwargs['performers'] = [Artist(name=artist)\n for artist in performer_kwargs['name']]\n else:\n track_kwargs['performers'] = \\\n [Artist(**performer_kwargs)] if performer_kwargs else ''\n\n return Track(**track_kwargs)\n" }, { "alpha_fraction": 0.7119628190994263, "alphanum_fraction": 0.7166085839271545, "avg_line_length": 27.462810516357422, "blob_id": "d28c14ba2d4b0e4bd0e809a4f935a08aa5cfc229", "content_id": "fa91f6a27c9a5a725ded14534166201a75a2fbc2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3444, "license_type": "permissive", "max_line_length": 79, "num_lines": 121, "path": "/docs/ext/mpd.rst", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": ".. _ext-mpd:\n\n**********\nMopidy-MPD\n**********\n\nThis extension implements an MPD server to make Mopidy available to :ref:`MPD\nclients <mpd-clients>`.\n\nMPD stands for Music Player Daemon, which is also the name of the `original MPD\nserver project <http://mpd.wikia.com/>`_. 
Mopidy does not depend on the\noriginal MPD server, but implements the MPD protocol itself, and is thus\ncompatible with clients for the original MPD server.\n\nFor more details on our MPD server implementation, see :mod:`mopidy.mpd`.\n\n\nLimitations\n===========\n\nThis is a non exhaustive list of MPD features that Mopidy doesn't support.\nItems on this list will probably not be supported in the near future.\n\n- Toggling of audio outputs is not supported\n- Channels for client-to-client communication are not supported\n- Stickers are not supported\n- Crossfade is not supported\n- Replay gain is not supported\n- ``stats`` does not provide any statistics\n- ``decoders`` does not provide information about available decoders\n\nThe following items are currently not supported, but should be added in the\nnear future:\n\n- Modifying stored playlists is not supported\n- ``tagtypes`` is not supported\n- Browsing the file system is not supported\n- Live update of the music database is not supported\n\n\nDependencies\n============\n\nNone. The extension just needs Mopidy.\n\n\nDefault configuration\n=====================\n\n.. literalinclude:: ../../mopidy/mpd/ext.conf\n :language: ini\n\n\nConfiguration values\n====================\n\n.. confval:: mpd/enabled\n\n If the MPD extension should be enabled or not.\n\n.. confval:: mpd/hostname\n\n Which address the MPD server should bind to.\n\n ``127.0.0.1``\n Listens only on the IPv4 loopback interface\n ``::1``\n Listens only on the IPv6 loopback interface\n ``0.0.0.0``\n Listens on all IPv4 interfaces\n ``::``\n Listens on all interfaces, both IPv4 and IPv6\n\n.. confval:: mpd/port\n\n Which TCP port the MPD server should listen to.\n\n.. confval:: mpd/password\n\n The password required for connecting to the MPD server. If blank, no\n password is required.\n\n.. confval:: mpd/max_connections\n\n The maximum number of concurrent connections the MPD server will accept.\n\n.. confval:: mpd/connection_timeout\n\n Number of seconds an MPD client can stay inactive before the connection is\n closed by the server.\n\n.. confval:: mpd/zeroconf\n\n Name of the MPD service when published through Zeroconf. The variables\n ``$hostname`` and ``$port`` can be used in the name.\n\n Set to an empty string to disable Zeroconf for MPD.\n\n\nUsage\n=====\n\nThe extension is enabled by default. To connect to the server, use an :ref:`MPD\nclient <mpd-clients>`.\n\n\n.. _use-mpd-on-a-network:\n\nConnecting from other machines on the network\n---------------------------------------------\n\nAs a secure default, Mopidy only accepts connections from ``localhost``. If you\nwant to open it for connections from other machines on your network, see\nthe documentation for the :confval:`mpd/hostname` config value.\n\nIf you open up Mopidy for your local network, you should consider turning on\nMPD password authentication by setting the :confval:`mpd/password` config value\nto the password you want to use. If the password is set, Mopidy will require\nMPD clients to provide the password before they can do anything else. 
Mopidy\nonly supports a single password, and do not support different permission\nschemes like the original MPD server.\n" }, { "alpha_fraction": 0.698113203048706, "alphanum_fraction": 0.6995372176170349, "avg_line_length": 19.65441131591797, "blob_id": "53f583095abb77986140440861d7e09d2eb3e0d0", "content_id": "27fe3b457afbc184180a936cda8e5d6cfee57fbc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2809, "license_type": "permissive", "max_line_length": 79, "num_lines": 136, "path": "/docs/ext/index.rst", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": ".. _ext:\n\n**********\nExtensions\n**********\n\nHere you can find a list of packages that extend Mopidy with additional\nfunctionality. This list is moderated and updated on a regular basis. If you\nwant your package to show up here, follow the :ref:`guide on creating\nextensions <extensiondev>`.\n\n\nBundled with Mopidy\n===================\n\nThese extensions are maintained by Mopidy's core developers. They are installed\ntogether with Mopidy and are enabled by default.\n\n.. toctree::\n :maxdepth: 1\n :glob:\n\n **\n\n\nExternal extensions\n===================\n\nThese extensions are maintained outside Mopidy's core, often by other\ndevelopers.\n\n\nMopidy-Arcam\n------------\n\nhttps://github.com/TooDizzy/mopidy-arcam\n\nExtension for controlling volume using an external Arcam amplifier. Developed\nand tested with an Arcam AVR-300.\n\n\nMopidy-Beets\n------------\n\nhttps://github.com/mopidy/mopidy-beets\n\nProvides a backend for playing music from your `Beets\n<http://beets.radbox.org/>`_ music library through Beets' web extension.\n\n\nMopidy-GMusic\n-------------\n\nhttps://github.com/hechtus/mopidy-gmusic\n\nProvides a backend for playing music from `Google Play Music\n<https://play.google.com/music/>`_.\n\n\nMopidy-MPRIS\n------------\n\nhttps://github.com/mopidy/mopidy-mpris\n\nExtension for controlling Mopidy through the `MPRIS <http://www.mpris.org/>`_\nD-Bus interface, for example using the Ubuntu Sound Menu.\n\n\nMopidy-NAD\n----------\n\nhttps://github.com/mopidy/mopidy-nad\n\nExtension for controlling volume using an external NAD amplifier.\n\n\nMopidy-Notifier\n---------------\n\nhttps://github.com/sauberfred/mopidy-notifier\n\nExtension for displaying track info as User Notifications in Mac OS X.\n\n\nMopidy-radio-de\n---------------\n\nhttps://github.com/hechtus/mopidy-radio-de\n\nExtension for listening to Internet radio stations and podcasts listed at\n`radio.de <http://www.radio.de/>`_, `rad.io <http://www.rad.io/>`_,\n`radio.fr <http://www.radio.fr/>`_, and `radio.at <http://www.radio.at/>`_.\n\n\nMopidy-Scrobbler\n----------------\n\nhttps://github.com/mopidy/mopidy-scrobbler\n\nExtension for scrobbling played tracks to Last.fm.\n\n\nMopidy-SomaFM\n-------------\n\nhttps://github.com/AlexandrePTJ/mopidy-somafm\n\nProvides a backend for playing music from the `SomaFM <http://somafm.com/>`_\nservice.\n\n\nMopidy-SoundCloud\n-----------------\n\nhttps://github.com/mopidy/mopidy-soundcloud\n\nProvides a backend for playing music from the `SoundCloud\n<http://www.soundcloud.com/>`_ service.\n\n\nMopidy-Spotify\n--------------\n\nhttps://github.com/mopidy/mopidy-spotify\n\nExtension for playing music from the `Spotify <http://www.spotify.com/>`_ music\nstreaming service.\n\n\nMopidy-Subsonic\n---------------\n\nhttps://github.com/rattboi/mopidy-subsonic\n\nProvides a backend for playing music from a `Subsonic Music 
Streamer\n<http://www.subsonic.org/>`_ library.\n" }, { "alpha_fraction": 0.7252747416496277, "alphanum_fraction": 0.7252747416496277, "avg_line_length": 27.4375, "blob_id": "cdf13549a1db79e7bcecfcf42564ab06f95851ea", "content_id": "6ef7b4107435faa8cbfe99369ee10bebf3964b4d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "permissive", "max_line_length": 70, "num_lines": 16, "path": "/mopidy/backends/local/playback.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport logging\n\nfrom mopidy.backends import base\n\nfrom . import translator\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalPlaybackProvider(base.BasePlaybackProvider):\n def change_track(self, track):\n track = track.copy(uri=translator.local_track_uri_to_file_uri(\n track.uri, self.backend.config['local']['media_dir']))\n return super(LocalPlaybackProvider, self).change_track(track)\n" }, { "alpha_fraction": 0.6974790096282959, "alphanum_fraction": 0.6974790096282959, "avg_line_length": 30.733333587646484, "blob_id": "9c2f87df94ff5ed98bd784b5b4038c7d7b4782cc", "content_id": "ee4735e746abf6697acc947eddbfabb452cbdf7b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "permissive", "max_line_length": 78, "num_lines": 30, "path": "/mopidy/backends/listener.py", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom mopidy import listener\n\n\nclass BackendListener(listener.Listener):\n \"\"\"\n Marker interface for recipients of events sent by the backend actors.\n\n Any Pykka actor that mixes in this class will receive calls to the methods\n defined here when the corresponding events happen in the core actor. This\n interface is used both for looking up what actors to notify of the events,\n and for providing default implementations for those listeners that are not\n interested in all events.\n\n Normally, only the Core actor should mix in this class.\n \"\"\"\n\n @staticmethod\n def send(event, **kwargs):\n \"\"\"Helper to allow calling of backend listener events\"\"\"\n listener.send_async(BackendListener, event, **kwargs)\n\n def playlists_loaded(self):\n \"\"\"\n Called when playlists are loaded or refreshed.\n\n *MAY* be implemented by actor.\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.6693440675735474, "alphanum_fraction": 0.6840696334838867, "avg_line_length": 26.66666603088379, "blob_id": "e086e08c72be881e4e63800adf7d11ca8780e397", "content_id": "266545b2080b04d55c1f22494c2fc3cfcbb61df7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 747, "license_type": "permissive", "max_line_length": 79, "num_lines": 27, "path": "/docs/running.rst", "repo_name": "simon-kuzin/mopidy", "src_encoding": "UTF-8", "text": "**************\nRunning Mopidy\n**************\n\nTo start Mopidy, simply open a terminal and run::\n\n mopidy\n\nFor a complete reference to the Mopidy commands and their command line options,\nsee :ref:`mopidy-cmd`.\n\nWhen Mopidy says ``MPD server running at [127.0.0.1]:6600`` it's ready to\naccept connections by any MPD client. 
Check out our non-exhaustive\n:doc:`/clients/mpd` list to find recommended clients.\n\n\nStopping Mopidy\n===============\n\nTo stop Mopidy, press ``CTRL+C`` in the terminal where you started Mopidy.\n\nMopidy will also shut down properly if you send it the TERM signal, e.g. by\nusing ``kill``::\n\n kill `ps ax | grep mopidy | grep -v grep | cut -d' ' -f1`\n\nThis can be useful e.g. if you create an init script for managing Mopidy.\n" } ]
16
carlosperate/mkdocs-awesome-list-plugin
https://github.com/carlosperate/mkdocs-awesome-list-plugin
78b713e33ec272eb56f5479a6a6275ffef226520
13b2e7e6315c2f08adc7bb5c83641f44ba7e76fc
e2b7b1ac7a04eda7837a681fa2578b04f9727910
refs/heads/master
2020-06-24T07:03:05.564671
2019-07-26T00:30:58
2019-07-26T00:30:58
198,889,364
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5094073414802551, "alphanum_fraction": 0.5282220244407654, "avg_line_length": 32.74603271484375, "blob_id": "c407a3e1339a230303306ed6ef8a5fde7c228845", "content_id": "1bece758e53aac80b88d20b2e2bc49ddfdbf5e27", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2126, "license_type": "permissive", "max_line_length": 166, "num_lines": 63, "path": "/mkdocs_awesome_list_plugin/awesomelist.py", "repo_name": "carlosperate/mkdocs-awesome-list-plugin", "src_encoding": "UTF-8", "text": "import re\nimport sys\nimport uuid\nfrom mkdocs.plugins import BasePlugin\nfrom webpreview import web_preview\n\n\nHTML = \"\"\"\n<div style=\"\n display: block;\n height: 102px;\n padding: 0;\n margin: 10px;\n border-width: 1px;\n border-color: #bfbfbf;\n border-style: solid;\n border-radius: 5px;\n\">\n <div style=\"float: left; padding: 0; margin: 0;\">\n <img src=\"{}\" style=\"height: 100px; padding: 0; margin: 0; border: none; margin-right: 16px;\">\n </div>\n <div style=\"margin: 16px;\">\n <span style=\"font-weight: bold;\">{}</span><br>\n {}\n </div>\n</div>\n\"\"\"\n\nclass AwesomeList(BasePlugin):\n\n def __init__(self):\n super().__init__()\n self.social_cards = {}\n\n def on_page_markdown(self, markdown, **kwargs):\n copy = markdown\n extra_characters = 0\n for match in re.finditer(\"-[ ]\\[(.*?)\\]\\((.*?)\\)[ ]-[ ](.+?(?<=\\.))\", markdown):\n end_char = match.span()[1]\n full_match = match.group()\n items = match.groups()\n try:\n title, description, image = ('title', 'description', 'https://avatars0.githubusercontent.com/u/21085506?s=400&v=4') #web_preview(items[1], timeout=5)\n except KeyboardInterrupt as e:\n raise e\n except Exception as e:\n print(\"\\nError trying to retrieve data: {}\".format(e))\n print(\"\\tF: {}\".format(full_match))\n print(\"\\tT: {}\".format(items[0]))\n print(\"\\tU: {}\".format(items[1]))\n print(\"\\tD: {}\".format(items[2]))\n else:\n print(\".\", end=\" \")\n sys.stdout.flush()\n uniqueId = uuid.uuid4().hex\n self.social_cards[uniqueId] = HTML.format(image, title, description)\n injected_str = '{' + uniqueId +'}'\n copy = copy[:end_char + extra_characters] + injected_str + markdown[end_char:]\n extra_characters += len(injected_str)\n return copy\n\n def on_page_content(self, html, page, config, **kwargs):\n return html.format(**self.social_cards)\n" }, { "alpha_fraction": 0.754601240158081, "alphanum_fraction": 0.754601240158081, "avg_line_length": 19.375, "blob_id": "01fcf24f88f2eb71f4dd1dc229f57ab98ae8aedc", "content_id": "52354a927d49c38d226e8e5f785a5c8e4b49064a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 326, "license_type": "permissive", "max_line_length": 78, "num_lines": 16, "path": "/README.md", "repo_name": "carlosperate/mkdocs-awesome-list-plugin", "src_encoding": "UTF-8", "text": "# MkDocs Awesome List Plugin\n\nMkDocs Plugin to inject social media cards for each entry in an awesome-list. 
\n\nTo use this plugin install it with pip in the same environment as MkDocs:\n\n```\npip install MkDocsAwesomeListPlugin\n```\n\nThen add the following entry to the MkDocs config file:\n\n```yml\nplugins:\n - awesome-list\n```\n" }, { "alpha_fraction": 0.5953757166862488, "alphanum_fraction": 0.611890971660614, "avg_line_length": 33.599998474121094, "blob_id": "6689bfeb7b935183731a370229ab2a32695e1777", "content_id": "e0351901f03f1d9e1d360955a0016bfe411be48c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "permissive", "max_line_length": 96, "num_lines": 35, "path": "/setup.py", "repo_name": "carlosperate/mkdocs-awesome-list-plugin", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nsetup(\n name=\"MkDocsAwesomeListPlugin\",\n version=\"0.1.0\",\n description=\"MkDocs Plugin to inject social media cards for each entry in an awesome-list.\",\n long_description=readme,\n keywords=[\"mkdocs\", \"plugin\", \"awesome\", \"list\"],\n author=\"Carlos Pereira Atencio\",\n author_email=\"[email protected]\",\n url=\"https://github.com/carlosperate/mkdocs-awesome-list-plugin\",\n license=\"MIT license\",\n packages=[\"mkdocs_awesome_list_plugin\"],\n install_requires=[\"mkdocs\", \"webpreview>=1.6.0\"],\n python_requires=\">=3.4, <4\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n # This entry point is necessary for MkDocs to be able to use the plugin\n entry_points={\n 'mkdocs.plugins': [\n 'awesome-list = mkdocs_awesome_list_plugin.awesomelist:AwesomeList',\n ]\n },\n)\n" } ]
3
Dared46/PBL
https://github.com/Dared46/PBL
27f3038c17c5adf9048c57c09193fef41dac3c4b
be3f3c5fd12c8a634ee22cc1822c7ec1b2078d36
70c9f37ecab64cc00d06deeb04d4942f7cde87fb
refs/heads/master
2022-04-19T23:11:15.533852
2020-04-17T05:10:06
2020-04-17T05:10:06
256,405,737
0
0
null
2020-04-17T05:02:09
2020-04-17T04:59:02
2020-04-17T00:24:23
null
[ { "alpha_fraction": 0.6589940190315247, "alphanum_fraction": 0.676896870136261, "avg_line_length": 26.08888816833496, "blob_id": "2c0fc608a8adebb1d2c454069bd6e33b8205ad9a", "content_id": "930ce6949b81661ea1ff3cc47cb7fa1479a54976", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "permissive", "max_line_length": 96, "num_lines": 45, "path": "/scripts/device.py", "repo_name": "Dared46/PBL", "src_encoding": "UTF-8", "text": "import socket, time\nimport _thread as thread\n\n#socket configuration and instantiation\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#definition of the IP and port\ns.connect(('localhost', 1234))\nprint('conectado com exito')\n#initial device status\nstatus = 'desligado'\n#device type, which helps the server tell apart clients and devices\ntipo = 'd'\n\ns.send(bytes(tipo, 'utf-8'))\ndata = s.recv(1024)\n\na = data.decode('utf-8')\n\ns.send(bytes(status, 'utf-8'))\n\n#function started as a thread to constantly receive data from the server\n#it is responsible for receiving commands\ndef listen():\n global status\n while True:\n data = s.recv(1024)\n a = data.decode('utf-8')\n status = a\n print('dados atualizados: '+ a)\n\n\nthread.start_new_thread(listen, ())\n\n#periodically toggles the status and sends it to the server\nwhile True:\n #sends data every 6 seconds\n time.sleep(6)\n\n if (status == 'desligado'):\n status = 'ligado'\n s.send(bytes(status, 'utf-8'))\n\n elif (status == 'ligado'):\n status = 'desligado'\n s.send(bytes(status, 'utf-8'))" }, { "alpha_fraction": 0.5895918607711792, "alphanum_fraction": 0.6022449135780334, "avg_line_length": 36.128787994384766, "blob_id": "72500d1769053a7c29bb71a17498ac940e2cccf0", "content_id": "695bc4d4b91ed0b83c2491a025332ca69aefa8395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4946, "license_type": "permissive", "max_line_length": 145, "num_lines": 132, "path": "/scripts/server.py", "repo_name": "Dared46/PBL", "src_encoding": "UTF-8", "text": "import socket\nimport _thread as thread\nimport time\n\n#list of connected devices, holding the id, status and address\nlist_device = []\n#initial server status\nstatus = 'sem dispositivos'\n#variable that controls the sending of commands\ncomandos = 0\n\n#server instantiation and configuration, passing the IP and port as parameters\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('localhost',1234))\ns.listen(5)\n\n#function responsible for handling client requests\ndef manager_client(clientsocket):\n #these variables are specific to each thread\n topics = ''\n client_topics = []\n global status\n global comandos\n\n while True:\n #loop responsible for receiving operation requests and starting their execution\n data = clientsocket.recv(1024)\n a = data.decode('utf-8')\n #conditionals that determine which action the user wants to perform\n if(a == '1'):\n #sends the status of the topics the client is subscribed to\n if (len(list_device) > 0):\n for i in range(len(list_device)):\n if(i in client_topics):\n topics += 'lampada '+ str(list_device[i][1]) +': '+ str(list_device[i][0]) + '\n'\n\n clientsocket.send(bytes(topics, 'utf-8'))\n topics = ''\n print('Dados enviados com sucesso')\n else:\n a = 'Voce nao esta inscrito em topicos'\n clientsocket.send(bytes(a, 'utf-8'))\n\n elif (a == '2'):\n
#executes commands, sending information to be updated on the publisher\n if (comandos ==0):\n comandos = 1\n else:\n comandos = 0\n\n elif (a == '3'):\n #sends the list of lamps connected to the server\n if (len(list_device)>0):\n for i in range(len(list_device)):\n topics += 'lampada '+ str(list_device[i][1]) + '\n'\n\n clientsocket.send(bytes(topics, 'utf-8'))\n topics = ''\n print('Dados enviados com sucesso')\n\n else:\n a = 'Nao ha dispositivos conectados'\n clientsocket.send(bytes(a, 'utf-8'))\n\n elif(a=='4'):\n #adds a given topic to this thread's own topic list; the client chooses it, sends it to this thread and the thread appends it to the list\n data = clientsocket.recv(1024)\n topico = data.decode('utf-8')\n client_topics.append(int(topico))\n print('cliente se conectou a lampada '+ topico)\n\n elif (a == '5'):\n #removes topics from the thread's topic list\n data = clientsocket.recv(1024)\n topico = data.decode('utf-8')\n del(client_topics[int(topico)])\n print('cliente se conectou a lampada ' + topico)\n\n#function responsible for handling the data traffic between the server and the lamps\ndef manager_device(clientsocket, address):\n global status\n clientsocket.send(bytes(status, 'utf-8'))\n data = clientsocket.recv(1024)\n a = data.decode('utf-8')\n status = a\n\n #id of each lamp\n id = len(list_device)\n #list that stores the id, status and address of the lamp handled by the running thread\n device=[]\n device.append(a)\n device.append(id)\n device.append(address)\n list_device.append(device)\n\n #printing the status of the connected lamps to the terminal\n for i in range(len(list_device)):\n print('Dispositivo ' + str(list_device[i][1]) + ' : ' + list_device[i][0])\n\n #loop that keeps receiving updates from the publisher and updating the list\n while True:\n time.sleep(6)\n data = clientsocket.recv(1024)\n b = data.decode('utf-8')\n list_device[id][0] = b\n #when the user issues the command this if is triggered and sends the command to the lamps\n if (comandos == 1):\n clientsocket.send(bytes(status, 'utf-8'))\n for i in range (len(list_device)):\n print('Dispositivo ' + str(list_device[i][1]) +' : ' + str(list_device[i][0]))\n\n#function that handles client and device connections\ndef inicia():\n global list_device\n while True:\n #accepts the connection and creates a socket for it\n clientsocket, address = s.accept()\n print('server conectado por: ', address)\n data = clientsocket.recv(1024)\n tipo = data.decode('utf-8')\n #if it is a regular client, start a new client thread\n if (tipo == 'c'):\n print('Tipo: Cliente')\n thread.start_new_thread(manager_client,(clientsocket,))\n #if it is a device, start a new device thread\n elif (tipo == 'd'):\n print('Tipo: Dispositivo')\n thread.start_new_thread(manager_device, (clientsocket, address))\n\n\n\ninicia()" }, { "alpha_fraction": 0.804347813129425, "alphanum_fraction": 0.804347813129425, "avg_line_length": 45, "blob_id": "83385bfb49ac82c5b3d006e2074e70fd68eea3b6", "content_id": "58a110b498825904eb65a34f6fbf5679973e320f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 93, "license_type": "permissive", "max_line_length": 85, "num_lines": 2, "path": "/README.md", "repo_name": "Dared46/PBL", "src_encoding": "UTF-8", "text": "# PBL\n Development of a Broker server, whose purpose is to send commands to devices.\n" }, { "alpha_fraction": 0.5647448301315308, "alphanum_fraction": 0.5808128714561462, "avg_line_length":
30.597015380859375, "blob_id": "2a628e22ec7aead6bbd5e7deee8da89e5a2fa8dc", "content_id": "42c421de44f4c1c3331c4973735d230085277d47", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2141, "license_type": "permissive", "max_line_length": 114, "num_lines": 67, "path": "/scripts/client.py", "repo_name": "Dared46/PBL", "src_encoding": "UTF-8", "text": "import socket\n\n#list of devices the client is subscribed to\nmy_topics = []\n\n#socket instantiation and configuration\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect(('localhost', 1234))\n\n#variable specifying that this is a client, not a device\ntipo = 'c'\ns.send(bytes(tipo, 'utf-8'))\n\n#application menu\nwhile True:\n\n print('1-Ver status, 2-ligar/Desligar, 3 - listar lampadas, 4-Adicionar topicos, 5 - Remover topicos, q-Sair')\n b = input('Digite uma opção a ser realizada: ')\n print('\n')\n\n if b == 'q': break\n #requests the status of the subscribed topics\n elif (b =='1'):\n if(len(my_topics)>0):\n s.send(bytes(b, 'utf-8'))\n data = s.recv(1024)\n print(data.decode('utf-8'))\n else:\n (print ('Você não está inscrito em topicos \n\n'))\n\n #sends commands to the devices\n elif (b=='2'):\n s.send(bytes(b, 'utf-8'))\n print('Comando enviado \n')\n\n #requests a list of all lamps connected to the server\n elif (b=='3'):\n s.send(bytes(b, 'utf-8'))\n data = s.recv(1024)\n print(data.decode('utf-8')+'\n')\n\n #sends a string with the number of the lamp the client wants to subscribe to\n elif (b == '4'):\n s.send(bytes(b, 'utf-8'))\n b = input('Digite o numero da lampada que deseja se conectar: ')\n print('\n')\n my_topics.append(b)\n s.send(bytes(b, 'utf-8'))\n print('Voce agora está conectado à lampada ' + b + '\n\n')\n\n #sends a string with the number of the lamp the client wants to unsubscribe from\n elif (b == '5'):\n if (len(my_topics) > 0):\n s.send(bytes(b, 'utf-8'))\n b = input('Digite o numero da lampada que deseja se desconectar: ')\n print('\n')\n s.send(bytes(b, 'utf-8'))\n print('voce agora esta desconectado à lampada ' + b + '\n\n')\n b = int(b)\n del (my_topics[b])\n else:\n print('Não há lâmpadas conectadas \n\n')\n\n else:\n print('Digite uma opção válida \n\n')\n#closes the connection\ns.close()" } ]
4
Tocha4/make_my_own_neural_network
https://github.com/Tocha4/make_my_own_neural_network
681b7dd774de4a052d7e9ca00e33c85a29b528ee
f370b448f50e9bb2b47625b1c1d25f621aeb6aa1
d260caa08b4c1b8cf05dfe2d02666e288189ccfb
refs/heads/master
2021-05-01T12:00:51.214252
2018-02-14T17:48:07
2018-02-14T17:48:07
121,120,973
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5724583864212036, "alphanum_fraction": 0.5965697169303894, "avg_line_length": 41.33695602416992, "blob_id": "f8c7493b107fe3241f95ce9c5895f4fa7a5029c3", "content_id": "f92e02543c7a6c61473a978e4c6851ad8e8f9cc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4023, "license_type": "no_license", "max_line_length": 122, "num_lines": 92, "path": "/neuralNetwork_obj.py", "repo_name": "Tocha4/make_my_own_neural_network", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.special as ssp\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage.interpolation import rotate\n\n\nclass neuralNetwork():\n    \n    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n        self.inodes = inputnodes\n        self.hnodes = hiddennodes\n        self.onodes = outputnodes\n        self.lr = learningrate\n        self.wih = np.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n        self.who = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n        self.activation_function = lambda x: ssp.expit(x)\n    \n    def train(self, input_list, target_list):\n        inputs = np.array(input_list, ndmin=2).T\n        targets = np.array(target_list, ndmin=2).T\n        #calculate \n        hidden_inputs = np.dot(self.wih, inputs)\n        hidden_outputs = self.activation_function(hidden_inputs)\n        final_inputs = np.dot(self.who, hidden_outputs)\n        final_outputs = self.activation_function(final_inputs)\n        \n        # calculate the errors for the output and for the hidden_nodes\n        outputs_error = targets - final_outputs\n        hidden_error = np.dot(self.who.T, outputs_error)\n\n        # backpropagation\n        self.who += self.lr * np.dot(outputs_error*final_outputs*(1-final_outputs), hidden_outputs.T)\n        self.wih += self.lr * np.dot(hidden_error*hidden_outputs*(1-hidden_outputs), inputs.T)\n    \n    def query(self, input_list): \n        inputs = np.array(input_list, ndmin=2).T\n        #calculate \n        hidden_inputs = np.dot(self.wih, inputs)\n        hidden_outputs = self.activation_function(hidden_inputs)\n        final_inputs = np.dot(self.who, hidden_outputs)\n        final_outputs = self.activation_function(final_inputs)\n        \n        return final_outputs\n    \n\nif __name__=='__main__':\n    \n    input_nodes = 784\n    hidden_nodes = 800\n    output_nodes = 10\n    learning_rate = 0.05\n    nn = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n\n    path = r'/home/anton/Schreibtisch/DataScienceTraining/01_basics/mnist_data/mnist_train.csv'\n    \n    # training the neural network\n    training_data_file = open(path, 'r')\n    training_data_list = training_data_file.readlines()\n    training_data_file.close()\n    epoch = 5\n    for e in range(epoch):\n        for record in training_data_list:\n            all_values = record.split(',')\n            inputs = np.asfarray(all_values[1:])/255*0.99+0.01\n            targets = np.zeros(output_nodes) +0.01\n            targets[int(all_values[0])] = 0.99\n            nn.train(inputs, targets)\n#            inputs_plus10 = rotate(inputs.reshape((28,28)), 10, cval=0.01, reshape=False)\n#            nn.train(inputs_plus10.reshape(784), targets)\n#            inputs_minus10 = rotate(inputs.reshape((28,28)), -10, cval=0.01, reshape=False)\n#            nn.train(inputs_minus10.reshape(784), targets)            \n        print('epoch: {} done.'.format(e))\n    \n    #%%\n    with open(r'/home/anton/Schreibtisch/DataScienceTraining/01_basics/mnist_data/mnist_test.csv', 'r') as test_data_file:\n        test_data_list = test_data_file.readlines()\n    accuracy = []\n    for record in test_data_list:\n        all_values = record.split(',')\n        inputs = 
np.asfarray(all_values[1:])/255*0.99+0.01\n targets = all_values[0]\n \n \n output = nn.query(inputs)\n probability = output.max()/output.sum()\n if int(targets) == np.argmax(output):\n accuracy.append(1)\n else: \n accuracy.append(0)\n# print('correct, network, probability = {}, {}, {}'.format(targets,np.argmax(output),probability))\n print('Test error rate: {}'.format(100*(1-np.sum(accuracy)/len(accuracy))))\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n " }, { "alpha_fraction": 0.4757281541824341, "alphanum_fraction": 0.48786407709121704, "avg_line_length": 13.75, "blob_id": "be992c81a75af714e181603966c74a49f974441b", "content_id": "f6c0db83c0b85c49066d31c754a890dbe61e2ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 41, "num_lines": 28, "path": "/gspo_multiprocessing.py", "repo_name": "Tocha4/make_my_own_neural_network", "src_encoding": "UTF-8", "text": "from multiprocessing import Pool, Process\nimport time\nimport os\n\n\n\n\ndef f(x):\n if x %2 == 0:\n time.sleep(1)\n print(x)\n else: print(x)\n return x*x\n\nif __name__ == '__main__':\n \n \n start = time.time()\n \n jobs = []\n for i in range(10):\n p = Process(target=f, args=(i,))\n jobs.append(p)\n p.start()\n\n \n ende = time.time() - start\n print(ende)" } ]
2
chapkovski/dishonesty
https://github.com/chapkovski/dishonesty
02e2a6c6f6574559665007b6a9e50417d4fea403
4ed88fe401095c5702f43b7b7a3b4774d0352edd
5eb054c324d53dc69e94c6313fbe38b167fd56e6
refs/heads/master
2021-01-25T10:35:27.916892
2018-03-12T00:37:07
2018-03-12T00:37:07
123,362,171
0
0
null
2018-03-01T01:00:25
2018-02-27T23:36:16
2018-02-28T00:14:04
null
[ { "alpha_fraction": 0.5979381203651428, "alphanum_fraction": 0.6038291454315186, "avg_line_length": 34.76315689086914, "blob_id": "49003394fa5174a6e0463fd57d989da9e6cda079", "content_id": "7346f8415c876064cf7e867ccd0a08a512ec163a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1358, "license_type": "permissive", "max_line_length": 120, "num_lines": 38, "path": "/dishonesty_app/templates/dishonesty_app/DGInstructions.html", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "{% extends \"global/Page.html\" %}\n{% load otree_tags staticfiles %}\n\n{% block title %}\n Second Phase : The Allocation Task\n{% endblock %}\n\n{% block content %}\n <div class=\"instructions well well-lg\">\n <p>DEBUG::</p>\n <h3>Average productivity:</h3>\n <h3>{{ session.vars.avgcatch}}</h3>\n <h3>Average payoff at RET:</h3>\n <h3>{{ session.vars.avgpoints }}</h3>\n\n <p>\n You are now in the second phase of our experiment, you are still paired randomly and anonymously with\n another participant.</p>\n\n <p>\n In this round, both the sender and the receiver will share {{ Constants.endowment }} lab token. <strong> The\n Sender </strong> will decide how much <strong> the sender </strong>\n will retain. Then the rest will go to <strong>the receiver</strong>.\n\n {% if subsession.is_bonus_treatment %}\n The receiver, however, will receive\n additional {{ subsession.bonus }} irrespective the amount of endowment you share with them.\n {% endif %}\n\n\n Prior to making a decision, our algorithm will generate a number that determines the proportion of your\n share. This information is only observed by <strong>the sender</strong>.\n </p>\n </div>\n\n {% next_button %}\n\n{% endblock %}" }, { "alpha_fraction": 0.6831976175308228, "alphanum_fraction": 0.6854181885719299, "avg_line_length": 35.5405387878418, "blob_id": "34579cf699dbabd010bfe418cf14ddbc93ec1e53", "content_id": "8a88c81f90a27c88b61b5890ada707f17682bf8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1351, "license_type": "permissive", "max_line_length": 120, "num_lines": 37, "path": "/dishonesty_app/templates/dishonesty_app/includes/receiverInstructions.html", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "{% load otree_tags staticfiles %}\n\n<div class=\"instructions well well-lg\">\n\n\n <p>\n You are now paired randomly and anonymously with another participant. Our algorithm\n will generate a number that determines the proportion of your endowment share. Your co-participant, <strong> the\n sender </strong> allocates the proportion of shared\n lab token and privately observes the random number displayed on their screen.\n </p>\n\n <p>\n You are <strong> the receiver. </strong></p>\n\n <p>\n You will receive a share of {{ Constants.endowment }} given by your co-participant (the sender). The Sender will\n decide how much she or he will retain. 
Each of lab token share retrieved by the sender will be multiplied\n by <strong>2</strong>.\n The table below exhibit the relative benefit enjoyed by the Sender and the receiver in each possible allocation\n chosen by the sender.\n </p>\n\n\n {% include 'dishonesty_app/includes/allocation_table.html' %}\n\n <p>\n\n As <strong> the receiver </strong>, we would like you to <strong> guess </strong> the amount of allocated\n lab token by\n your co-participant given the different possible random number indicated by our algorithm\n\n Every correct guess will be rewarded 10 lab token to your wallet\n\n </p>\n\n</div>" }, { "alpha_fraction": 0.5857418179512024, "alphanum_fraction": 0.5969171524047852, "avg_line_length": 29.987951278686523, "blob_id": "c13024ffefecd6a7ac0bc2e6cacc7519b24a0ed7", "content_id": "f7cce000196a743130b7c6045ebc33eabb075de9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2595, "license_type": "permissive", "max_line_length": 92, "num_lines": 83, "path": "/ball_catch/models.py", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer, WaitPage,\n Currency as c, currency_range\n)\nimport random\nimport csv\nimport itertools\n\nauthor = \"rap\"\n\ndoc = \"\"\"\nBall-Catching Task\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'ball_catch3'\n players_per_group = None\n\n num_rounds = 1\n\n prize_and_cost = [1, 2, 3, 4]\n\n\n\nclass Subsession(BaseSubsession):\n def creating_session(self):\n # alternate assignment to blue or red before first round\n colors = itertools.cycle(['blue', 'red'])\n if self.round_number == 1:\n for p in self.get_players():\n p.participant.vars['color'] = next(colors)\n\n\n if self.round_number == 1:\n for p in self.get_players():\n my_prize_and_cost = Constants.prize_and_cost.copy()\n random.shuffle(my_prize_and_cost)\n p.participant.vars['my_prize_and_cost'] = my_prize_and_cost\n\n for p in self.get_players():\n p.condition = p.participant.vars['my_prize_and_cost'][self.round_number - 1]\n if p.condition <= 2:\n p.prize = 10\n p.cost = 5 * (p.condition - 1)\n else:\n p.prize = 20\n p.cost = 5 * (p.condition - 2)\n \n\n\n\nclass Group(BaseGroup):\n total_catch = models.IntegerField(doc=\"\"\"the total amount of catches\"\"\", initial=0)\n total_income = models.IntegerField(doc=\"\"\"the total amount of payoff\"\"\", initial=0)\n avg = models.IntegerField(doc=\"\"\"the total amount of catches\"\"\", initial=0)\n avgincome = models.IntegerField(doc=\"\"\"the total amount of payoff\"\"\", initial=0)\n \n\n\nclass Player(BasePlayer):\n\n condition = models.IntegerField()\n prize = models.IntegerField()\n cost = models.IntegerField()\n catches = models.IntegerField(doc=\"\"\"the amount of catches\"\"\",initial=0)\n clicks = models.IntegerField()\n score = models.IntegerField(doc=\"\"\"the amount of score\"\"\", initial=0)\n expense = models.IntegerField(doc=\"\"\"cost of clicking\"\"\", initial=0)\n catches2=models.IntegerField(doc=\"\"\"try to find out what is inside\"\"\", initial=0)\n\n \n def role(self):\n if self.participant.vars['color']==\"red\" :\n return 'idle'\n if self.participant.vars['color']==\"blue\":\n return 'worker'\n \n\n def set_payoff(self):\n self.payoff = self.score - self.expense\n self.participant.vars['output2'] = self.catches\n self.participant.vars['income2']=self.payoff\n \n\n \n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6353302597999573, 
"alphanum_fraction": 0.6374807953834534, "avg_line_length": 29.13888931274414, "blob_id": "f42981fa95db806702062704bd35977d6fff50a4", "content_id": "6d1ee48210b8aecfc8e722fbc6271cb2ba097ed5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3255, "license_type": "permissive", "max_line_length": 118, "num_lines": 108, "path": "/dishonesty_app/pages.py", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "from . import models\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\nfrom .forms import GuessFormSet\n\n\nclass SenderPage(Page):\n def is_displayed(self):\n return self.player.role() == 'sender'\n\n\n# TODO: I temporarily make it visible to everybody for RET debugging purposes. switch back to\n# TODO: SenderPage class later\nclass DGInstructions(Page):\n ...\n\n\nclass AllocationInfo(SenderPage):\n ...\n\n\nclass DGInstructions2(Page):\n def vars_for_template(self):\n intro_text = \"dishonesty_app/includes/{}Instructions.html\".format(self.player.role())\n a = list(Constants.kept_choices)\n b = [i * 2 for i in a]\n c = [int(Constants.endowment - i) for i in a]\n return {\n 'introduction': intro_text,\n 'a': a, 'b': b, 'c': c,\n }\n\n\nclass Offer(SenderPage):\n form_model = models.Group\n form_fields = ['kept', 'should_keep']\n\n\nclass ResultsWaitPage(WaitPage):\n def after_all_players_arrive(self):\n self.group.set_payoffs()\n\n def vars_for_template(self):\n if self.player.role() == 'receiver':\n body_text = \"You are participant 2. Waiting for participant 1 to decide.\"\n else:\n body_text = 'Please wait'\n return {'body_text': body_text}\n\n\nclass GuessStrategy(Page):\n def is_displayed(self):\n return self.player.role() == 'receiver'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['formset'] = GuessFormSet(instance=self.player)\n return context\n\n def post(self):\n self.object = self.get_object()\n self.form = self.get_form(\n data=self.request.POST, files=self.request.FILES, instance=self.object)\n\n formset = GuessFormSet(self.request.POST, instance=self.player)\n\n if not formset.is_valid():\n context = self.get_context_data()\n context['formset'] = formset\n self.form.add_error(None, 'all fields are required!')\n context['form'] = self.form\n return self.render_to_response(context)\n formset.save()\n return super().post()\n\n def before_next_page(self):\n self.player.dump_guess_answer = self.player.dumping_answer()\n sender_decision = Constants.endowment - self.group.kept\n alloc_to_guess = self.group.get_player_by_role('sender').participant.vars['allocation']\n receiver_guess = self.player.guesses.get(sender_choice=alloc_to_guess).answer\n diff = sender_decision - receiver_guess\n self.group.receiver_guess = receiver_guess\n self.group.diff_guess = diff\n\n\nclass Results(Page):\n def offer(self):\n return Constants.endowment - self.group.kept\n\n def bonus2(self):\n return Constants.endowment - self.group.kept + self.subsession.bonus * self.subsession.bonus_multiplier\n\n def vars_for_template(self):\n return {\n 'offer': Constants.endowment - self.group.kept,\n 'bonus2': Constants.endowment - self.group.kept + self.subsession.bonus * self.subsession.bonus_multiplier\n }\n\n\npage_sequence = [\n DGInstructions,\n AllocationInfo,\n DGInstructions2,\n Offer,\n ResultsWaitPage,\n GuessStrategy,\n Results\n]\n" }, { "alpha_fraction": 0.6907962560653687, "alphanum_fraction": 0.694932758808136, "avg_line_length": 
36.153846740722656, "blob_id": "215e742f8ed3cc5fdbf24e0755eaf16688413e4a", "content_id": "16c5bdaf2246460b4f30554b49d25eeffc8bd123", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 967, "license_type": "permissive", "max_line_length": 133, "num_lines": 26, "path": "/dishonesty_app/templates/dishonesty_app/AllocationInfo.html", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "{% extends \"global/Page.html\" %}\n{% load staticfiles otree_tags %}\n\n{% block title %}\n Suggested Proportion of Lab Token\n{% endblock %}\n\n{% block content %}\n <p>\n <h3 class=\"panel-sub-heading\">\n {{ participant.vars.randnumber }}\n </h3> is a number randomly generated by our algorithm. This information is only accessible to you as the Sender.\n Your co-participant will never observe or exposed to this information. <strong>This random number is your basis of\n allocating your shared lab token with\n your co-participant </strong>. The amount of shared lab token is <strong>{{ Constants.endowment }} </strong>, you should allocate\n {{ participant.vars.randnumber }}\n multiplied by a multiplicative factor of {{ Constants.rand_multiplier }}.\ntherefore <strong>you should allocate</strong> <h3 class=\"panel-sub-heading\"> {{ participant.vars.allocation }} </h3> to your\n co-participant.\n\n </p>\n\n\n {% next_button %}\n\n{% endblock %}\n\n" }, { "alpha_fraction": 0.636162519454956, "alphanum_fraction": 0.6458165645599365, "avg_line_length": 40.43333435058594, "blob_id": "b1a1ec9198c29281663e440772dbcd1f5a10a2c9", "content_id": "429e47cae2dceb7bc5e31d9f50cded9d6cc8b6c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4972, "license_type": "permissive", "max_line_length": 132, "num_lines": 120, "path": "/dishonesty_app/models.py", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport random\nfrom django.db import models as djmodels\nimport json\n\ndoc = \"\"\"\nOne player decides how to divide a certain amount between himself and the other\nplayer.\n\nthere are four possible treatments:\nmydictator_2 : dictator + moral prime, with Random Number 'n' and endowment X\nmydictator _2b mydictator_2+ bonus for the receiver M \nmydictator _2c mydictator_2 + bonus for the receiver M*2\nmydictator _3x mydictator_2 + info of relative performance of the receiver in the effort game \n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'dshnst'\n players_per_group = 2\n num_rounds = 1\n endowment = c(100)\n kept_choices = range(10, 101, 10)\n rand_multiplier = 10\n offer_increment = c(10)\n offer_choices = currency_range(10, endowment, offer_increment)\n\n\nclass Subsession(BaseSubsession):\n is_real_effort_task_treatment = models.BooleanField(doc='true if b part is real effort treatment')\n is_bonus_treatment = models.BooleanField(doc='true if bonus part is in treatment')\n bonus_multiplier = models.FloatField(doc='the multiplier of bonus for a recipient. Is used in some treatments')\n bonus = models.CurrencyField(doc=\"\"\"how large is the bonus that should be paid to recipient. For \n baseline treatment it is 0. 
For treatments with bonus (2b, 2c) it is a\n fixed amount or doubled fixed amount\"\"\",\n initial=0)\n\n def creating_session(self):\n self.is_real_effort_task_treatment = self.session.config.get('is_real_effort_task_treatment', False)\n if self.is_real_effort_task_treatment:\n assert 'ball_catch' in self.session.config.get('app_sequence'), 'RET should be in apps for this treatment'\n self.bonus = self.session.config.get('bonus', 0)\n self.is_bonus_treatment = True if self.bonus > 0 else False\n self.bonus_multiplier = self.session.config.get('bonus_multiplier', 1)\n if self.round_number == 1:\n for p in self.session.get_participants():\n p.vars['randnumber'] = random.randint(1, 10)\n p.vars['allocation'] = p.vars['randnumber'] * 10\n\n for p in self.get_players():\n for ch in Constants.offer_choices:\n p.guesses.create(sender_choice=ch)\n\n\ndef question(amount):\n return 'How much do you think the sender will allocate the lab token to you if our algorithm suggested him to share {}?'.format(\n c(amount))\n\n\nclass Group(BaseGroup):\n kept = models.CurrencyField(\n doc=\"\"\"Amount sender decided to keep for himself\"\"\",\n min=0, max=Constants.endowment,\n verbose_name='I will keep (from 0 to %i)' % Constants.endowment\n )\n\n should_keep = models.CurrencyField(\n choices=Constants.kept_choices,\n doc=\"\"\"receiver kept\"\"\",\n verbose_name='I understand that the random number advises me to allocate this amount',\n widget=widgets.RadioSelectHorizontal()\n )\n receiver_guess = models.IntegerField(doc='to retrieve matching guess from Guesses model')\n diff_guess = models.IntegerField(doc='to store difference beteween the guess and actual sender decision')\n\n def set_payoffs(self):\n sender = self.get_player_by_role('sender')\n receiver = self.get_player_by_role('receiver')\n sender.payoff = self.kept * 2\n receiver.payoff = Constants.endowment - self.kept + self.subsession.bonus * self.subsession.bonus_multiplier\n\n\nclass Player(BasePlayer):\n dump_guess_answer = models.StringField()\n\n def dumping_answer(self):\n return json.dumps(list(self.guesses.all().values('sender_choice', 'answer')))\n\n def role(self):\n # TODO: the following try except ONLY for debugging. 
later on we should take it out\n try:\n if self.participant.vars['color'] == \"red\":\n return 'sender'\n if self.participant.vars['color'] == \"blue\":\n return 'receiver'\n except KeyError:\n if self.id_in_group % 2 == 0:\n return 'sender'\n else:\n return 'receiver'\n\n def get_partner_output(self):\n partner = self.get_others_in_group()[0]\n return partner.participant.vars['output2']\n\n def get_partner_productivity(self):\n partner = self.get_others_in_group()[0]\n return partner.participant.vars['income2']\n\n\nclass GuessChoice(djmodels.Model):\n sender_choice = models.IntegerField(doc='to show an option how much a Sender will send')\n answer = models.IntegerField(doc='to store Reciever answer of his guess',\n choices=Constants.offer_choices,\n null=True,\n )\n player = djmodels.ForeignKey(to=Player, related_name='guesses')\n" }, { "alpha_fraction": 0.5397196412086487, "alphanum_fraction": 0.5408878326416016, "avg_line_length": 31.923076629638672, "blob_id": "d1528e49e720ae29a9c2bc1fb30a15e266322eb4", "content_id": "61790518c6223c153479be7be72f0f86c168d90c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "permissive", "max_line_length": 86, "num_lines": 26, "path": "/dishonesty_app/forms.py", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "import django.forms as forms\nfrom .models import Player, GuessChoice, Constants\nfrom django.forms import inlineformset_factory\nfrom otree.api import widgets\n\n\nclass GuessForm(forms.ModelForm):\n class Meta:\n model = GuessChoice\n fields = ['answer']\n widgets = {\n 'answer': widgets.RadioSelectHorizontal,\n }\n\n def __init__(self, *args, **kwargs):\n print('I AM IN FORM INIT')\n super().__init__(*args, **kwargs)\n self.fields['answer'].empty_label = None\n self.fields['answer'].choices = ((int(i), i) for i in Constants.offer_choices)\n\n\nGuessFormSet = inlineformset_factory(Player, GuessChoice,\n extra=0,\n can_delete=False,\n form=GuessForm,\n )\n" }, { "alpha_fraction": 0.5935483574867249, "alphanum_fraction": 0.5944086313247681, "avg_line_length": 27.012048721313477, "blob_id": "fb8065ff1daa8e62e3883a488d245b4873b17b9e", "content_id": "ef4c6b35dcd5a64e44475fe91b0dab879d932d1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2325, "license_type": "permissive", "max_line_length": 110, "num_lines": 83, "path": "/ball_catch/pages.py", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "from otree.api import Currency as c, currency_range\nfrom . import models\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\n\n\nclass WorkerPage(Page):\n def is_displayed(self):\n return self.player.role() == 'worker'\n\n\nclass Introduction(Page):\n def vars_for_template(self):\n intro_text = \"ball_catch/Instructions.html\"\n return {\n 'introduction': intro_text,\n }\n\n\nclass Task(WorkerPage):\n form_model = 'player'\n form_fields = ['catches', 'clicks', 'score', 'expense']\n\n def vars_for_template(self):\n return {\n 'prize': self.player.prize,\n 'cost': self.player.cost,\n }\n\n def before_next_page(self):\n self.player.set_payoff()\n \n\n \n\n\nclass ResultsWaitPage(WaitPage):\n def after_all_players_arrive(self):\n # TODO: important! the following code may produce wrong results if we later switch to multi-round RET.\n # TODO: for one round version it's ok\n # Understood. 
I think one round is enough for me now\n # I also need to show the partner's (workers) productivity, in the sender page \n \n workers = [p for p in self.subsession.get_players() if p.role() == 'worker']\n total_catch = sum([p.catches for p in workers])\n avgcatch = round(total_catch / len(workers),1)\n self.session.vars['avgcatch'] = avgcatch\n total_income = sum([p.payoff for p in workers if p.payoff is not None])\n avgincome = round(total_income / len(workers),1)\n self.session.vars['avgpoints'] = avgincome\n \n \n\nclass Results(WorkerPage):\n ...\n\n\nclass Roundhistory(WorkerPage):\n def vars_for_template(self):\n round_history = []\n for me_prev_round in self.player.in_all_rounds():\n round_history.append({\n 'round_number': me_prev_round.round_number,\n 'me': me_prev_round,\n 'others': me_prev_round.get_others_in_group(),\n\n })\n\n other_player_ids = [p.id_in_group for p in self.player.get_others_in_group()]\n\n return {\n 'other_player_ids': other_player_ids,\n 'round_history': round_history\n }\n\n\npage_sequence = [\n Introduction,\n Task,\n ResultsWaitPage,\n Results,\n Roundhistory\n]\n" }, { "alpha_fraction": 0.5800130367279053, "alphanum_fraction": 0.5839320421218872, "avg_line_length": 26.836362838745117, "blob_id": "20058cd4c046f63ebc80cf946098bf3dc7fb4356", "content_id": "bdbd8dab5f0689f377cd145452aca8e593a4d31f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1531, "license_type": "permissive", "max_line_length": 82, "num_lines": 55, "path": "/quiz/views.py", "repo_name": "chapkovski/dishonesty", "src_encoding": "UTF-8", "text": "from otree.api import Currency as c, currency_range\nfrom . import models\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\n\nclass Introduction(Page):\n def is_displayed(self):\n return self.round_number == 1\n\n def vars_for_template(self):\n intro_text = \"quiz/Instructions.html\"\n a = list(Constants.kept_choices)\n b = [i * 2 for i in a]\n c = [int(Constants.endowment - i) for i in a]\n return {\n 'introduction': intro_text,\n 'a': a, 'b': b, 'c': c,}\nclass Question(Page):\n form_model = models.Player\n form_fields = ['submitted_answer']\n\n def submitted_answer_choices(self):\n qd = self.player.current_question()\n return [\n qd['choice1'],\n qd['choice2'],\n qd['choice3'],\n qd['choice4'],\n ]\n\n def error_message(self, values):\n if values ['submitted_answer']!= self.player.solution:\n return 'That is the incorrect answer. Try Again'\n\n def before_next_page(self):\n self.player.check_correct()\n\n\nclass Results(Page):\n def is_displayed(self):\n return self.round_number == Constants.num_rounds\n\n def vars_for_template(self):\n player_in_all_rounds = self.player.in_all_rounds()\n return {\n 'player_in_all_rounds': player_in_all_rounds,\n 'questions_correct': sum([p.is_correct for p in player_in_all_rounds])\n }\n\n\npage_sequence = [\n Introduction,\n Question,\n Results\n]\n" } ]
9
lauridev/TIEA345
https://github.com/lauridev/TIEA345
3d49b099480afb3f830cadc2bb1ac3ed2efe2882
13fbbcfe3f66736f0fa9f8d49c022b369b3a273d
9920a8f9c51b3754ef1332e8f8f01ebbc9ade682
refs/heads/master
2020-04-15T16:43:07.486316
2019-02-27T21:02:21
2019-02-27T21:02:21
164,847,605
0
0
null
2019-01-09T11:09:59
2019-01-09T11:10:02
2019-01-09T11:18:33
null
[ { "alpha_fraction": 0.7176308631896973, "alphanum_fraction": 0.7685950398445129, "avg_line_length": 30.565217971801758, "blob_id": "92818ad23cdc75e0e0f899d0380293b71c6a8ffd", "content_id": "0d88c1a125bb98688f3e16fba9c79062871a9fb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 67, "num_lines": 23, "path": "/demo4/demo46.py", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport numpy as np\nimport cv2\n\n#avataan kuvat\nimg1=cv2.imread(\"kuva3.jpg\",0)\nimg2=cv2.imread(\"kuva4.jpg\",0)\norb = cv2.ORB_create()\n#kp = orb.detect(img,None)\nkp1, des1 = orb.detectAndCompute(img1,None)\nkp2, des2 = orb.detectAndCompute(img2,None)\n#img2 = cv2.drawKeypoints(img,kp,None,color=(0,255,0),flags=0)\n\nbf =cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\nmatches =bf.match(des1,des2)\n\n#Jarjestaa yhtenevaisyydet yhdistavien pisteiden avulla\nmatches =sorted(matches, key =lambda x:x.distance)\n\n#Piirtaa kuvan jossa nakyy kahdeksan parasta yhtenevaisyytta\nimg3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:8],None,flags =2)\n#Tallentaa kuvan\nplt.imshow(img3),plt.savefig(\"tulos.png\")\n" }, { "alpha_fraction": 0.6549865007400513, "alphanum_fraction": 0.6967654824256897, "avg_line_length": 36.04999923706055, "blob_id": "05b2a95f7af27285abafb8ced26b55f6a7677d65", "content_id": "de789b4d31ecaa38487d3811a45753bc5e85f16d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 115, "num_lines": 20, "path": "/demo4/demo47.py", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2 \nfrom matplotlib import pyplot as plt\n\nimg=cv2.imread(\"kuva5.jpg\")\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nface_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_eye.xml')\n\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nfor(x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray =gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh)in eyes:\n cv2.rectangle(roi_color, (ex,ey),(ex+ew,ey+eh),(0,255,0),2)\nprint(faces)\n\nplt.imsave('naama.png',img)\n\n" }, { "alpha_fraction": 0.6656441688537598, "alphanum_fraction": 0.7024539709091187, "avg_line_length": 20.2608699798584, "blob_id": "0a42f259ad8d9e1b0d871de9127fac6c41dede13", "content_id": "469a5749e0287cc08b47d1435ec3bbc75e3c7f18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 981, "license_type": "no_license", "max_line_length": 47, "num_lines": 46, "path": "/demo5/liikenne.c", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <wiringPi.h>\n\n#define RED 18\n#define YELLOW 23\n#define GREEN 24\n#define PRED 17\n#define PGREEN 27\n#define BUTTON 22\n\nint main() {\n\t//Valmistetaan pinnit ottamaan vastaan \n\twiringPiSetupGpio();\n\tpinMode(RED,OUTPUT);\n\tpinMode(YELLOW, OUTPUT);\n\tpinMode(GREEN, OUTPUT);\n\tpinMode(PRED, OUTPUT);\n\tpinMode(PGREEN,OUTPUT);\n\tpinMode(BUTTON,INPUT);\n\t//pyörii jatkuvasti, ei pääse kohtaan return 0\n\twhile(1){\t\n\t\tdigitalWrite(GREEN,HIGH);\n\t\tdigitalWrite(PRED, 
HIGH);\n\t\t//kuuntelee nappulaa\n\t\tif(digitalRead(BUTTON)){\n\t\t\tdelay(3000);\n\t\t\tdigitalWrite(GREEN,LOW);\n\t\t\tdigitalWrite(YELLOW,HIGH);\n\t\t\tdelay(4000);\n\t\t\tdigitalWrite(YELLOW, LOW);\n\t\t\tdigitalWrite(RED,HIGH);\n\t\t\tdelay(2000);\n\t\t\tdigitalWrite(PRED, LOW);\n\t\t\tdigitalWrite(PGREEN,HIGH);\n\t\t\tdelay(8000);\n\t\t\tdigitalWrite(PGREEN,0);\n\t\t\tdigitalWrite(PRED, HIGH);\n\t\t\tdigitalWrite(YELLOW, HIGH);\n\t\t\tdigitalWrite(RED,LOW);\n\t\t\tdelay(2000);\n\t\t\tdigitalWrite(YELLOW,LOW);\n\t\t\tdigitalWrite(GREEN, HIGH);\n\t\t}\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.7352941036224365, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 16, "blob_id": "a7d057435b1c13a18784e5baef9192f58bbc439b", "content_id": "61b2c0f83964396fac27c102b9af806429d1d145", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/README.md", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "# TIEA345\nRepon testaus käynnissä\n" }, { "alpha_fraction": 0.7248908281326294, "alphanum_fraction": 0.7609170079231262, "avg_line_length": 27.625, "blob_id": "82941d1f6e2afdf85c91b8297d1e964fad856c5b", "content_id": "7a3aa3b837acb6e129f2d9dbef731c0cfeef917b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 115, "num_lines": 32, "path": "/demo3/demo32.py", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "#@author Lauri Poikolainen 24.1.2019\n#Sheets on jaettu opettajaryhmälle\n\nimport gspread\nimport Adafruit_DHT\nimport time\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nsensor = Adafruit_DHT.DHT11\npin=4\n#odotus sekunneissa kuinka usein päivittää sheetti\nodotus=300\n\nkello = time.time()\n#OAuth löytyy tuolta, heitetty FileZillalla sftp:n kautta\nscope = [\"https://spreadsheets.google.com/feeds\",\"https://www.googleapis.com/auth/drive\"]\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\"/home/pi/tiea345-lajopoik-bdce5badba64.json\",scope)\n\ngc = gspread.authorize(credentials)\nwks=gc.open(\"TIEA345 demo2\").sheet1\n#worksheetin lähtörivi\nrivi = 2\n\n\nwhile True:\n hum, temp = Adafruit_DHT.read_retry(sensor, pin)\n kello = time.asctime(time.localtime(time.time()))\n wks.update_cell(rivi,1,kello)\n wks.update_cell(rivi,2,temp)\n wks.update_cell(rivi,3,hum)\n rivi +=1\n time.sleep(300)\n" }, { "alpha_fraction": 0.6529209613800049, "alphanum_fraction": 0.6907216310501099, "avg_line_length": 18.399999618530273, "blob_id": "a306bc46c3a7fccf846a67d09def0fd3bb0f2d6a", "content_id": "33402e555de3cb80577959e7506f43de3a988b31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 59, "num_lines": 15, "path": "/demo3/demo34.py", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "#@author Lauri Poikolainen 21.1.2019\n#-*- coding: utf-8 -*-\nimport RPi.GPIO as GPIO\nimport time\nfrom picamera import PiCamera\n\ncamera = PiCamera()\npin=23\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(pin,GPIO.IN)\n\nwhile True:\n if GPIO.input(pin)==1:\n camera.capture(\"/var/www/html/kuvat/viimeisin.jpg\")\n" }, { "alpha_fraction": 0.5963801145553589, "alphanum_fraction": 0.6416289806365967, "avg_line_length": 18.38596534729004, "blob_id": 
"690b1ecc68ffca4f48e15112c28ff15a03552a02", "content_id": "ce8642e9fc2d75b0cd61a4a7ef3575d1745d8e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 99, "num_lines": 57, "path": "/demo2/demo2.py", "repo_name": "lauridev/TIEA345", "src_encoding": "UTF-8", "text": "#@Lauri Poikolainen 15.1.2019\n#-*- coding: utf-8 -*-\nimport RPi.GPIO as GPIO\nimport time\n\n#Ledit R=red, Y=yellow, G=green, jalankulkijoiden valojen eteen P\nR=4\nY=17\nG=27\n\nPR=22\nPG=5\nPY=6\n#B=button\nB=16\nPIR=23\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(R, GPIO.OUT)\nGPIO.setup(Y, GPIO.OUT)\nGPIO.setup(G, GPIO.OUT)\nGPIO.setup(PR, GPIO.OUT)\nGPIO.setup(PG, GPIO.OUT)\nGPIO.setup(PY, GPIO.OUT)\nGPIO.setup(B, GPIO.IN)\nGPIO.setup(PIR, GPIO.IN)\n\nwhile True:\n\tGPIO.output(G,1)\n\tGPIO.output(PR,1)\n\tif GPIO.input(B) == 1:\n\t\todotus = time.time() + 15\n #Kohta 2.4, signaalivalo jalankulkijoille\n\t\tGPIO.output(PY,1)\n #Kohta 2.5, jos liikenne ei lopu, päästää jalankulkijat 15 sekunnin päästä menemään\n while time.time()<odotus:\n\t\t\tif GPIO.input(PIR)==0:\n\t\t\t\tbreak\n\n GPIO.output(Y,1)\n\t\tGPIO.output(G,0)\n\t\ttime.sleep(3)\n\t\tGPIO.output(Y,0)\n\t\tGPIO.output(PY,0)\n\t\tGPIO.output(PR,0)\n\t\tGPIO.output(R,1)\n\t\ttime.sleep(1)\n\t\tGPIO.output(PG,1)\n\t\ttime.sleep(6)\n\t\tGPIO.output(R,0)\n\t\tGPIO.output(PG,0)\n\t\tGPIO.output(Y,1)\n\t\tGPIO.output(PR,1)\n\t\ttime.sleep(2)\n\t\tGPIO.output(Y,0)\n\t\t\nGPIO.cleanup()\n" } ]
7
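One detail worth noting in demo32.py above: the loop issues three separate update_cell calls per reading, which costs three Google Sheets API requests every cycle. gspread also exposes append_row, which writes the whole reading in a single request. A minimal sketch, assuming the same authorized wks handle the script already creates (log_reading is a hypothetical helper name):

```python
# Single-request alternative to the three update_cell() calls in demo32.py.
# Assumes `wks` is the worksheet obtained via gspread.authorize(...) above.
import time

def log_reading(wks, temp, hum):
    stamp = time.asctime(time.localtime(time.time()))
    wks.append_row([stamp, temp, hum])  # one API call instead of three
```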
javadr/bash-scripts
https://github.com/javadr/bash-scripts
ff14f3a991db3a1d8e9de85ffb2b99121af01015
22de23d6b2a7bc820cd81bdc1483f2f20615eb67
4b8c8a5804a0658662df49274ab91f3d1b476ec0
refs/heads/master
2023-09-01T18:20:50.462997
2023-08-23T16:10:15
2023-08-23T16:10:15
237,916,723
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6637930870056152, "alphanum_fraction": 0.6637930870056152, "avg_line_length": 18.16666603088379, "blob_id": "ade1edd222e644ca0ea597e65a8bdbbe35c55097", "content_id": "61389ec072ca2cb4b9e3f3f0c61d0fc1c27585ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 116, "license_type": "no_license", "max_line_length": 71, "num_lines": 6, "path": "/texliveUpdate", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncurDir=$(pwd)\ncd ~\nsudo -E env \"PATH=$PATH\" tlmgr --repository texlive update --self --all\ncd $curDir\n\n" }, { "alpha_fraction": 0.573889970779419, "alphanum_fraction": 0.6182902455329895, "avg_line_length": 25.946428298950195, "blob_id": "4dd2e8f3f685afc3195a0bdc59aa70e2bfe30334", "content_id": "526085ce23b155fa0829a9d1b697e1da97f433b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1509, "license_type": "no_license", "max_line_length": 92, "num_lines": 56, "path": "/mw.py", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018-2021: S.M.J.R.,\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# Change Logs:\n# Mon 17 May 2021 02:32:55 PM +0430\n# ver 0.11: simplification of code \n# Tue 22 May 2018 04:50:34 AM +0430\n# ver 0.01: Initial version\n\n__version__ = 0.11\n__date__ = \"17May2021\"\n\nimport sys\nfrom itertools import permutations, combinations\nfrom pathlib import Path\n\n\ndef choose(chars, k):\n finalwords = set()\n for item in combinations(chars, k):\n finalwords.update(makewords(item))\n return finalwords\n \ndef makewords(chars):\n words = set( ''.join(item) for item in permutations(chars) if ''.join(item) in worddic )\n return words\n\ndef usage():\n print(f\"\"\"Word Maker ver {__version__} [{__date__}]\n Usage:\n \\t{Path(sys.argv[0]).name} <char1> <char2> .... 
<charN> k \\t # k <= N\"\"\")\n\nif __name__ == '__main__':\n chars = sys.argv[1:-1]\n try: \n k = int(sys.argv[-1])\n except ValueError: \n usage()\n sys.exit()\n\n if not chars: #len(sys.argv) < 2\n usage()\n sys.exit()\n \n # os.path.dirname(os.path.realpath(__file__))\n curdir = Path(sys.argv[0]).parent.absolute()\n worddic = {i: 1 for i in open(f\"{curdir}/Bwords.txt\").read().split()}\n\n print('\\n'.join(choose(chars, k)))\n" }, { "alpha_fraction": 0.6805555820465088, "alphanum_fraction": 0.6805555820465088, "avg_line_length": 23, "blob_id": "69c922cd9deb32fb7a727074f1fc8dc16faa963f", "content_id": "14cabafbf7dc5addd5c757fb5e8c42d12b2812e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 72, "license_type": "no_license", "max_line_length": 58, "num_lines": 3, "path": "/ppmpv", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nxdotool key --window \"$(xdotool search --class mpv)\" space\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.6379310488700867, "avg_line_length": 18.33333396911621, "blob_id": "3f962ddc3856cbe91d934b5c9d434628e71b325f", "content_id": "4bec4c2fc1630face34b7958d995177715a1d295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 58, "license_type": "no_license", "max_line_length": 44, "num_lines": 3, "path": "/dnfupdate", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsudo dnf update --best --allowerasing -y $@\n" }, { "alpha_fraction": 0.6981664299964905, "alphanum_fraction": 0.7433004379272461, "avg_line_length": 63.3636360168457, "blob_id": "876720a1c7058ce3c2e156212f8382403b09e0c0", "content_id": "18d21d157f59e81416cd42247ebfe3302589b10e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 709, "license_type": "no_license", "max_line_length": 538, "num_lines": 11, "path": "/texliveSync", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\ncurDir=\"$(pwd)\"\n\nPRO=\"--progress\"\nFLAGS=\"-ahv --delete \"\nEXCLUDES=\"--exclude=.svn --exclude=bin/amd64-freebsd --exclude=bin/amd64-kfreebsd --exclude=bin/armel-linux --exclude=bin/i386-freebsd --exclude=bin/i386-kfreebsd --exclude=bin/i386-netbsd --exclude=bin/i386-solaris --exclude=bin/mips-irix --exclude=bin/powerpc-aix --exclude=bin/powerpc-linux --exclude=bin/sparc-linux --exclude=bin/sparc-solaris --exclude=bin/powerpc-aix --exclude=bin/x86_64-solaris --exclude=bin/aarch64-linux --exclude=bin/amd64-netbsd --exclude=armhf-linux --exclude=x86_64-darwinlegacy --exclude=x86_64-linuxmusl\"\n#--append-verify\"\n\ncd ~\nrsync $PRO $FLAGS $EXCLUDES tug.org::tldevsrc/Master/ texlive\ncd \"$curDir\"\n\n" }, { "alpha_fraction": 0.3840000033378601, "alphanum_fraction": 0.4359999895095825, "avg_line_length": 21.636363983154297, "blob_id": "2a1a92f468a134c63bbd58e64d30be5012e79954", "content_id": "84c2bcabd400e373405eb7a7b828c670532a8036", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 250, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/wav2mp3", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \n# \n# wav2mp3\n# \nfor i in *.wav; do\n #out=$(ls $i | sed -e 's/.wav//g')\n #out=$(echo $i | sed -e 's/.wav$//')\n #lame -h -b 192 \"$i\" \"$out.mp3\"\n #lame -h -b 192 \"$i\" \"${i%.wav}.mp3\"\n lame -h -b 64 \"$i\" 
\"${i%.wav}.mp3\"\ndone\n\n" }, { "alpha_fraction": 0.6005747318267822, "alphanum_fraction": 0.6149425506591797, "avg_line_length": 23.85714340209961, "blob_id": "1008ddb8f1730a3307b92e417e1374dc67492c13", "content_id": "5f8e72bfad2433ac1152b4b3ce43908d4c731cc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 70, "num_lines": 14, "path": "/renameLatin2utf8.py", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfor name in os.listdir('.'):\n#\tprint name\n\tif os.path.isfile(name):\n\t\ttry: \n\t\t\tif name.decode('utf-8').encode('latin-1') != name:\n\t\t\t\tnewName = name.decode('utf-8').encode('latin-1')\n\t\t\t\tos.rename(name, newName)\n\t\t\t\tprint \"%s file name renamed to this new one: %s\" % (name, newName)\n\t\texcept: pass\n" }, { "alpha_fraction": 0.6359223127365112, "alphanum_fraction": 0.6893203854560852, "avg_line_length": 21.77777862548828, "blob_id": "b6506f570d7e7a3cdcdae57e16e5eb1e09ffa9ad", "content_id": "8cbc69ae564ea8226668686dddc687da6d27018f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 206, "license_type": "no_license", "max_line_length": 94, "num_lines": 9, "path": "/wma2wav", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# REQUIREMENTS:\n# mplayer\n# http://ubuntuforums.org/showthread.php?t=476390\n\nfor i in *.wma\ndo mplayer -vo null -vc dummy -af resample=44100 -ao pcm:waveheader:file=\"${i%.wma}.wav\" \"$i\" \ndone \n" }, { "alpha_fraction": 0.5296803712844849, "alphanum_fraction": 0.6027397513389587, "avg_line_length": 35.5, "blob_id": "54b436d575c24c59a6f06f94bd3e9ab72e6da9bb", "content_id": "ea11f68361038113b8ffc98b82ca24fd41404aec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 219, "license_type": "no_license", "max_line_length": 78, "num_lines": 6, "path": "/wm", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ngrep -E \"^$1[a-z]{$2}$3$\" ~/.local/bin/myscripts/words.txt | nl | column\nprintf \"\\033[0;31m\" # RED\ngrep -E \"^$1[a-z]{$2}$3$\" ~/.local/bin/myscripts/words_alpha.txt | nl | column\nprintf \"\\033[0m\" # No Color\n" }, { "alpha_fraction": 0.5675675868988037, "alphanum_fraction": 0.5810810923576355, "avg_line_length": 13.800000190734863, "blob_id": "5ac2fda6794b872324904cb4fb867925b1ca330f", "content_id": "e015a966066222e038342974f584b5d12945805b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 74, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/kad", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nps -o pid= -C anydesk|while read f; do\n kill -9 \"$f\"\ndone\n" }, { "alpha_fraction": 0.7068027257919312, "alphanum_fraction": 0.7108843326568604, "avg_line_length": 34.85365676879883, "blob_id": "cf002e1392609a05a54d68bc2209ac1d9b7c41ed", "content_id": "e6af6df869ecedb07492edda8ce65fd8be2a2439", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 99, "num_lines": 41, "path": "/urlfix.py", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/env python3\n\"\"\"\nURL Unicode Converter\n\nThis program is designed to 
convert a URL with Unicode characters, as copied from the browser's address bar, back into its\ndecoded form. It is useful when working with URLs that contain non-ASCII characters and need to\nbe converted back to Unicode.\n\nUsage:\n\n    1. Copy a URL with Unicode characters from the browser's address bar.\n    2. Run this program.\n    3. The program will automatically decode the URL into Unicode and\n    store it back into the clipboard.\n\nPlease note that this program requires the `pyperclip` module.\n\nDisclaimer: This program is provided as-is without any warranties. Use it at your own risk.\n\"\"\"\n\nimport argparse\nimport urllib.parse\nimport pyperclip\n\n\nparser = argparse.ArgumentParser(description='Process the clipboard\\'s text.')\nparser.add_argument('-u', dest='urlfix_or_newline', action='store_true', default=True)\nparser.add_argument('-n', dest='urlfix_or_newline', action='store_false', )\nargs = parser.parse_args()\n\noutput = pyperclip.paste()\n\nif args.urlfix_or_newline:  # urlfix\n    output = urllib.parse.unquote(output)\n# output = urllib.unquote(output)  # Python 2 spelling of the same call\nelse:  # newline remove\n    output = output.replace('\\n\\n', '!$@#').replace('\\n',\n                                                     ' ').replace('!$@#', '\\n').title()\npyperclip.copy(f\"{output}\")\nprint(output)\n" }, { "alpha_fraction": 0.40776699781417847, "alphanum_fraction": 0.48543688654899597, "avg_line_length": 21.071428298950195, "blob_id": "ed91a7976214ce89da77d088c6e65d2d732833b3", "content_id": "9284ba9f24dafefc7a4db3f4f877b43ea15735c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 309, "license_type": "no_license", "max_line_length": 70, "num_lines": 14, "path": "/gettotaltime", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nRED='\\033[0;31m'\nNC='\\033[0m' # No Color\n\ncur=$(pwd)\ncd \"$1\" || exit\n\ngettime\nexiftool *.mp4 | grep -E \"^Duration\" | cut -d: -f2- | tr : \\  | \\\n    dc -f - -e '60o0ddd[+r60*+r60d**+z1<a]dsaxp' | awk '{$1=$1};1' | \\\n    tr \" \" \":\" | xargs -I {} printf \"\\tTotal is ${RED}{}${NC}\\n\"\n\ncd \"$cur\" || exit\n" }, { "alpha_fraction": 0.44366195797920227, "alphanum_fraction": 0.4507042169570923, "avg_line_length": 46, "blob_id": "b5d29950d9913e7e58e7194cd09cc3e9b83189c0", "content_id": "29bc0abf460978b57b5fc77672141814d7c070f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 142, "license_type": "no_license", "max_line_length": 127, "num_lines": 3, "path": "/ytlist", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nyt-dlp --dump-json --flat-playlist \"$@\" | jq -r \"[.title,.url]|@csv\"|sed 's/\",\"/](/g'|sed -E 's/\"$/)/g'|sed -E 's/^\\\"/1. 
[/g' \n" }, { "alpha_fraction": 0.5436681509017944, "alphanum_fraction": 0.5655021667480469, "avg_line_length": 40.6363639831543, "blob_id": "d698e6e65123877223cf318807408c072954187f", "content_id": "7bc48297697b3319ccbac37d954edb63453f6d8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 458, "license_type": "no_license", "max_line_length": 143, "num_lines": 11, "path": "/hash.stid", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# input: CSV file with students' id and their marks with [, \\t] delimiters.\n# output: md5sumed students' id and their marks.\n\nawk -F \"[, \\t]+\" '{\n cmd=\"echo -n \"$1\"|md5sum \"\n cmd|getline cksum\n printf \"%s\", cksum; for(i=2;i<=NF;i++) printf \"%-5.4G \",$i; print \"\"\n }' \"$1\" | sort -t- -nrk2\n\ncat \"$1\" | tr -s [:blank:] ',' | R --slave -e 'x <- read.csv(file=\"stdin\", header=FALSE, sep = \",\", stringsAsFactors = FALSE); summary(x[,2]);'\n" }, { "alpha_fraction": 0.39275363087654114, "alphanum_fraction": 0.4594202935695648, "avg_line_length": 33.5, "blob_id": "4dddfd8322a91ba13432f0927b7278fa67cd35b7", "content_id": "7563ab3742398d90975e23cfae07b6083ce1ef52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 722, "license_type": "no_license", "max_line_length": 210, "num_lines": 20, "path": "/en2fa", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor f in *\ndo\n#\tnewfile=\"$(echo $f | tr [:upper:] [:lower:] | sed -e 's/0/۰/g' -e 's/1/۱/g' -e 's/2/۲/g' -e 's/3/۳/g' -e 's/4/۴/g' -e 's/5/۵/g' -e 's/6/۶/g' -e 's/7/۷/g' -e 's/8/۸/g' -e 's/9/۹/g' -e 's/ي/ی/g' -e 's/ك/ک/g')\"\n\tnewfile=\"$(echo $f | sed -e 's/0/۰/g' -e 's/1/۱/g' -e 's/2/۲/g' -e 's/3/۳/g' -e 's/4/۴/g' -e 's/5/۵/g' -e 's/6/۶/g' -e 's/7/۷/g' -e 's/8/۸/g' -e 's/9/۹/g' -e 's/ي/ی/g' -e 's/ك/ک/g')\"\t\n\tif [ \"$newfile\" != \"${newfile%.mp۳}\" ] \n\tthen\n\t\tnewfile=\"$(echo ${newfile%.mp۳}.mp3)\"\n\tfi\n\tif [ \"$newfile\" != \"${newfile%.mp۴}\" ] \n\tthen\n\t\tnewfile=\"$(echo ${newfile%.mp۴}.mp4)\"\n\tfi\t\n\tif [ \"$f\" != \"$newfile\" ] \n\tthen\n\t\tmv -v \"$f\" \"$newfile\"\n\tfi\n#\techo $newfile\ndone\n" }, { "alpha_fraction": 0.42307692766189575, "alphanum_fraction": 0.44017094373703003, "avg_line_length": 22.299999237060547, "blob_id": "3beafbe8bdf58b149215e985c1e4f16c7b0e4cab", "content_id": "c2ec19e0f536d235b0b7afcf2ae7af5b5c50c256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 234, "license_type": "no_license", "max_line_length": 79, "num_lines": 10, "path": "/revbyspace", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ -f $@ ]; then\n while read line\n do\n echo $line | awk '{for (i=NF; i>1; i--) printf(\"%s \", $i); print $1;}';\n done < $@\nelse\n echo $@ | awk '{for (i=NF; i>1; i--) printf(\"%s \", $i); print $1;}';\nfi\n\n" }, { "alpha_fraction": 0.5518134832382202, "alphanum_fraction": 0.5621761679649353, "avg_line_length": 23, "blob_id": "e448d07028b015da1a24cc3af070a037e07701a6", "content_id": "5f3a9c3700bb74ee03ed4d057546159f5e4576f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 386, "license_type": "no_license", "max_line_length": 75, "num_lines": 16, "path": "/ppt2pdf", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\n# Converts ppt[x] files to pdf counterparts.\n\nif [[ $# -eq 
0 ]];\nthen\n mapfile -t list < <(ls ./*.ppt ./*.pptx 2>/dev/null)\n # list=( $(ls ./*.ppt ./*.pptx 2>/dev/null) )\nelse\n list=( \"$@\" )\nfi\n\nfor ppt in \"${list[@]}\"\ndo \n libreoffice --headless --invisible --convert-to pdf \"$ppt\" 1>/dev/null;\n echo \"${ppt#./}\" converted to its pdf counterpart.\ndone\n\n\n" }, { "alpha_fraction": 0.6638655662536621, "alphanum_fraction": 0.6638655662536621, "avg_line_length": 18.66666603088379, "blob_id": "3ec81a2a38ace003e64c07c7de61e284791f1162", "content_id": "6f8e31e3b313e2a63703f21a72ebaa61461db1d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 119, "license_type": "no_license", "max_line_length": 74, "num_lines": 6, "path": "/updateTeXLive-xz", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncurDir=$(pwd)\ncd ~\nsudo -E env \"PATH=$PATH\" tlmgr --repository texlive-xz update --self --all\ncd $curDir\n\n" }, { "alpha_fraction": 0.542801558971405, "alphanum_fraction": 0.5797665119171143, "avg_line_length": 22.363636016845703, "blob_id": "be9854b11f6f5b8cca6a7744fc7dc1e989ee31e1", "content_id": "41b3aa982cbedd75083c22da156a9aa7d0c63387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 79, "num_lines": 22, "path": "/entro.py", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom math import log\n\nif len(sys.argv) < 2:\n print(\"Usage: %s <n1> <n2> ...\" % sys.argv[0][sys.argv[0].rfind('/') + 1:])\n sys.exit(-1)\n\nif len(sys.argv) == 2:\n sys.argv.append(f\"{1-eval(sys.argv[1])}\")\n\nprint(f\"Probabilities include {', '.join(sys.argv[1:])}\")\n\nentropy = 0.0\nisum = 0\nfor item in sys.argv[1:]:\n ni = eval(item)\n if ni != 0: entropy += -ni * log(ni, 2)\n isum += ni\nentropy = entropy / isum + log(isum, 2)\nprint(\"entropy is: {}\".format(entropy))\n" }, { "alpha_fraction": 0.7725856900215149, "alphanum_fraction": 0.7725856900215149, "avg_line_length": 31.100000381469727, "blob_id": "4b841fb35171d78cffecb95d6b62a83ea8c4e944", "content_id": "2d96eb515b73f3434ca6e87633a79e547a109071", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 321, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/focus", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nvalue=$(gsettings get org.gnome.desktop.notifications show-banners)\nif [[ $value == 'true' ]]\nthen\n gsettings set org.gnome.desktop.notifications show-banners false\n gnome-extensions enable [email protected]\nelse\n gsettings set org.gnome.desktop.notifications show-banners true\n gnome-extensions disable [email protected]\nfi\n" }, { "alpha_fraction": 0.4694656431674957, "alphanum_fraction": 0.5322692394256592, "avg_line_length": 24.504425048828125, "blob_id": "30824cc840aff93689d740bb9ea2ae791ccf2731", "content_id": "53d454f15691bb571f93e9e862af045d1d06a8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2925, "license_type": "no_license", "max_line_length": 73, "num_lines": 113, "path": "/abjad.py", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# ***** BEGIN GPL LICENSE BLOCK *****\n#\n# Copyright (C) 2009: S.M.J.R.,\n#\n# This program is free software; you can redistribute it and/or\n# 
modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# ***** END GPL LICENCE BLOCK *****\n# Change Logs:\n# Sun 09 Apr 2017 10:15:55 AM +0430\n# ver 0.12: Support for ة as ت\n# Mon 21 Nov 2016 10:12:05 AM IRST\n# ver 0.11: Minor bug in summation output\n# Sat 23 Jul 2016 12:47:02 PM IRDT\n# ver 0.1: Accept input from pipe in shell\n# Mon 23 Dec 2013 13:30:52 IRST\n# ver 0.01: Initial version\n\n__author__ = \"S.M.J.R.\"\n__version__ = 0.12\n__date__ = \"9Apr2017\"\n__email__ = '[email protected]'\n\nimport sys\nimport string\n\n\ndef abjadCalculate(inputStr):\n s = 0\n abjDic = {\n u\"ا\": 1,\n u\"إ\": 1,\n u\"أ\": 1,\n u\"آ\": 2,\n u\"ء\": 1,\n u\"ؤ\": 1,\n u\"ئ\": 1,\n u\"ب\": 2,\n u'پ': 2,\n u'چ': 3, #for persian support\n u\"ج\": 3,\n u\"د\": 4,\n u\"ه\": 5,\n u\"و\": 6,\n u\"ز\": 7,\n u'ژ': 7, #for persian support\n u\"ح\": 8,\n u\"ط\": 9,\n u\"ي\": 10,\n u\"ی\": 10,\n u\"ك\": 20,\n u\"ک\": 20,\n u'گ': 20, #for persian support\n u\"ل\": 30,\n u\"م\": 40,\n u\"ن\": 50,\n u\"س\": 60,\n u\"ع\": 70,\n u\"ف\": 80,\n u\"ص\": 90,\n u\"ق\": 100,\n u\"ر\": 200,\n u\"ش\": 300,\n u\"ت\": 400,\n u\"ة\": 400,\n u\"ث\": 500,\n u\"خ\": 600,\n u\"ذ\": 700,\n u\"ض\": 800,\n u\"ظ\": 900,\n u\"غ\": 1000\n }\n for letter in inputStr:\n s += abjDic.get(letter, 0)\n return s\n\n\ndef usage():\n print(\"\"\"Abjad Calculus ver {0}[{1}]\n Usage:\n \\tabjad.py <IN1> <IN2> ....\"\"\".format(__version__, __date__))\n\n\nif __name__ == '__main__':\n allwords = sys.argv[1:] if len(\n sys.argv) > 1 else sys.stdin.readline().split()\n if not allwords: #len(sys.argv) < 2 and len(linesinput)==0:\n usage()\n sys.exit()\n\n abjsum = 0\n maxStrLen = max([len(i) for i in allwords])\n\n print(\"Abjad Calculus ver. 
{}:\\n\".format(__version__))\n for i, item in enumerate(allwords):\n val = abjadCalculate(item)\n abjsum += val\n print(\"{0} \\t {1}\".format(item.replace('\\n', ''), val))\n\n print(\"\\nsum: {}\".format(abjsum))\n" }, { "alpha_fraction": 0.6846307516098022, "alphanum_fraction": 0.6906187534332275, "avg_line_length": 61.5, "blob_id": "79360b3b00997facd5d74941e7f89835f7c9821d", "content_id": "51588fefa03fd07251ab7294630decc41e15be02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 501, "license_type": "no_license", "max_line_length": 146, "num_lines": 8, "path": "/syncTeXLive-xz", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\ncurDir=\"$(pwd)\"\n\ncd ~ || exit\nrsync -rltzz --delete --progress --modify-window=5 -vh --exclude=\"mactex*\" rsync://rsync.dante.ctan.org/CTAN/systems/texlive/tlnet/ \"texlive-xz\"\n# rsync -rltzz --delete --progress --modify-window=5 -vh --exclude=\"mactex*\" rsync://mirror.asis.sh/CTAN/systems/texlive/tlnet/ \"texlive-xz\"\n# rsync -rltzz --delete --progress --modify-window=5 -vh --exclude=\"mactex*\" rsync://mirror.bardia.tech/ctan/systems/texlive/tlnet/ \"texlive-xz\"\ncd \"$curDir\" || exit\n\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.4979591965675354, "avg_line_length": 39.83333206176758, "blob_id": "de31e535ae61e0f5f313ac0b927a7938ec463143", "content_id": "e0d654a3e671a25709dcef4b85e5aa88edc99780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 249, "license_type": "no_license", "max_line_length": 84, "num_lines": 6, "path": "/gettime", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# output name and time of MP4 files \n\nexiftool *.mp4 | grep -E \"File Name|^Duration\" | cut -d: -f2- | \\\n awk 'NR%2{printf \"فایل%s\",$0;next} {print \" [\\033[0;32m\",$0,\"\\033[0m] ==>\"}' | \\\n sed 's/.mp4//g ; s/\\[ /\\[/g; s/ \\]/\\]/g'\n" }, { "alpha_fraction": 0.639652669429779, "alphanum_fraction": 0.6454414129257202, "avg_line_length": 39.64706039428711, "blob_id": "3c227a58c3d7ebd0a89c6fd80b6ba728facf7541", "content_id": "9b4a0357b4501ca51d0906dd92bc46cb450a11e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 691, "license_type": "no_license", "max_line_length": 86, "num_lines": 17, "path": "/dotel", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\ndocker run --rm --name telegram \\\n --hostname=\"$(hostname)\" \\\n -e DISPLAY=unix\"$DISPLAY\" \\\n -e PULSE_SERVER=unix:\"$XDG_RUNTIME_DIR/pulse/native\" \\\n -v /tmp/.X11-unix:/tmp/.X11-unix \\\n -v \"/home/$(whoami)/.Xauthority:/home/user/.Xauthority\" \\\n -v \"$XDG_RUNTIME_DIR\"/pulse:\"$XDG_RUNTIME_DIR\"/pulse \\\n -v /etc/localtime:/etc/localtime:ro \\\n -v ~/Downloads/.TelegramDesktop:/home/user/.local/share/TelegramDesktop/ \\\n -v ~/Downloads/:/home/user/Downloads/ \\\n -v /tmp/:/tmp/ \\\n --network=host \\\n ghcr.io/xorilog/docker-telegram/telegram &>/dev/null & disown\n\n# -v /home/javad/Downloads/Telegram\\ Desktop:/home/user/Downloads/Telegram\\ Desktop/ \\\n" }, { "alpha_fraction": 0.5261958837509155, "alphanum_fraction": 0.5261958837509155, "avg_line_length": 18.954545974731445, "blob_id": "4bb78c8151d3c0534710d927446c34db936b6e7b", "content_id": "fb53529c8728c1e0489dfe02571cf2a43e7a6a57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 439, 
"license_type": "no_license", "max_line_length": 94, "num_lines": 22, "path": "/mclean", "repo_name": "javadr/bash-scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\nclean_latex_temp()\n{\n rm -rifv *.aux *.log *.dvi *.idx *.ilg *.ind *.toc *.out *.pyg *.synctex *.tdo *.blg \\\n *.lot *.lol *.lof *.syn *.suc *.bbl *.syc *.brf *.los *.snm *.nav *.gnuplot *.hd\\\n *.bbl *.blg *.bcf *.run.xml *.listing *~\n}\n\nclean_python_temp()\n{\n rm -rifv __pycache__\n}\n\nclean_latex_temp\nclean_python_temp\n\nif [ -d chapters ];then \n cd chapters\n clean_latex_temp\n cd ..\nfi\n" } ]
25
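The letter table in abjad.py above can be sanity-checked by hand: محمد decomposes as م(40) + ح(8) + م(40) + د(4), so abjadCalculate should return 92 for it. A minimal check, assuming the script's directory is on sys.path so that this abjad.py — rather than the unrelated PyPI music package of the same name — is the module imported:

```python
# Quick sanity check for abjadCalculate() from the record above.
from abjad import abjadCalculate

assert abjadCalculate(u"محمد") == 92  # 40 + 8 + 40 + 4, per abjDic
print(abjadCalculate(u"محمد"))
```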
codebynumbers/Flask-Foundation
https://github.com/codebynumbers/Flask-Foundation
1d20a1f8d2e7149453f0a99f0094020cbb02ecf0
6df4dff51f0bf0b2c3065667d43c4eefad46f4e2
df9c2edf499ee398d4d51cac5ed5d3bb20d64a9e
refs/heads/master
2021-01-15T09:42:44.289747
2018-09-17T14:08:20
2018-09-17T14:08:20
23,723,723
1
0
null
2014-09-06T01:36:39
2015-06-08T14:16:05
2015-04-19T00:42:19
Python
[ { "alpha_fraction": 0.7807848453521729, "alphanum_fraction": 0.7807848453521729, "avg_line_length": 26.33333396911621, "blob_id": "f31ceaebc8ae486536c6edb80e4c7e5c3b097a2e", "content_id": "e0f352982a79be9e4ecdd59ff2fdcef4480bb1bb", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 739, "license_type": "permissive", "max_line_length": 137, "num_lines": 27, "path": "/README.md", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "# Flask Foundation\n\nA Fork of the original Flask-Foundation, but with the following enhancements:\n* Login using bcrpyted passwords\n* Forms and Models moved into modules\n* An ActiveModel Mixing added for db opertations (eg. User.save())\n* Account creation example\n* Make rename command to rename appname to something else, eg. make rename appname=webapp\n* Change Password example\n\n#### Getting Started, assumes fully-stocked virtualenv\n\n`make rename appname=somenewapp`\n\nCreate initial migrations\n\n`./manage.py db migrate`\n\nApply to database\n\n`./manage.py db upgrade`\n\nStart\n\n`./manage.py runserver`\n\nOriginal Documentation is located at [https://jackstouffer.github.io/Flask-Foundation/](https://jackstouffer.github.io/Flask-Foundation/)\n\n" }, { "alpha_fraction": 0.747787594795227, "alphanum_fraction": 0.769911527633667, "avg_line_length": 44.20000076293945, "blob_id": "79434782e35866eecd692d62a4e9ac0335d80744", "content_id": "e67d6d4c80e733bdb42abf610edbe020624afeb3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "permissive", "max_line_length": 103, "num_lines": 5, "path": "/appname/config_test_default.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "DEBUG = False\nWTF_CSRF_ENABLED = False\nSQLALCHEMY_ECHO = False\nSQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/appname_test.db'\n#SQLALCHEMY_DATABASE_URI = 'mysql://appname_test:appname_test@localhost:3306/appname_test?charset=utf8'\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 16, "blob_id": "20c3b2b00bb52955976c8c96d894b404fe1c8cdb", "content_id": "eab0aca7194c38f07f0bd75b9142786d85a1f45b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/wsgi_app.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from appname import create_app\n\napp = create_app()\n\n" }, { "alpha_fraction": 0.7936508059501648, "alphanum_fraction": 0.7936508059501648, "avg_line_length": 26.14285659790039, "blob_id": "63ac8dffa8e4e1a768e93125f3dc3f59829aced8", "content_id": "64914c7b6f00f3e3aa637bb408b3d2816770f941", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "permissive", "max_line_length": 60, "num_lines": 7, "path": "/appname/forms/widget.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from flask_wtf import Form\nfrom wtforms import StringField\nfrom wtforms.validators import DataRequired\n\n\nclass WidgetForm(Form):\n name = StringField(u'Name', validators=[DataRequired()])" }, { "alpha_fraction": 0.5174129605293274, "alphanum_fraction": 0.7034825682640076, "avg_line_length": 17.236364364624023, "blob_id": 
"ae3d7f3ef5f407ec5351540588e74208b8dcad05", "content_id": "0c9bb4f5008536afb682c8e3a84119c0abdda7df", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1005, "license_type": "permissive", "max_line_length": 73, "num_lines": 55, "path": "/requirements.txt", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "git+https://bitbucket.org/50onred/[email protected]#egg=Fifty-Flask\ngit+https://bitbucket.org/50onred/[email protected]#egg=Fifty-Tables\nalabaster==0.7.10\nalembic==0.9.1\nappdirs==1.4.2\nBabel==2.3.4\nbcrypt==3.1.3\nblinker==1.4\nboto==2.46.1\ncffi==1.9.1\nclick==6.7\nconfigparser==3.5.0\ncoverage==4.3.4\ncssmin==0.2.0\ndocutils==0.13.1\nenum34==1.1.6\nflake8==3.3.0\nFlask==0.12\nFlask-Assets==0.12\nFlask-Bcrypt==0.7.1\nFlask-Cache==0.13.1\nFlask-DebugToolbar==0.10.1\nFlask-Login==0.4.0\nFlask-Migrate==2.0.3\nFlask-Script==2.0.5\nFlask-SQLAlchemy==2.2\nFlask-WTF==0.14.2\nimagesize==0.7.1\nitsdangerous==0.24\nJinja2==2.9.5\njsmin==2.2.1\nMako==1.0.6\nMarkupSafe==0.23\nmccabe==0.6.1\nMySQL-python==1.2.5\npackaging==16.8\npep8==1.7.0\npy==1.4.32\npycodestyle==2.3.1\npycparser==2.17\npyflakes==1.5.0\nPygments==2.2.0\npyparsing==2.1.10\npytest==3.0.6\npython-editor==1.0.3\npytz==2016.10\nrequests==2.13.0\nsix==1.10.0\nsnowballstemmer==1.2.1\nSphinx==1.5.3\nSQLAlchemy==1.1.6\nuWSGI==2.0.14\nwebassets==0.12.1\nWerkzeug==0.11.15\nWTForms==2.1\n\n\n" }, { "alpha_fraction": 0.701127827167511, "alphanum_fraction": 0.701127827167511, "avg_line_length": 21.16666603088379, "blob_id": "b009ffcc8b1240ac1c617bdd392860cca0c32431", "content_id": "98a7c21b9ae1f445472c698dc8b089f483f6101c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "permissive", "max_line_length": 58, "num_lines": 24, "path": "/manage.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom flask_migrate import MigrateCommand\nfrom flask_script import Manager, Server\n\nfrom appname import create_app\nfrom appname.models import db, User\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command(\"server\", Server())\nmanager.add_command('db', MigrateCommand)\n\n\[email protected]\ndef make_shell_context():\n \"\"\" Creates a python REPL with several default imports\n in the context of the app\n \"\"\"\n return dict(app=app, db=db, User=User)\n\n\nif __name__ == \"__main__\":\n manager.run()\n" }, { "alpha_fraction": 0.7427937984466553, "alphanum_fraction": 0.7450110912322998, "avg_line_length": 40, "blob_id": "9a5f8da53f9feafa4a44ba7cc2038cf24dd20867", "content_id": "882bbdb376a91b853777fa4c08d918be5c22b91f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "permissive", "max_line_length": 95, "num_lines": 11, "path": "/appname/forms/signup.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from flask_wtf import Form\nfrom wtforms import PasswordField, StringField\nfrom wtforms import validators\n\n\nclass SignupForm(Form):\n username = StringField(u'Email', validators=[validators.required()])\n password = PasswordField(\n u'Password', validators=[validators.required(), validators.Length(min=8)])\n password_confirm = PasswordField(\n u'Repeat Password', validators=[validators.required(), validators.EqualTo('password')])\n" }, { "alpha_fraction": 
0.8550724387168884, "alphanum_fraction": 0.8550724387168884, "avg_line_length": 33.75, "blob_id": "fd7a6bc202642b4e40f6963e87db6cfdd414dec9", "content_id": "37af560db8b4e4c5ead0d72be25f60fc62d4a440", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "permissive", "max_line_length": 47, "num_lines": 4, "path": "/appname/forms/__init__.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from .login import LoginForm\nfrom .signup import SignupForm\nfrom .change_password import ChangePasswordForm\nfrom .widget import WidgetForm" }, { "alpha_fraction": 0.6722407937049866, "alphanum_fraction": 0.6722407937049866, "avg_line_length": 26.272727966308594, "blob_id": "6725894e9413d6cae7b63ae3d60fcd8f9f0c89e2", "content_id": "98cf191efb66728e4e834145addd2a5ba7e6e45f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "permissive", "max_line_length": 62, "num_lines": 11, "path": "/appname/models/widget.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from db import db, ActiveModel\n\n\nclass Widget(ActiveModel, db.Model):\n __tablename__ = \"widgets\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n user_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"))\n\n user = db.relationship(\"User\", backref=\"widgets\")" }, { "alpha_fraction": 0.6600741744041443, "alphanum_fraction": 0.6637824177742004, "avg_line_length": 32.75, "blob_id": "c51c9fc6a2638c334f0d2bcbc54ae3c2e00c963d", "content_id": "e80a106ad3736c1ce16219fc8090b0b39a628e68", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "permissive", "max_line_length": 79, "num_lines": 24, "path": "/appname/controllers/mixins.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from flask import abort, flash\nfrom flask_login import login_required, current_user\nfrom appname.models import Widget\n\n\nclass LoginRequiredMixin(object):\n decorators = [login_required]\n\n\nclass WidgetAccessMixin(LoginRequiredMixin):\n widget = None\n\n def dispatch_request(self, *args, **kwargs):\n if 'widget_id' in kwargs:\n self.widget = Widget.query.get(kwargs['widget_id'])\n if self.widget and not current_user == self.widget.user:\n flash(\"Access Denied\", \"danger\")\n abort(401)\n return super(WidgetAccessMixin, self).dispatch_request(*args, **kwargs)\n\n def get_context_data(self, **context):\n context = super(WidgetAccessMixin, self).get_context_data(**context)\n context['widget'] = self.widget\n return context" }, { "alpha_fraction": 0.7446808218955994, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 27.133333206176758, "blob_id": "e96ee9ec58a757bedce0132fbbfc1cb2464a10d5", "content_id": "48b7df010e7d040d86d0cbd31872bbe88a4ad390", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "permissive", "max_line_length": 88, "num_lines": 15, "path": "/appname/config_default.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "DEBUG = True\nASSETS_DEBUG = False\nDEBUG_TB_INTERCEPT_REDIRECTS = False\n\nSECRET_KEY = 'secret key'\n\nCACHE_TYPE = 'null'\nCACHE_NO_NULL_WARNING = True\n\nSQLALCHEMY_POOL_RECYCLE = 
3600\nSQLALCHEMY_COMMIT_ON_TEARDOWN = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/appname.db'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n#SQLALCHEMY_DATABASE_URI = 'mysql://appname:appname@localhost:3306/appname?charset=utf8'\n\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 21.33333396911621, "blob_id": "58b539d810d5ea2a4e947c7cb5c005dc6bcf8957", "content_id": "792b9be2b087bb37b6d44b47e5a56dcb9f310f81", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "permissive", "max_line_length": 25, "num_lines": 3, "path": "/appname/models/__init__.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from .db import db\nfrom user import User\nfrom widget import Widget" }, { "alpha_fraction": 0.6512746214866638, "alphanum_fraction": 0.6512746214866638, "avg_line_length": 32, "blob_id": "eb98188fe68970fb3b13b3d5860ea14e372de348", "content_id": "805585f8e3167268d01f32dc1e0119933f30cc21", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2079, "license_type": "permissive", "max_line_length": 76, "num_lines": 63, "path": "/appname/controllers/widgets.py", "repo_name": "codebynumbers/Flask-Foundation", "src_encoding": "UTF-8", "text": "from flask import Blueprint, url_for, flash\nfrom flask_login import current_user\n\nfrom fifty_flask.views.generic import url_rule, FormView\nfrom fifty_tables import NumericColumn, LinkColumn\nfrom fifty_tables.views import SQLAlchemyTableView\n\nfrom appname.controllers.mixins import WidgetAccessMixin, LoginRequiredMixin\nfrom appname.forms.widget import WidgetForm\nfrom appname.models.widget import Widget\n\nwidgets_bp = Blueprint('widgets', __name__, url_prefix='/widgets')\n\n\n@url_rule(widgets_bp, ['/<widget_id>/edit/', '/edit/'], 'edit')\nclass WidgetEditView(WidgetAccessMixin, FormView):\n\n template_name = \"widget_edit.html\"\n form_cls = WidgetForm\n\n def _get_widget(self):\n \"\"\" Get the model to save.\n Could be new in case of create, or existing.\n \"\"\"\n if not self.widget:\n self.widget = Widget()\n return self.widget\n\n def get_form_obj(self):\n return self.widget\n\n def get_redirect_url(self):\n return url_for('.edit', widget_id=self.widget.id)\n\n def form_valid(self, form, **context):\n widget = self._get_widget()\n form.populate_obj(widget)\n widget.user = current_user\n widget.save()\n self.widget = widget\n flash(\"widget saved\", \"success\")\n return super(WidgetEditView, self).form_valid(form, **context)\n\n def form_invalid(self, form, **context):\n flash(\"Error saving widget\", \"danger\")\n return super(WidgetEditView, self).form_invalid(form, **context)\n\n\n@url_rule(widgets_bp, '/', 'list')\nclass WidgetListView(LoginRequiredMixin, SQLAlchemyTableView):\n template_name = 'widget_list.html'\n default_sort = 'id'\n default_sort_direction = 'asc'\n\n def get_table_columns(self, params=None, **context):\n return [\n NumericColumn(name='id', label='ID', int_format='{:}'),\n LinkColumn(name='name', label=\"Name\",\n endpoint='.edit', url_params={'widget_id': 'id'}),\n ]\n\n def get_query(self, params, **context):\n return Widget.query.filter_by(user=current_user)\n" } ]
13
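The Flask-Foundation README above advertises an ActiveModel mixin for db operations (e.g. User.save()), and controllers/widgets.py calls widget.save(), yet appname/models/db.py itself is not among the thirteen captured files. The following is a minimal reconstruction consistent with that usage; the method bodies are assumptions inferred from the visible calls, not the repository's actual code.

```python
# Hypothetical appname/models/db.py -- inferred from `from db import db,
# ActiveModel` in models/widget.py and widget.save() in controllers/widgets.py.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class ActiveModel(object):
    """ActiveRecord-style helpers so model instances can persist themselves."""

    def save(self):
        db.session.add(self)
        db.session.commit()
        return self

    def delete(self):
        db.session.delete(self)
        db.session.commit()
```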
abhisheksom15/Interview-Bot
https://github.com/abhisheksom15/Interview-Bot
50a224f1fb4748c79127385836c3c14e767e6e5b
f964de1089341782744f71e96da31904db048588
972f6f642bd480257e3d65f712e63edc3b53702d
refs/heads/master
2020-04-26T20:51:27.353036
2019-05-31T11:10:28
2019-05-31T11:10:28
173,823,776
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6918287873268127, "alphanum_fraction": 0.6980544924736023, "avg_line_length": 40.45161437988281, "blob_id": "06beff4f03753b69e4ad3fbb891a7c7c8abbe4c6", "content_id": "55ca500494876afbd586abc27bac6960b2b67a36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1285, "license_type": "no_license", "max_line_length": 90, "num_lines": 31, "path": "/InterviewBot/InterviewBot/urls.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "\"\"\"InterviewBot URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import include\nfrom Front import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.welcome,name='welcome'),\n path('Front/',include('Front.urls')),\n path('special/',views.special,name=\"special\"),\n path('logout/',views.user_logout,name=\"logout\"),\n path('Interview/<int:count>/<int:pk>/<int:comp_pk>',views.Interview,name='Interview'),\n path('complete/<int:pk>',views.complete,name='complete'),\n path('description/<int:pk>',views.description,name='description'),\n path('result/<int:pk_cp>',views.score,name='result')\n]\n" }, { "alpha_fraction": 0.5500878691673279, "alphanum_fraction": 0.5752782821655273, "avg_line_length": 37.79545593261719, "blob_id": "36c2225287a80bea258085cf2059e8ef54050c38", "content_id": "45fc5d3406742ed977fe32164a16fbfd2768a72d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 114, "num_lines": 44, "path": "/InterviewBot/Front/migrations/0005_auto_20190318_1737.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-03-18 12:07\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Front', '0004_auto_20190308_1914'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='company_marks',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('cm_marks', models.PositiveSmallIntegerField()),\n ('cm_position', models.CharField(max_length=256)),\n ],\n ),\n migrations.CreateModel(\n name='company_post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('Company_name', models.CharField(max_length=256)),\n ('Position', models.CharField(max_length=256)),\n ('Difficulty_level', models.PositiveIntegerField()),\n ('salary_in_rupees', models.PositiveIntegerField()),\n ('min_experience_in_years', models.PositiveIntegerField()),\n ('Discreption', models.CharField(max_length=512)),\n ],\n ),\n migrations.AddField(\n model_name='company_marks',\n name='Company_name',\n 
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='Front.company_post'),\n ),\n migrations.AddField(\n model_name='company_marks',\n name='user_name',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='Front.result_user'),\n ),\n ]\n" }, { "alpha_fraction": 0.49743589758872986, "alphanum_fraction": 0.5871794819831848, "avg_line_length": 20.66666603088379, "blob_id": "486d5a2ec7795633372bea3ad7fc38a346753647", "content_id": "a8b8950708760a1bbbbec982c90693a9b0593a20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 52, "num_lines": 18, "path": "/InterviewBot/Front/migrations/0008_auto_20190322_1337.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-03-22 08:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Front', '0007_auto_20190319_1511'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='result_user',\n name='marks',\n field=models.CharField(max_length=2048),\n ),\n ]\n" }, { "alpha_fraction": 0.6803333163261414, "alphanum_fraction": 0.6918333172798157, "avg_line_length": 37.709678649902344, "blob_id": "98c9d21e167a265ca49610acc4e32b79d64b7d69", "content_id": "fbfe7ef79fc2628060976eae314e25840a31ec10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6000, "license_type": "no_license", "max_line_length": 654, "num_lines": 155, "path": "/InterviewBot/Front/models.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass UserProfileInfo(models.Model):\n\n user=models.OneToOneField(User,on_delete=models.CASCADE)\n\n phone_number=models.IntegerField()\n\n date_of_birth=models.CharField(max_length=15)\n\n Address=models.CharField(max_length=200)\n\n school_10th=models.CharField(max_length=100)\n\n percentage_10th=models.IntegerField()\n\n school_12th=models.CharField(max_length=100)\n\n percentage_12th=models.IntegerField()\n Btech='B.tech'\n Bsc='B.sc'\n BCA='BCA'\n MCA='MCA'\n BBA='BBA'\n BE='B.E'\n NONE='None'\n CSE='Computer science'\n IT='Information technology'\n CE='Computer engineering'\n MATH='Mathematics'\n ML='Machine learning'\n ECE='Electronics and communication'\n\n grad_choices=((Btech,'B.tech'),(Bsc,'B.sc'),(BCA,'BCA'),(MCA,'MCA'),(BBA,'BBA'),(BE,'B.E'),(NONE,'None'))\n graduation=models.CharField(choices=grad_choices,max_length=7)\n\n branch_choices=((CSE,'Computer science'),(IT,'Information technology'),(CE,'Computer engineering'),(MATH,'Mathematics'),(ML,'Machine learning'),(ECE,'Electronics and communication'),(NONE,'None'))\n branch_graduation=models.CharField(choices=branch_choices,max_length=30)\n Cplusplus='C++'\n C='C'\n JAVA='JAVA'\n Csharp='C#'\n NET='.NET'\n PYTHON='PYTHON'\n JS='JavaScript'\n HTML='HTML'\n CSS='CSS'\n DBMS='Database'\n NETWORK='Networking'\n CLOUD='Cloud'\n AND='Android'\n AI='AI'\n DATASCIENCE='Data science'\n\n technical_choices=((Cplusplus,'C++'),(C,'C'),(JAVA,'JAVA'),(Csharp,'C#'),(NET,'.NET'),(PYTHON,'PYTHON'),(JS,'JavaScript'),(HTML,'HTML'),(CSS,'CSS'),(DBMS,'Database'),(NETWORK,'Networking'),(CLOUD,'Cloud'),(AND,'Android'),(ML,'ML'),(AI,'AI'),(DATASCIENCE,'Data science'))\n\n 
technical_skills_and_language=models.CharField(choices=technical_choices,max_length=15)\n\n Other_skills=models.CharField(choices=technical_choices,blank=True,max_length=15)\n Reading_books='Reading books'\n reading_novels='reading novels'\n Cooking='Cooking'\n Movies='Watching Movies'\n Badminton='Playing Badminton'\n Cricket='Playing Cricket'\n Football='Playing football'\n basketball='Playing basketball'\n Chess='Playing Chess'\n GYM='Going Gym'\n Music='listening Music'\n Dance='Dancing'\n\n hob_choices=((Reading_books,'Reading books'),(reading_novels,'reading novels'),(Cooking,'Cooking'),(Movies,'Watching Movies'),(Badminton,'Playing Badminton'),(Cricket,'Playing Cricket'),(Football,'Playing football'),(basketball,'Playing basketball'),(Chess,'Playing Chess'),(GYM,'Going Gym'),(Music,'listening Music'),(Dance,'Dancing'))\n Main_Hobbies=models.CharField(choices=hob_choices,max_length=25)\n\n Other_Hobbies=models.CharField(choices=hob_choices ,blank=True,max_length=25)\n\n achievement=models.CharField(max_length=100,blank=True)\n\n Project_Done=models.CharField(max_length=100,blank=True)\n\n profile_picture=models.ImageField(upload_to='profile_pics',blank=True)\n\n def __str__(self):\n return self.user.username\nclass questions(models.Model):\n Reading_books='Reading books'\n reading_novels='reading novels'\n Cooking='Cooking'\n Movies='Watching Movies'\n Badminton='Playing Badminton'\n Cricket='Playing Cricket'\n Football='Playing football'\n basketball='Playing basketball'\n Chess='Playing Chess'\n GYM='Going Gym'\n Music='listening Music'\n Dance='Dancing'\n Cplusplus='C++'\n C='C'\n JAVA='JAVA'\n Csharp='C#'\n NET='.NET'\n PYTHON='PYTHON'\n JS='JavaScript'\n HTML='HTML'\n CSS='CSS'\n DBMS='Database'\n NETWORK='Networking'\n CLOUD='Cloud'\n AND='Android'\n AI='AI'\n DATASCIENCE='Data science'\n ML='Machine learning'\n\n HR='HR'\n TECHNICAL='TECHNICAL'\n QUESTION_TYPE=((HR,'HR'),(TECHNICAL,'TECHNICAL'))\n Question_Type=models.CharField(choices=QUESTION_TYPE,max_length=10)\n GK='GK'\n Concept=\"Concept\"\n cat_choice=((Concept,\"Concept\"),(HR,'HR'),(TECHNICAL,'TECHNICAL'),(GK,'GK'),(Reading_books,'Reading books'),(reading_novels,'reading novels'),(Cooking,'Cooking'),(Movies,'Watching Movies'),(Badminton,'Playing Badminton'),(Cricket,'Playing Cricket'),(Football,'Playing football'),(basketball,'Playing basketball'),(Chess,'Playing Chess'),(GYM,'Going Gym'),(Music,'listening Music'),(Dance,'Dancing'),(Cplusplus,'C++'),(C,'C'),(JAVA,'JAVA'),(Csharp,'C#'),(NET,'.NET'),(PYTHON,'PYTHON'),(JS,'JavaScript'),(HTML,'HTML'),(CSS,'CSS'),(DBMS,'Database'),(NETWORK,'Networking'),(CLOUD,'Cloud'),(AND,'Android'),(ML,'ML'),(AI,'AI'),(DATASCIENCE,'Data science'))\n Category=models.CharField(choices=cat_choice,max_length=50)\n Difficulty_level=models.PositiveSmallIntegerField()\n Question=models.CharField(max_length=256)\n Answer=models.CharField(max_length=512)\n\n def __str__(self):\n return self.Question\nclass result_user(models.Model):\n user_result=models.OneToOneField(User,on_delete=models.CASCADE)\n marks=models.CharField(max_length=2048)\n date_of_exam= models.DateTimeField(auto_now_add=True)\n quest_list=models.CharField(max_length=2048,default=\"\")\n exam_name=models.CharField(max_length=256)\n def __str__(self):\n return self.user_result.username\nclass company_post(models.Model):\n Company_name=models.CharField(max_length=256)\n Position=models.CharField(max_length=256)\n Difficulty_level=models.PositiveIntegerField()\n salary_in_rupees=models.PositiveIntegerField()\n 
min_experience_in_years=models.PositiveIntegerField()\n Discreption=models.CharField(max_length=512)\n def __str__(self):\n return self.Company_name\nclass company_marks(models.Model):\n user_name=models.ForeignKey(result_user,on_delete=models.CASCADE)\n Company_name=models.ForeignKey(company_post,on_delete=models.CASCADE)\n cm_marks=models.PositiveSmallIntegerField()\n cm_position=models.CharField(max_length=256)\n def __str__(self):\n return self.user_name.user_result.username\n" }, { "alpha_fraction": 0.7124277353286743, "alphanum_fraction": 0.72398841381073, "avg_line_length": 48.42856979370117, "blob_id": "4c56f2538156956212f295fcffebeff530b4c53d", "content_id": "56c7327103254cb137788c912568d073e46f4d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 280, "num_lines": 14, "path": "/InterviewBot/Front/forms.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom Front.models import UserProfileInfo\n\nclass UserForm(forms.ModelForm):\n password=forms.CharField(widget=forms.PasswordInput())\n\n class Meta():\n model = User\n fields=( 'username','email','password','first_name','last_name' )\nclass UserProfileInfoForm(forms.ModelForm):\n class Meta():\n model = UserProfileInfo\n fields=('phone_number', 'date_of_birth','Address','school_10th','percentage_10th','school_12th','percentage_12th','graduation','branch_graduation','technical_skills_and_language','Other_skills','Main_Hobbies','Other_Hobbies','achievement','Project_Done','profile_picture')\n" }, { "alpha_fraction": 0.5663900375366211, "alphanum_fraction": 0.6307054162025452, "avg_line_length": 24.36842155456543, "blob_id": "f7c09afabccd78ec82f3ab91a10ba0e749987d45", "content_id": "0743de502bc79de4c6072aa75891fc6ee928be85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 105, "num_lines": 19, "path": "/InterviewBot/Front/migrations/0007_auto_20190319_1511.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-03-19 09:41\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Front', '0006_auto_20190319_1455'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='company_marks',\n name='user_name',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Front.result_user'),\n ),\n ]\n" }, { "alpha_fraction": 0.5647059082984924, "alphanum_fraction": 0.5826625227928162, "avg_line_length": 66.29166412353516, "blob_id": "dbdb6102bb20b339784249b1e286a3d6f17bc357", "content_id": "3d77849d3decf5ba7071f010e34010c5cf6eae4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 850, "num_lines": 24, "path": "/InterviewBot/Front/migrations/0002_questions.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-03-04 18:24\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Front', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='questions',\n fields=[\n ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('Question_Type', models.CharField(choices=[('HR', 'HR'), ('TECHNICAL', 'TECHNICAL')], max_length=10)),\n ('Category', models.CharField(choices=[('Reading books', 'Reading books'), ('reading novels', 'reading novels'), ('Cooking', 'Cooking'), ('Watching Movies', 'Watching Movies'), ('Playing Badminton', 'Playing Badminton'), ('Playing Cricket', 'Playing Cricket'), ('Playing football', 'Playing football'), ('Playing basketball', 'Playing basketball'), ('Playing Chess', 'Playing Chess'), ('Going Gym', 'Going Gym'), ('listening Music', 'listening Music'), ('Dancing', 'Dancing'), ('C++', 'C++'), ('C', 'C'), ('JAVA', 'JAVA'), ('C#', 'C#'), ('.NET', '.NET'), ('PYTHON', 'PYTHON'), ('JavaScript', 'JavaScript'), ('HTML', 'HTML'), ('CSS', 'CSS'), ('Database', 'Database'), ('Networking', 'Networking'), ('Cloud', 'Cloud'), ('Android', 'Android'), ('Machine learning', 'ML'), ('AI', 'AI'), ('Data science', 'Data science')], max_length=50)),\n ('Difficulty_level', models.PositiveSmallIntegerField()),\n ('Question', models.CharField(max_length=256)),\n ('Answer', models.CharField(max_length=512)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8385093212127686, "alphanum_fraction": 0.8385093212127686, "avg_line_length": 39.25, "blob_id": "eeadebc2850c3dd681c4914d999f7100b9ea3e1a", "content_id": "9494c190ff72ebb36beb66acbf61244560a1e928", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 89, "num_lines": 8, "path": "/InterviewBot/Front/admin.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom Front.models import UserProfileInfo,questions,result_user,company_post,company_marks\n# Register your models here.\nadmin.site.register(UserProfileInfo)\nadmin.site.register(questions)\nadmin.site.register(result_user)\nadmin.site.register(company_post)\nadmin.site.register(company_marks)\n" }, { "alpha_fraction": 0.5012345910072327, "alphanum_fraction": 0.5876542925834656, "avg_line_length": 21.5, "blob_id": "21446e959122c303d2cc4ed4e94b8b65458bf8d0", "content_id": "7d94ceb34a8bcab91db44180a912a6ea7c086a96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 64, "num_lines": 18, "path": "/InterviewBot/Front/migrations/0009_result_user_quest_list.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-01 11:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Front', '0008_auto_20190322_1337'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='result_user',\n name='quest_list',\n field=models.CharField(default='', max_length=2048),\n ),\n ]\n" }, { "alpha_fraction": 0.5699588656425476, "alphanum_fraction": 0.6337448358535767, "avg_line_length": 24.578947067260742, "blob_id": "c9c1c32205f43345bffdec1227abc991b1a1b2ce", "content_id": "ae13c0407e16a6c643ab5171c36294b476100cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 106, "num_lines": 19, "path": "/InterviewBot/Front/migrations/0006_auto_20190319_1455.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated 
by Django 2.1.7 on 2019-03-19 09:25\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('Front', '0005_auto_20190318_1737'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='company_marks',\n            name='Company_name',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Front.company_post'),\n        ),\n    ]\n" }, { "alpha_fraction": 0.6615508794784546, "alphanum_fraction": 0.6720516681671143, "avg_line_length": 37.6875, "blob_id": "3a752cd541c7a33b92936ae1dc68c7b50f1184e2", "content_id": "9d4d2ca78e2b0201dc21269b05d046abc68660b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 246, "num_lines": 32, "path": "/InterviewBot/templates/welcome.html", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "{% extends \"basic.html\" %}\n{% block body_block %}\n<div class=\"jumbotron\">\n  {% if user.is_authenticated %}\n  <h1>Welcome to Interview Bot, {{user.first_name}} {{user.last_name}}.</h1>\n\n  <br>\n  <a href=\"{% url 'Interview' count=0 pk=user.pk comp_pk=1 %}\" class='btn btn-warning'>Start Interview</a>\n  {% else %}\n  <h1>Welcome to Interview Bot,</h1>\n  <p>For sign up and Registration, press the Registration button on the navigation bar. After completing that step, go to the Login Page and start your interview. Please share your experience with us; your result will be available to you soon.</p>\n  <br>\n  <br>\n  <center><h2 style=\"color:red;\">Please Login or Register for using Interview Bot</h2></center>\n\n  {% endif %}\n</div>\n<p>We welcome you to the interview bot made by Abhishek Som, Sagar Yadav & Aditya Kumar Verma.\n   Students of Meerut Institute of Engineering & Technology\n   B.tech Computer Science 4th year\n</p>\n{% if user.is_authenticated %}\n<h3><strong>Jobs:</strong> </h3>\n{% for company in cp %}\n<h4>In {{ company.Company_name }} for the post of {{ company.Position }}. 
</h4>\n<a href=\"{% url 'description' pk=company.pk %}\" class='btn btn-primary'>{{company.Company_name}} : {{company.Position}}</a>\n{% endfor %}\n<br>\n\n\n{% endif %}\n{% endblock %}\n" }, { "alpha_fraction": 0.5899354815483093, "alphanum_fraction": 0.6030967831611633, "avg_line_length": 95.875, "blob_id": "d4efe0b9a62a858852ce5636c9c4e39ad1a4a011", "content_id": "03499b8d051d0e523b339de7c03a43ac75cc88f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3875, "license_type": "no_license", "max_line_length": 527, "num_lines": 40, "path": "/InterviewBot/Front/migrations/0001_initial.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-01-23 20:55\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserProfileInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('phone_number', models.IntegerField()),\n ('date_of_birth', models.CharField(max_length=15)),\n ('Address', models.CharField(max_length=200)),\n ('school_10th', models.CharField(max_length=100)),\n ('percentage_10th', models.IntegerField()),\n ('school_12th', models.CharField(max_length=100)),\n ('percentage_12th', models.IntegerField()),\n ('graduation', models.CharField(choices=[('B.tech', 'B.tech'), ('B.sc', 'B.sc'), ('BCA', 'BCA'), ('MCA', 'MCA'), ('BBA', 'BBA'), ('B.E', 'B.E'), ('None', 'None')], max_length=7)),\n ('branch_graduation', models.CharField(choices=[('Computer science', 'Computer science'), ('Information technology', 'Information technology'), ('Computer engineering', 'Computer engineering'), ('Mathematics', 'Mathematics'), ('Machine learning', 'Machine learning'), ('Electronics and communication', 'Electronics and communication'), ('None', 'None')], max_length=30)),\n ('technical_skills_and_language', models.CharField(choices=[('C++', 'C++'), ('C', 'C'), ('JAVA', 'JAVA'), ('C#', 'C#'), ('.NET', '.NET'), ('PYTHON', 'PYTHON'), ('JavaScript', 'JavaScript'), ('HTML', 'HTML'), ('CSS', 'CSS'), ('Database', 'Database'), ('Networking', 'Networking'), ('Cloud', 'Cloud'), ('Android', 'Android'), ('Machine learning', 'ML'), ('AI', 'AI'), ('Data science', 'Data science')], max_length=15)),\n ('Other_skills', models.CharField(blank=True, choices=[('C++', 'C++'), ('C', 'C'), ('JAVA', 'JAVA'), ('C#', 'C#'), ('.NET', '.NET'), ('PYTHON', 'PYTHON'), ('JavaScript', 'JavaScript'), ('HTML', 'HTML'), ('CSS', 'CSS'), ('Database', 'Database'), ('Networking', 'Networking'), ('Cloud', 'Cloud'), ('Android', 'Android'), ('Machine learning', 'ML'), ('AI', 'AI'), ('Data science', 'Data science')], max_length=15)),\n ('Main_Hobbies', models.CharField(choices=[('Reading books', 'Reading books'), ('reading novels', 'reading novels'), ('Cooking', 'Cooking'), ('Watching Movies', 'Watching Movies'), ('Playing Badminton', 'Playing Badminton'), ('Playing Cricket', 'Playing Cricket'), ('Playing football', 'Playing football'), ('Playing basketball', 'Playing basketball'), ('Playing Chess', 'Playing Chess'), ('Going Gym', 'Going Gym'), ('listening Music', 'listening Music'), ('Dancing', 'Dancing')], max_length=25)),\n ('Other_Hobbies', models.CharField(blank=True, choices=[('Reading books', 'Reading books'), 
('reading novels', 'reading novels'), ('Cooking', 'Cooking'), ('Watching Movies', 'Watching Movies'), ('Playing Badminton', 'Playing Badminton'), ('Playing Cricket', 'Playing Cricket'), ('Playing football', 'Playing football'), ('Playing basketball', 'Playing basketball'), ('Playing Chess', 'Playing Chess'), ('Going Gym', 'Going Gym'), ('listening Music', 'listening Music'), ('Dancing', 'Dancing')], max_length=25)),\n ('achievement', models.CharField(blank=True, max_length=100)),\n ('Project_Done', models.CharField(blank=True, max_length=100)),\n ('profile_picture', models.ImageField(blank=True, upload_to='profile_pics')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5790015459060669, "alphanum_fraction": 0.5975295901298523, "avg_line_length": 61.67741775512695, "blob_id": "debc3c7db91502fb68ba786cce0eae72ab89c439", "content_id": "f576cfa1470b52baf30ae7cc2d5bdcc2ee3b572c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 918, "num_lines": 31, "path": "/InterviewBot/Front/migrations/0004_auto_20190308_1914.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-03-08 13:44\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('Front', '0003_auto_20190305_1241'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='result_user',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('marks', models.PositiveIntegerField()),\n ('date_of_exam', models.DateTimeField(auto_now_add=True)),\n ('exam_name', models.CharField(max_length=256)),\n ('user_result', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterField(\n model_name='questions',\n name='Category',\n field=models.CharField(choices=[('Concept', 'Concept'), ('HR', 'HR'), ('TECHNICAL', 'TECHNICAL'), ('GK', 'GK'), ('Reading books', 'Reading books'), ('reading novels', 'reading novels'), ('Cooking', 'Cooking'), ('Watching Movies', 'Watching Movies'), ('Playing Badminton', 'Playing Badminton'), ('Playing Cricket', 'Playing Cricket'), ('Playing football', 'Playing football'), ('Playing basketball', 'Playing basketball'), ('Playing Chess', 'Playing Chess'), ('Going Gym', 'Going Gym'), ('listening Music', 'listening Music'), ('Dancing', 'Dancing'), ('C++', 'C++'), ('C', 'C'), ('JAVA', 'JAVA'), ('C#', 'C#'), ('.NET', '.NET'), ('PYTHON', 'PYTHON'), ('JavaScript', 'JavaScript'), ('HTML', 'HTML'), ('CSS', 'CSS'), ('Database', 'Database'), ('Networking', 'Networking'), ('Cloud', 'Cloud'), ('Android', 'Android'), ('Machine learning', 'ML'), ('AI', 'AI'), ('Data science', 'Data science')], max_length=50),\n ),\n ]\n" }, { "alpha_fraction": 0.6104016900062561, "alphanum_fraction": 0.6235198378562927, "avg_line_length": 37.627803802490234, "blob_id": "f22ccd02492fbfd3214586fdd8f9481afc96a0b3", "content_id": "74759fcebfda0656ac90d4dfe0e7c0e13c1c6fa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8614, "license_type": "no_license", "max_line_length": 173, "num_lines": 223, "path": 
"/InterviewBot/Front/views.py", "repo_name": "abhisheksom15/Interview-Bot", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,get_object_or_404,get_list_or_404\nfrom Front.forms import UserForm,UserProfileInfoForm\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.contrib.auth import authenticate,login,logout\nfrom Front.models import UserProfileInfo,User,questions,result_user,company_post,company_marks\nfrom gtts import gTTS\nfrom django.contrib.auth.password_validation import validate_password\nfrom django.core.exceptions import ValidationError\n\nimport random\n# Create your views here.\ndef welcome(request):\n cp=company_post.objects.all()\n return render(request,'welcome.html',{'cp':cp})\ndef score(request,pk_cp):\n cp=get_object_or_404(company_post,pk=pk_cp)\n try:\n cm=list(company_marks.objects.filter(Company_name=cp,cm_position=cp.Position))\n return render(request,'score.html',{'cm':cm,'company':cp})\n except company_marks.DoesNotExist:\n HttpResponse(\"No User give the interview till now!\")\n@login_required\ndef description(request,pk):\n comp=get_object_or_404(company_post,pk=pk)\n return render(request,'description.html',{'company':comp})\n@login_required\ndef complete(request,pk):\n ru=get_object_or_404(result_user,pk=pk)\n mark=ru.marks\n if(int(mark)>=75):\n s=\"Congratulation, You passed the Technical round and soon your HR round will be conducted\"\n else:\n s=\"Sorry, Your score is below the passing marks, better luck next time\"\n return render(request,'done.html',{'marks':mark,'s':s})\ndef register(request):\n\n registered=False\n\n if(request.method==\"POST\"):\n user_form=UserForm(data=request.POST)\n profile_form=UserProfileInfoForm(data=request.POST)\n\n if(user_form.is_valid() and profile_form.is_valid()):\n user=user_form.save(commit=False)\n try:\n validate_password(user.password, user)\n except ValidationError as e:\n user_form.add_error('password', e)\n return render(request,'Registration.html',{'user_form':user_form,'profile_form':profile_form,'registered':registered,\"Warning\":\"Password not strong\"})\n user.set_password(user.password)\n user.save()\n profile=profile_form.save(commit=False)\n profile.user=user\n\n if('profile_pic' in request.FILES):\n profile.profile_pic=request.FILES['profile_pic']\n profile.save()\n\n registered=True\n else:\n print(user_form.errors,profile_form.errors)\n else:\n user_form=UserForm()\n profile_form=UserProfileInfoForm()\n return render(request,'Registration.html',{'user_form':user_form,'profile_form':profile_form,'registered':registered})\n\n\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('welcome'))\n\n@login_required\ndef special(request):\n HttpResponse('you are logged in !! 
nice')\n\n\n@login_required\ndef Interview(request,pk,comp_pk,count=0):\n company=get_object_or_404(company_post,pk=comp_pk)\n exam_diff=company.Difficulty_level\n max_diff=exam_diff+10\n min_diff=exam_diff-5\n user=get_object_or_404(User,pk=pk)\n\n extras=[\"in\",\"is\",\"am\",\"i\",\"not\",\"did\",\"of\",\"where\",\"what\",\"are\",\"here\",\"there\",\"the\",\"it\",\"it's\",\"its\",\"you\",\"yours\",\"me\",\"my\",\"to\",\"this\",\"they\",\"there\",\"then\",\"than\"]\n\n\n user_profile_info=get_object_or_404(UserProfileInfo,user=user)\n tech=user_profile_info.technical_skills_and_language\n hr=[\"HR\",\"GK\",user_profile_info.Main_Hobbies,user_profile_info.Other_Hobbies]\n TECH=[user_profile_info.technical_skills_and_language,user_profile_info.Other_skills,\"Concept\"]\n fscore=[]\n questions1=[]\n answer1=[]\n score1=[]\n c=0\n for cat in TECH:\n if(get_list_or_404(questions,Question_Type=\"TECHNICAL\",Category=cat)):\n questions_tech=get_list_or_404(questions,Question_Type=\"TECHNICAL\",Category=cat)\n for quest in questions_tech:\n if(quest.Difficulty_level>=min_diff and quest.Difficulty_level<=max_diff):\n questions1.append(quest.Question)\n answer1.append(quest.Answer)\n score1.append(quest.Difficulty_level)\n if(count==0):\n random_questions_total=random.randint(15,50)\n r_list=[]\n for i in range(random_questions_total):\n r_list.append(random.randrange(len(questions1)))\n r_list=list(set(r_list))\n r_string=\"\"\n for val in r_list:\n r_string+=(str(val)+' ')\n try:\n p= result_user.objects.get(user_result=user)\n p.marks=''\n p.quest_list=r_string\n p.save()\n except result_user.DoesNotExist:\n p=result_user(user_result=user,marks=0,exam_name=\"TEST\",quest_list=r_string)\n p.save()\n try:\n checking_test_company= company_marks.objects.get(Company_name=company,cm_position=company.Position,user_name=p)\n return HttpResponse(\"You have already given the Interview for this post ! 
\")\n except company_marks.DoesNotExist:\n true=True\n m_r=get_object_or_404(result_user,user_result=user)\n list_quest_num=m_r.quest_list.split()\n questions2=[]\n answer2=[]\n score2=[]\n for var in list_quest_num:\n i=int(var)\n questions2.append(questions1[i])\n answer2.append(answer1[i])\n score2.append(score1[i])\n questions1=questions2\n answer1=answer2\n score1=score2\n print(questions1)\n if(request.method=='POST'):\n answer_input=request.POST.get('answer')\n ans=answer1[count-1]\n ans=ans.lower()\n ans_match_list=ans.split()\n answer_input=answer_input.lower()\n answer_list=answer_input.split()\n for word in extras:\n if word in answer_list:\n answer_list.remove(word)\n if word in ans_match_list:\n ans_match_list.remove(word)\n l=len(ans_match_list)\n c_ans=0\n l_input=len(answer_list)\n\n for word in ans_match_list:\n for i in range(l_input):\n if(word==answer_list[i]):\n c_ans+=1\n if(l==1 or l==2):\n req_ans=1\n elif(l<=5):\n req_ans=l-1\n elif(l<=10):\n req_ans=l//2\n elif(l<=20):\n req_ans=l//3\n else:\n req_ans=l//3\n if(req_ans<=c_ans):\n score_question=score1[count-2]\n elif(req_ans>=2 and (req_ans//4)<=c_ans):\n score_question=int((c_ans/req_ans)*score1[count-2])\n else:\n score_question=0\n\n m_r.marks+=(' '+str(count-1)+','+str(score_question))\n m_r.save(update_fields=['marks'])\n maxc=len(questions1)\n if(count==maxc):\n marks_list=m_r.marks.split(' ')\n marks_list_score=[0 for x in range(len(score1))]\n for i in range(1,len(marks_list)):\n marks_item=marks_list[i].split(',')\n print(marks_list)\n print(marks_item)\n marks_list_score[int(marks_item[0])]=int(marks_item[1])\n m_r.marks=str(int((sum(marks_list_score)/sum(score1))*100))\n m_r.save()\n company_marks_stored=company_marks(user_name=m_r,Company_name=company,cm_marks=int(m_r.marks),cm_position=company.Position)\n company_marks_stored.save()\n return HttpResponseRedirect(reverse('complete',args=(m_r.pk,)))\n else:\n count+=1\n ques_audio=gTTS(questions1[count-1])\n file_loc='static/'+str(user.username)+str(count-1)+'.mp3'\n file_name=str(user.username)+str(count-1)+'.mp3'\n ques_audio.save(file_loc)\n return render(request,'Interview.html',{'questions1':questions1[count-1],'c':count,'file_name':file_name,'company':company})\n\ndef user_login(request):\n if(request.method=='POST'):\n username=request.POST.get('username')\n password=request.POST.get('password')\n user=authenticate(username=username,password=password)\n if(user):\n if(user.is_active):\n login(request,user)\n user_test=user\n return HttpResponseRedirect(reverse('welcome'))\n else:\n return HttpResponse(\"ACCOUNT IS NOT ACTIVE\")\n else:\n print(\"someone tried to access account unauthenically and failed\")\n print(\"Username {} and password {}\".format(username,password))\n return HttpResponse(\"invalid login details\")\n else:\n return render(request,'Login.html',{})\n" } ]
14
plb2018/DATA607
https://github.com/plb2018/DATA607
18dfe232c63b7c9ce5c471839a8db21299f5f3cc
b0e9607ec13a75ecf62d0ad0695722da4ee79b2b
61a5f496234f3b8fe6a691eedd8f796463f95035
refs/heads/master
2021-05-04T14:10:18.346756
2018-05-14T03:44:13
2018-05-14T03:44:13
120,194,610
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6628487706184387, "alphanum_fraction": 0.6804698705673218, "avg_line_length": 23.992366790771484, "blob_id": "452cf09cded0a7ebef7d2f777b8dfca87f88206a", "content_id": "b34f1603846772ac15392fbccb7d10e158bdfce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 3405, "license_type": "no_license", "max_line_length": 355, "num_lines": 131, "path": "/DATA607_week2.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - WEEK 2\"\r\nauthor: \"Paul Britton\"\r\noutput: \r\n html_document:\r\n theme: cerulean\r\n highlight: pygments\r\n\r\n---\r\n\r\nMy friends and family have probably had enough of me talking about data, so rather than interview them, I decided to use a movie dataset that I found on the internet [here](https://grouplens.org/datasets/movielens/). I've copied the data to my [github](https://github.com/plb2018/DATA607/tree/master/ml-latest-small) and reference that copy in my code. \r\n\r\nI've opted to do all my MySQL work directly from R, so the only file that i have to show is a .rmd which can be found in my [here](https://github.com/plb2018/DATA607/blob/master/DATA607_week2.Rmd) in my github and [here on rpubs](http://rpubs.com/plb_lttfer/359133)\r\n\r\n\r\n### Load Required Packages\r\n\r\n```{r results='hide', message=FALSE, warning=FALSE}\r\nlibrary(RMySQL)\r\nlibrary(data.table)\r\n\r\n```\r\n\r\n\r\n### Read in username and password from file\r\n\r\n```{r results='hide'}\r\n\r\nuser_df <- read.table(\"mysql_temp_pw.csv\",header=TRUE,sep=\",\")\r\n\r\nuser = toString(user_df[1,\"user\"])\r\npw = toString(user_df[1,\"password\"])\r\n\r\n```\r\n\r\nFirst we'll open a connection to MySQL. Note that I've pre-created a database called \"DATA_607\" using the following SQL command: CREATE DATABASE \"DATA_607\".\r\n\r\n```{r}\r\ncon = dbConnect(MySQL(), user=user, password=pw, dbname='DATA_607', host='localhost')\r\n\r\n\r\n```\r\n\r\n\r\nNext we'll read the movie data into dataframes. The data we care about are contained in 2 separate files (ratings, movies) which we'll end up combining into a single db table later. \r\n\r\n```{r}\r\n\r\n\r\nratings <- read.table(\"https://raw.githubusercontent.com/plb2018/DATA607/master/ml-latest-small/ratings.csv\",header = TRUE, sep = \",\")\r\n\r\nmovies <- read.table(\"https://raw.githubusercontent.com/plb2018/DATA607/master/ml-latest-small/movies.csv\",header = TRUE, sep = \",\",fill = TRUE)\r\n\r\n```\r\n\r\nAnd we'll take a look at the data:\r\n\r\n```{r}\r\n\r\nhead(ratings,5)\r\n\r\n```\r\n\r\n```{r}\r\n\r\nhead(movies,5)\r\n\r\n```\r\n\r\n\r\nThe data look good, so now we'll load the data into individual tables in MySQL:\r\n\r\n\r\n```{r results='hide'}\r\ndbWriteTable(con, name='ratings', value=ratings)\r\ndbWriteTable(con, name='movies', value=movies)\r\n\r\n```\r\n\r\nNow we should have 2 tables: \"movies\" and \"ratings\". Let's check:\r\n\r\n```{r}\r\n\r\ndbListTables(con)\r\n```\r\n\r\nLooks good! 
Now we'll pull some data and take a peek just to make sure that everything is as we expect:\r\n\r\n```{r}\r\n\r\nquery = dbSendQuery(con, \"SELECT * FROM movies LIMIT 5\")\r\nmovie_data = fetch(query)\r\nmovie_data\r\n```\r\n\r\n```{r}\r\n\r\nquery = dbSendQuery(con, \"SELECT * FROM ratings LIMIT 5\")\r\nrating_data = fetch(query)\r\nrating_data\r\n```\r\n\r\nThe tables look fine, so we'll try to join the data \"movies\" and \"ratings\" tables using the common \"movieId\" field and create a new \"movie_ratings\" table to house the joined data:\r\n\r\n\r\n\r\n```{r results='hide'}\r\n\r\ndbSendQuery(con, \"CREATE TABLE movie_ratings AS (\r\nSELECT movies.movieId,ratings.userId, ratings.rating, movies.title,movies.genres \r\nFROM movies\r\nINNER JOIN ratings ON movies.movieId = ratings.movieId);\")\r\n\r\n```\r\nAnd finally, we'll pull a sample out of the newly created table into an R dataframe:\r\n\r\n```{r}\r\n\r\nquery = dbSendQuery(con, \"SELECT * FROM movie_ratings LIMIT 5\")\r\ndata = fetch(query)\r\n\r\ndata\r\n```\r\n\r\nFinally, we'll close our db connection.\r\n\r\n\r\n```{r results='hide', message=FALSE, warning=FALSE}\r\ndbDisconnect(con)\r\n\r\n```\r\n" }, { "alpha_fraction": 0.6083636283874512, "alphanum_fraction": 0.630545437335968, "avg_line_length": 21.092437744140625, "blob_id": "420632995961f9c40c434350d4452674a33387c6", "content_id": "f8468f734a43da7ee35d81697eb8efcdad8d0f47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 2750, "license_type": "no_license", "max_line_length": 256, "num_lines": 119, "path": "/DATA607_week9.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - WEEK 9 Assignment\"\r\nauthor: \"Paul Britton\"\r\noutput:\r\n  html_document:\r\n    highlight: pygments\r\n    theme: cerulean\r\n    toc: yes\r\n  pdf_document:\r\n    toc: yes\r\n---\r\n\r\n```{r, echo=FALSE}\r\nrm(list = ls())\r\n```\r\n\r\n\r\nThe task for week 9 was to get a NYT API key, construct an interface to R, grab some data and finally return a dataframe. I chose to work with the \"Archive_api\" because I actually have some future tasks that I'd like to tackle that involve old newspapers.\r\n\r\nThis file is available on rpubs [here](http://rpubs.com/plb_lttfer/375108) and in my github [here]()\r\n\r\n\r\n### Load the Libraries\r\n\r\n```{r, message=FALSE, warning=FALSE}\r\n\r\nlibrary(jsonlite)\r\nlibrary(httr)\r\nlibrary(knitr)\r\n\r\n```\r\n\r\n\r\n### Build a function to Get JSON from NYT Archives API\r\n\r\nI created a function to query the NYT Archive API and return a dataframe containing the data retrieved. Also, in the development process, I noticed that my request failed frequently so I've set my function up to retry using the RETRY() function. 
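RETRY() simply re-issues the request until it succeeds or runs out of attempts, waiting longer between tries - with pause_base = 2 the back-off roughly doubles on each failure. A stripped-down sketch of the pattern on its own; the URL here is just a placeholder, not the NYT endpoint:\r\n\r\n```{r eval=FALSE}\r\n# retry a flaky GET up to 5 times with exponential back-off (~2, 4, 8... seconds)\r\nresp <- RETRY(\"GET\", \"https://httpbin.org/status/503\", times = 5, pause_base = 2)\r\nstatus_code(resp)\r\n```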
\r\n\r\n```{r}\r\n\r\n#params\r\napi.key <- \"36a5b43cb0e04a1dad5e23a9810f2cc1\"\r\nyyyy <- \"1929\"\r\nmm <- \"09\"\r\n\r\n#return JSON from NYT API\r\nget.NytArchives <- function(api.key,yyyy,mm){\r\n  base.url <- paste(\"https://api.nytimes.com/svc/archive/v1/\",yyyy,\"/\",mm,\".json\",sep=\"\")\r\n  print(paste(\"Collecting NYT archives data for: \",toString(yyyy),\"-\",toString(mm)))\r\n  \r\n  #get seems to fail sometimes, so keep on tryin'\r\n  query <- RETRY(\"GET\", base.url,\r\n               query = list(api_key=api.key),\r\n               times = 100, \r\n               pause_base = 2)\r\n  query <- content(query,as=\"text\",encoding=\"UTF-8\")\r\n  \r\n  df <- as.data.frame(fromJSON(query))\r\n  \r\n  #clean up the column names\r\n  colnames(df) <- gsub(\"^.*\\\\.\",\"\", colnames(df))\r\n  \r\n  return(df)\r\n}\r\n\r\n```\r\n\r\n### Test the Function\r\n\r\nWe'll do a call to grab a single month and see what we get back.\r\n\r\n```{r}\r\n\r\nresult <- get.NytArchives(api.key,\"1929\",\"9\")\r\n\r\n```\r\n\r\n\r\n\r\n### Check the Output\r\n\r\nAnd now we'll take a look and see what we got. First I'll print the column names: \r\n\r\n```{r}\r\n\r\nkable(colnames(result),col.names = \"Column Names\")\r\n```\r\n\r\nFor output purposes, I'll select only a few of the columns listed above in order to keep things legible:\r\n\r\n```{r}\r\nkable(head(result[c(\"web_url\",\"snippet\")],5))\r\n\r\n```\r\n\r\n\r\nThe data looks good!\r\n\r\n\r\nNow we're going to try and grab a bunch of data all at once:\r\n\r\n```{r}\r\n\r\ndf <- data.frame(matrix(ncol = 2, nrow=0))\r\n\r\ncolnames(df) <- c(\"web_url\",\"snippet\")\r\n\r\nfor (i in 1:5){\r\n  data <- get.NytArchives(api.key,1929,i)\r\n  \r\n  df<- rbind(df,data[c(\"web_url\",\"snippet\")])\r\n}\r\n\r\n\r\n\r\n\r\n```\r\n\r\n\r\nWe've just collected `r nrow(df)` articles from the NYT archives\r\n\r\n" }, { "alpha_fraction": 0.6471400260925293, "alphanum_fraction": 0.6658776998519897, "avg_line_length": 27.3121395111084, "blob_id": "e7d34e7e962bde8d34b0069797436cdbb604b0c0", "content_id": "aaa9fdda19950688c818bc55fdae52926b882f0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 5070, "license_type": "no_license", "max_line_length": 265, "num_lines": 173, "path": "/DATA_607_week13/DATA607_week13.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - WEEK 13 Assignment\"\r\nauthor: \"Paul Britton\"\r\noutput:\r\n  html_document:\r\n    highlight: pygments\r\n    theme: cerulean\r\n    toc: yes\r\n  pdf_document:\r\n    toc: yes\r\n---\r\n\r\nThe task here is to migrate from an SQL database to a NoSQL database. 
I decided to recycle my movie data from week 2 and move it over to mongoDB.\r\n\r\nAll the relevant data for this work can be found on github [here](https://github.com/plb2018/DATA607/tree/master/DATA_607_week13) and the output can be found on rpubs [here](http://rpubs.com/plb_lttfer/384542)\r\n\r\n\r\n## Clean-up and Setup \r\n\r\n```{r, echo=FALSE}\r\nrm(list = ls())\r\n```\r\n\r\n```{r setup}\r\n\r\nlibrary(RMySQL)\r\nlibrary(mongolite)\r\nlibrary(stringr)\r\nlibrary(miniUI)\r\nlibrary(shiny)\r\nlibrary(knitr)\r\n```\r\n\r\n## Create the SQL Tables\r\n\r\n### User Password\r\nI've borrowed an awesome snippet that was posted on Slack as a much nicer way to allow users to input passwords.\r\n\r\n```{r password, message=FALSE,warning=FALSE}\r\n#a shout out to Justin for posting this awesome snippet on slack!\r\n\r\nget_password <- function() {\r\n  ui <- miniPage(\r\n    gadgetTitleBar(\"Please enter password for database: data_607 \"),\r\n    miniContentPanel(\r\n      passwordInput(\"password\", \"\")\r\n    )\r\n  )\r\n\r\n  server <- function(input, output) {\r\n    observeEvent(input$done, {\r\n      stopApp(input$password)\r\n    })\r\n    observeEvent(input$cancel, {\r\n      stopApp(stop(\"No password.\", call. = FALSE))\r\n    })\r\n  }\r\n\r\n  runGadget(ui, server, viewer = dialogViewer(\"Password\", height = 200))\r\n}\r\n\r\npw <- get_password() \r\n\r\n```\r\n\r\n### The SQL data\r\n\r\nIn the interest of reproducibility, I'm going to create the mysql db right here. The only action that the user would need to perform in order to make this code work is to create a mysql database entitled *data_607*.\r\n\r\n\r\n```{r mysql_db, message = FALSE}\r\n\r\n#connect to the database\r\ncon = dbConnect(MySQL(),\r\n                user=\"root\",\r\n                password=pw,\r\n                dbname='DATA_607',\r\n                host='localhost')\r\n\r\n#load the data\r\nratings <- read.table(\"https://raw.githubusercontent.com/plb2018/DATA607/master/ml-latest-small/ratings.csv\",header = TRUE, sep = \",\",quote = \"\\\"\")\r\n\r\nmovies <- read.table(\"https://raw.githubusercontent.com/plb2018/DATA607/master/ml-latest-small/movies.csv\",header = TRUE, sep = \",\",fill = TRUE,quote = \"\\\"\")\r\n\r\n#create tables from the data\r\ndbWriteTable(con, name='ratings', value=ratings, overwrite=TRUE)\r\ndbWriteTable(con, name='movies', value=movies, overwrite=TRUE)\r\n\r\n#join the data into a third table\r\ndbSendQuery(con, \"CREATE TABLE IF NOT EXISTS movie_ratings AS (\r\nSELECT movies.movieId,ratings.userId, ratings.rating, movies.title,movies.genres\r\nFROM movies\r\nINNER JOIN ratings ON movies.movieId = ratings.movieId);\")\r\n\r\n\r\n\r\n```\r\n\r\n## Migrate to Mongo\r\n\r\nFrom what I gather, there are a few ways to migrate from SQL to NoSQL, however, it seems to be highly dependent on the input SQL DB and the desired outcome. In the case of my movie DB, the requirements are pretty simple, so I decided to try 2 methods of migration.\r\n\r\n### Method 1 - CSV\r\n\r\nA common method seems to be using CSV as a go-between. It appears as though this method offers good versatility, however, it's probably slow and impractical for extremely large databases.\r\n\r\nFirst I write my SQL tables to files \r\n\r\n```{r to_csv}\r\n\r\nsql.tables <- dbListTables(con)\r\n\r\nfor (i in 1:length(sql.tables)){\r\n  data<- dbReadTable(con,sql.tables[i])\r\n  write.table(data,\r\n              paste(sql.tables[i],\".csv\",sep=\"\"),\r\n              row.names=FALSE,\r\n              sep=\",\")\r\n}\r\n\r\n```\r\n\r\nFor the ease of the user, I've put the output from the above on github, and I'll work from that source. 
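Before reading anything back, a quick sanity check on the export is cheap - a sketch, assuming the CSVs written above sit in the working directory:\r\n\r\n```{r eval=FALSE}\r\n# confirm the CSV round-trip preserved each table's row count\r\nfor (i in 1:length(sql.tables)){\r\n  n.sql <- dbGetQuery(con, paste(\"SELECT COUNT(*) FROM\", sql.tables[i]))[1,1]\r\n  n.csv <- nrow(read.table(paste(sql.tables[i],\".csv\",sep=\"\"), sep=\",\", header=TRUE))\r\n  print(paste(sql.tables[i], \":\", n.sql == n.csv))\r\n}\r\n```\r\n\r\n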
We load 1000 rows of the DB from github back into R\r\n\r\n```{r from_csv}\r\n\r\nmovie.ratings <- read.table(\"https://github.com/plb2018/DATA607/raw/master/DATA_607_week13/movie_ratings.csv\",sep=\",\",header=TRUE, nrows=1000)\r\n\r\n```\r\n\r\nThen we connect to mongo and add the data. Note that we assume that the user here already has a local copy of mongo running which contains a db called \"DATA_607\" and a collection called \"movies\".\r\n\r\n```{r to_mongo1}\r\nmongo <- mongo(db=\"DATA_607\",collection=\"movies\")\r\nmongo$insert(movie.ratings)\r\n\r\n```\r\n\r\nNow we check and see what's in the new mongo db:\r\n\r\n```{r mongo_check1}\r\nkable(mongo$find(\r\n  query = '{\"rating\" : {\"$gte\" : 5}}', \r\n  fields = '{\"title\":true, \"rating\":true}',\r\n  limit = 10)\r\n  )\r\n```\r\n\r\nThe data looks good!\r\n\r\n\r\n\r\n### Method 2 - Right from MySQL\r\n\r\nAnother way, which appears to be less feasible for complicated SQL DBs, is to just go directly from a SQL query into mongo. Here we're really only loading 1 table, so it's not a big deal, but I'm less confident that this will work for complicated DBs.\r\n\r\n```{r to_mongo2}\r\n\r\nmovie.ratings <- dbReadTable(con,\"movie_ratings\")\r\nmongo <- mongo(db=\"DATA_607\",collection=\"movies2\")\r\nmongo$insert(movie.ratings)\r\n```\r\n\r\n```{r mongo_check2}\r\nkable(mongo$find(\r\n  query = '{\"rating\" : {\"$gte\" : 5}}', \r\n  fields = '{\"title\":true, \"rating\":true}',\r\n  limit = 10)\r\n  )\r\n```\r\n\r\n\r\nThe data looks good here too! " }, { "alpha_fraction": 0.6865671873092651, "alphanum_fraction": 0.7761194109916687, "avg_line_length": 32.5, "blob_id": "45aa47f5dc2ec593be0d58aabdbde4c43df45161", "content_id": "14859cf64b12f1dcd27eabe099dc8df671c431a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/README.md", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "# DATA607\nA new github repository to hold all my work for DATA 607\n" }, { "alpha_fraction": 0.6688089370727539, "alphanum_fraction": 0.6842330694198608, "avg_line_length": 23.22916603088379, "blob_id": "a527727aae88522eb677d1f34e85f139747e564d", "content_id": "87973c63e5d8296773415d862a045e06ace5bea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 2334, "license_type": "no_license", "max_line_length": 505, "num_lines": 96, "path": "/books/DATA607_week7.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - WEEK 7 Assignment\"\r\nauthor: \"Paul Britton\"\r\noutput:\r\n  html_document:\r\n    highlight: pygments\r\n    theme: cerulean\r\n    toc: yes\r\n  pdf_document:\r\n    toc: yes\r\n---\r\n\r\n#Introduction\r\n\r\nFor week 7, the task was to record book titles in several different formats and then read them into R. The book-data for this work is available in my github, and this document can be found on rpubs.\r\n\r\n\r\n#Clear the Workspace\r\n\r\n```{r}\r\nrm(list = ls())\r\n```\r\n\r\n#Load the Required Packages\r\n\r\n```{r message=FALSE}\r\nlibrary(XML)\r\nlibrary(RCurl)\r\nlibrary(rlist)\r\nlibrary(jsonlite)\r\nlibrary(compare)\r\nlibrary(data.table)\r\n```\r\n\r\n# Load the HTML\r\n\r\nFirst we'll get the HTML file. 
We'll read it from github using RCurl's getURL.\r\n\r\n```{r}\r\n\r\nhtml <-getURL(\"https://raw.githubusercontent.com/plb2018/DATA607/master/books/data_607_books.html\")\r\n\r\n\r\nhtml.table <- readHTMLTable(html)\r\nhtml.table <- html.table[[1]]\r\n\r\nhtml.table\r\n\r\n```\r\n\r\nThe data is loaded to the dataframe! \r\n\r\n# Load the XML\r\n\r\nNext we load the XML file. We'll read the data from github in the same way as the HTML file.\r\n\r\n\r\n```{r}\r\nxml.file <-getURL(\"https://raw.githubusercontent.com/plb2018/DATA607/master/books/data_607_books.xml\")\r\n\r\n\r\nxml <- xmlParse(xml.file)\r\nxml.table <- xmlToDataFrame(xml)\r\nxml.table\r\n\r\n```\r\n\r\nOnce again, the data is loaded to the dataframe without issue!\r\n\r\n# Load the JSON\r\n\r\nWe'll now load the JSON using jsonlite:\r\n\r\n\r\n```{r}\r\njson.file <-getURL(\"https://raw.githubusercontent.com/plb2018/DATA607/master/books/data_607_books.json\")\r\n\r\njson.table <- fromJSON(json.file)\r\n\r\njson.table <- data.table::rbindlist(json.table)\r\n\r\njson.table\r\n \r\n```\r\n\r\nThe data is loaded.\r\n\r\n# Quick comparison\r\n\r\n```{r}\r\n\r\ncompare(html.table,xml.table,equal=TRUE)\r\ncompare(html.table,json.table,equal=TRUE)\r\n```\r\n\r\nWe can see visually that the content is the same for all, and that the html and xml are the same, however, the json appears to be ever-so-slightly different in that the columns are factors instead of chrs. This is easy to change, as needed. Based on this experience, I'd probably say that XML was the easiest. It worked on my first attempt, and is easier to read than HTML. JSON seems like the easiest for a human to work with, and seems less verbose than XML, but I had some issues getting it to work. \r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6896682977676392, "alphanum_fraction": 0.697960376739502, "avg_line_length": 33.934932708740234, "blob_id": "380174441f2aeab1f6943ac8f22ce99eb81acdbd", "content_id": "d786e2508ee1f623e2f6f10a76e09248d5b1b56b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 10492, "license_type": "no_license", "max_line_length": 899, "num_lines": 292, "path": "/DATA607_week5.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - WEEK 5 - Assignment\"\r\nauthor: \"Paul Britton\"\r\noutput: \r\n  html_document:\r\n    theme: cerulean\r\n    highlight: pygments\r\n    toc: True\r\n---\r\n\r\n#Housekeeping\r\n\r\nThis doc can be found at [rpubs](http://rpubs.com/plb_lttfer/366514) and the input data files and .rmd file can be found on my github here: [Flights](https://raw.githubusercontent.com/plb2018/DATA607/master/flight_info.csv), [Weather](https://raw.githubusercontent.com/plb2018/DATA607/master/NOAA_Rainfall.csv).\r\n[.rmd]()\r\n\r\n\r\nFirst I'll clean up my work space:\r\n\r\n```{r}\r\nrm(list = ls())\r\n```\r\n\r\nThen we'll load all the packages that we're going to need for this exercise:\r\n\r\n```{r warning=FALSE, message=FALSE}\r\nlibrary(tidyr)\r\nlibrary(dplyr)\r\nlibrary(ggplot2)\r\n\r\n```\r\n\r\n#Load and Clean the Data\r\n\r\nNext we'll load the data from a CSV file. On an initial attempt I had manually cleaned up the data directly in the CSV file by filling in column headers and adding the airline value in the \"delayed\" rows, however, based on some of the comments in the meetup this past week, I've decided to try to keep scalability in mind for this entire exercise. 
As such, I decided to keep the data messy and try to come up with a scalable programmatic solution to these issues. I also tried to create code that will work \"as is\" if the number of cities or airlines contained in the data is increased. Let's load the data, clean up the colNames and take a look:\r\n\r\n\r\n\r\n```{r}\r\nflight.info <- read.csv(\"https://raw.githubusercontent.com/plb2018/DATA607/master/flight_info.csv\",\r\n                        header=TRUE,\r\n                        stringsAsFactors = FALSE)\r\n\r\n\r\ncolnames(flight.info)[1:2] = c(\"Airline\",\"Status\")\r\n\r\nflight.info\r\n\r\n```\r\n\r\n\r\n\r\nThe data looks good, but we need to come up with a way to forward-fill the airline names into the \"delayed\" column - preferably a way that would work as well on 40K rows as it does on these 4 rows. It looks like the tidyr library provides a simple way to do this. First, I'm going to convert the blanks to NAs, then I'll use the fill() function to forward fill the \"Airline\" column:\r\n\r\n\r\n```{r}\r\n\r\nis.na(flight.info) <- flight.info==''\r\n\r\nflight.info <- flight.info %>% fill(Airline)\r\n\r\n\r\n```\r\n\r\n\r\nNow we'll tidy the data using the gather() function from tidyr. Rather than specify a specific column range, when calling gather(), I've decided to exclude \"Airlines\" and \"Status\". This way, the code should scale as-is if more cities are added to the CSV file (i.e. if the data gets \"wider\").\r\n\r\n```{r}\r\n\r\nflight.tidy <- gather(flight.info,\"City\",\"FlightCount\",-Airline,-Status)\r\n\r\nhead(flight.tidy,5)\r\n```\r\n\r\nOur data is now tidy - time for some analysis!\r\n\r\n# Analyze the Data\r\n\r\nFirst we'll take a 30K ft. view (lame pun intended) of the data. What do the raw numbers look like? What's the breakdown in terms of number of flights per airline? 
Who's delayed more frequently?\r\n\r\n\r\n\r\n```{r}\r\n\r\nflights.total <- flight.tidy %>% \r\n group_by(Airline) %>% \r\n summarise(flights = sum(FlightCount))\r\n\r\nflights.total$proportion <- flights.total$flights / sum(flights.total$flights)\r\n\r\nflights.total\r\n\r\nggplot(flights.total,aes(x=Airline,y=flights)) + \r\n geom_bar(stat=\"identity\",color = \"Red\", fill = \"White\") +\r\n ggtitle(\"Raw Flight Count by Airline\") +\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\n\r\nggplot(flights.total,aes(x=Airline,y=proportion)) + \r\n geom_bar(stat=\"identity\",color = \"Red\", fill = \"White\") +\r\n ggtitle(\"Proportion of Total Flights by Airline\") +\r\n ylab(\"Proportion\") +\r\n theme(plot.title = element_text(hjust = 0.5)) \r\n\r\n\r\n```\r\n\r\nWe can see that there are about 11K flights in total, with about 65% being AM WEST and the remaining 35% being ALASKA.\r\n\r\n\r\nNow that we have the total flights for each airline, we'll use it to compute the proportion of delayed flights per airline.\r\n\r\n\r\n```{r}\r\n\r\nflights.delayed <- flight.tidy %>% \r\n group_by(Airline,Status) %>% \r\n summarise(flights = sum(FlightCount)) %>% \r\n filter(Status == \"delayed\") \r\n\r\nflights.total$delayed <- flights.delayed$flights\r\n```\r\n\r\nA quick numerical look at the proportion of delayed flights for ALASKA and AM West, respectively:\r\n\r\n```{r}\r\n\r\nflights.total$delayed / flights.total$flights\r\n```\r\n\r\nAnd a visual Look \r\n\r\n```{r}\r\nggplot(flights.total,aes(x=Airline,y=delayed/flights)) + \r\n geom_bar(stat=\"identity\",fill=\"Blue\") +\r\n ggtitle(\"Proportion of Delayed Flights by Airline \") +\r\n ylab(\"proportion\") +\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\n```\r\n\r\n\r\nWe see here that ALASKA has a higher proportion of delayed flights than AM WEST. We probably need data for several more airlines before we can make a determination as to whether this difference is significant. \r\n\r\nNext let's look at the market-share for each airline in each market. 
Here we consider the proportion of flights for each airline relative to the total number of flights in that market.\r\n\r\n\r\n\r\n```{r}\r\n\r\nflights.city <- flight.tidy %>% \r\n group_by(City) %>% \r\n summarise(flights = sum(FlightCount)) \r\n```\r\n\r\nFirst a quick look at the total number of flights by city:\r\n\r\n```{r}\r\n\r\nflights.city\r\n\r\nggplot(flights.city, aes(x = City, y=flights / sum(flights))) +\r\n geom_bar(stat=\"identity\",position=\"dodge\",fill=\"Red\") + \r\n ylab(\"proportion\") +\r\n ggtitle(\"Proportion of Flights by City\") +\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\n```\r\n\r\nThen we look at the proportional market share of each airline by City.\r\n\r\n```{r}\r\n\r\nflights.market <- flight.tidy %>% \r\n group_by(City,Airline) %>% \r\n summarise(flights = sum(FlightCount)) %>% \r\n ungroup() %>% \r\n spread(Airline, flights, fill=0)\r\n\r\n\r\nflights.marketProp <- flights.market\r\n\r\nflights.marketProp[2:ncol(flights.market)] <- flights.market[2:ncol(flights.market)] / rowSums(flights.market[2:ncol(flights.market)])\r\nflights.marketProp <- gather(flights.marketProp,\"Airline\",\"Proportion\",-City)\r\n\r\nhead(flights.marketProp,5)\r\n\r\nggplot(flights.marketProp, aes(x = Airline, y=Proportion, fill = City)) +\r\n geom_bar(stat=\"identity\",position=\"dodge\") + \r\n xlab(\"Airlines\") + \r\n ylab(\"proportion\") +\r\n ggtitle(\"Proportional Market Share by Airline and City\") +\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\n\r\n```\r\n\r\nHere we see that ALASKA is dominant in the more northerly cities, particularly Seattle, whereas AM WEST is dominant in the more southerly cities, particularly Phoenix.\r\n\r\nNext we'll look at the proportion of delayed flights by airline and city.\r\n\r\n\r\n```{r}\r\n\r\nflights.marketDelay <- flight.tidy %>% \r\n group_by(City,Airline) %>% \r\n filter(Status == \"delayed\") %>% \r\n summarise(flights = sum(FlightCount)) %>% \r\n ungroup() %>% \r\n spread(Airline, flights, fill=0)\r\n\r\n\r\nflights.marketDelayProp <- flights.marketDelay \r\n \r\nflights.marketDelayProp[2:ncol(flights.market)] <- flights.marketDelay[2:ncol(flights.market)] / flights.market[2:ncol(flights.market)]\r\n\r\nflights.marketDelayProp <- gather(flights.marketDelayProp,\"Airline\",\"Proportion\",-City)\r\n\r\nhead(flights.marketDelayProp,5)\r\n\r\nflights.marketDelayProp %>%\r\n group_by(Airline) %>% \r\n summarise(proportion = mean(Proportion)) \r\n\r\nggplot(flights.marketDelayProp, aes(x = Airline, y=Proportion, fill = City)) +\r\n geom_bar(stat=\"identity\",position=\"dodge\") + \r\n xlab(\"Airlines\") + \r\n ylab(\"proportion\") +\r\n ggtitle(\"Proportional of Delayed Flights by Airline and Market\") +\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\nggplot(flights.marketDelayProp, aes(x = Airline, y=1-Proportion, fill = City)) +\r\n geom_bar(stat=\"identity\",position=\"dodge\") + \r\n xlab(\"Airlines\") + \r\n ylab(\"proportion\") +\r\n ggtitle(\"Proportional of On-Time Flights by Airline and Market\") +\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\n\r\n\r\n```\r\n\r\nIf we look at the breakdown by City we see that San Francisco and Seattle represent the highest proportion of delays and my suspicion is that this is probably weather-related. 
I found some NOAA Rainfall stats [here](https://www.currentresults.com/Weather/US/average-annual-precipitation-by-city.php) which I copied and pasted into a CSV file to explore this idea a little bit.\r\n\r\n# Weather Data - Something Extra\r\n\r\n```{r}\r\n\r\nrain.info <- read.csv(\"https://raw.githubusercontent.com/plb2018/DATA607/master/NOAA_Rainfall.csv\",\r\n                        header=TRUE,\r\n                        stringsAsFactors = FALSE)\r\n\r\n\r\n#replace spaces in city names w/ dots to match our pre-existing data.\r\nrain.info$City <- chartr(\" \", \".\",rain.info$City)\r\n\r\nhead(rain.info,10)\r\n```\r\n\r\nThe rainfall data looks good, so now we'll merge it with our flight-delay data:\r\n\r\n\r\n```{r}\r\n\r\nrain.info <- left_join(flights.marketDelayProp,rain.info) \r\n\r\nhead(rain.info,5)\r\n\r\n```\r\n\r\nThe data is merged so now we can take a quick look and see if there is any relationship between weather and flight delays. First we'll check and see if the number of rain-days shows any relationship to delays, then we'll look at whether there is any relationship between the quantity of precipitation and flight delays.\r\n\r\n\r\n```{r}\r\nggplot(rain.info, aes(x = Proportion, y=Days, color = City)) +\r\n  geom_point( size = 5) +\r\n  xlab(\"Proportion of Flight Delays\") + \r\n  ylab(\"# Of Annual Precipitation Days\") +\r\n  ggtitle(\"Flight Delays vs. Precipitation Days by City\") +\r\n  theme(plot.title = element_text(hjust = 0.5))\r\n\r\nggplot(rain.info, aes(x = Proportion, y=Inches, color = City)) +\r\n  geom_point( size = 5) +\r\n  xlab(\"Proportion of Flight Delays\") + \r\n  ylab(\"Inches Of Annual Precipitation\") +\r\n  ggtitle(\"Flight Delays vs. Inches of Precipitation by City\") +\r\n  theme(plot.title = element_text(hjust = 0.5))\r\n\r\n\r\n```\r\n\r\n\r\nIn both cases above, we see evidence of a relationship between precipitation and flight delays, which makes intuitive sense. Phoenix has the fewest delays and the least bad weather whereas Seattle has the most delays and the most bad weather. Looking at the flight delay data alone may have led one to conclude that ALASKA was less efficient and more delay prone (percentage-wise) than AM WEST. The addition of the weather data seems to suggest that the City matters and that because ALASKA flies more flights to cities with poor weather, particularly Seattle, they experience proportionally more delays. In fact, the frequency of delays in the poor-weather markets (Seattle & SF) is significantly lower for ALASKA than for AM WEST, however AM WEST's overall delay numbers are only better because they fly more flights to Phoenix. 
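This flip - where AM WEST wins on the aggregate delay numbers even though ALASKA does better city-by-city - looks like a textbook case of Simpson's paradox, and it's easy to check directly from the tidy data; a sketch using the flight.tidy frame built earlier:\r\n\r\n```{r eval=FALSE}\r\n# aggregate delay rate per airline...\r\nflight.tidy %>%\r\n  group_by(Airline) %>%\r\n  summarise(delay.rate = sum(FlightCount[Status == \"delayed\"]) / sum(FlightCount))\r\n\r\n# ...vs. the delay rate within each city\r\nflight.tidy %>%\r\n  group_by(City, Airline) %>%\r\n  summarise(delay.rate = sum(FlightCount[Status == \"delayed\"]) / sum(FlightCount))\r\n```\r\n\r\n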
Maybe ALASKA's pilots are better in the SF & seattle fog :) " }, { "alpha_fraction": 0.5854700803756714, "alphanum_fraction": 0.632478654384613, "avg_line_length": 14.714285850524902, "blob_id": "642c3541fadbcf2c46f8802c804241215ef190d8", "content_id": "911977fccad2880c24430c5608c05d5bee227718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 234, "license_type": "no_license", "max_line_length": 150, "num_lines": 14, "path": "/Project 3/load_indeed_example.rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"TeamRouge\"\r\noutput: html_notebook\r\n---\r\n\r\n\r\n\r\n```{r}\r\ndf <- read.table(\"https://raw.githubusercontent.com/plb2018/DATA607/master/Project%203/indeed_jobs_sample.csv\",header=TRUE,sep=\",\",encoding = \"UTF-8\")\r\n\r\ndf\r\n\r\n\r\n```\r\n" }, { "alpha_fraction": 0.67132568359375, "alphanum_fraction": 0.6907859444618225, "avg_line_length": 28.00967788696289, "blob_id": "c9c16e25b63fc753c2404728546eea99012a8510", "content_id": "f325f3ba27f30fd733571ee19ec3bac2d4b58a39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 9301, "license_type": "no_license", "max_line_length": 397, "num_lines": 310, "path": "/DATA_607_Project4/project_4.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - Project 4\"\r\nauthor: \"Paul Britton\"\r\noutput: \r\n html_document:\r\n theme: cerulean\r\n highlight: pygments\r\n toc: TRUE\r\n---\r\n## Intro\r\n\r\nThis rmd and all related input data can be found on my [github](https://github.com/plb2018/DATA607/tree/master/DATA_607_Project4). This document can also be found on [rpubs](http://rpubs.com/plb_lttfer/380117)\r\n\r\n## Clear the Workspace\r\n\r\n```{r clear_workspace}\r\nrm(list=ls())\r\n\r\n```\r\n\r\n## Load the Required Packages\r\n\r\n```{r setup, message=FALSE,warning=FALSE}\r\nlibrary(tm)\r\nlibrary(RTextTools)\r\nlibrary(stringr)\r\nlibrary(SnowballC)\r\nlibrary(ggplot2)\r\nlibrary(knitr)\r\nlibrary(tidyr)\r\n```\r\n\r\n## Load the Data\r\n\r\nWe are going to look at spam data from from [http://spamassassin.apache.org/old/publiccorpus/](http://spamassassin.apache.org/old/publiccorpus/). Given that the sample that we are using isn't all that big, we'll try using Vcorpus. We'll load the \"spam\" and \"ham\" samples into variables, and then add meta-data denoting which is which.\r\n\r\n## Set Ws\r\n\r\n```{r paths}\r\nbasePath <- \"C:/Users/Paul/OneDrive - CUNY School of Professional Studies/CUNY/DATA 607/DATA_607_Project 4/\"\r\n\r\nsetwd(basePath)\r\n```\r\n\r\nThe data can be downloaded directly from R as follows and can then be extracted using the untar command, or external software. 
For simplicity, I worked with a local copy of the data for this project, from the above local dir.\r\n\r\n\r\n```{r download, eval=FALSE}\r\n\r\ndownload.file(\"https://github.com/plb2018/DATA607/raw/master/DATA_607_Project4/20021010_easy_ham.tar\", destfile = \"20021010_easy_ham.tar\")\r\n\r\ndownload.file(\"https://github.com/plb2018/DATA607/raw/master/DATA_607_Project4/20021010_spam.tar\", destfile = \"20021010_spam.tar\")\r\n\r\n#untar(\"20021010_easy_ham.tar\")\r\n#untar(\"20021010_spam.tar\")\r\n\r\n```\r\n\r\n\r\nWe load the data and add the spam tags:\r\n\r\n```{r load_and_tag}\r\n\r\n\r\nspam <- VCorpus(DirSource(paste(basePath,\"spam/\",sep=\"\"),encoding=\"UTF-8\"),\r\n readerControl = list(reader=readPlain))\r\n\r\nham <- VCorpus(DirSource(paste(basePath,\"easy_ham/\",sep=\"\"),encoding=\"UTF-8\"),\r\n readerControl = list(reader=readPlain))\r\n\r\nmeta(spam, tag = \"is.Spam\") <- \"spam\"\r\nmeta(ham, tag = \"is.Spam\") <- \"not_spam\"\r\n\r\n```\r\n\r\n## Prepare the Data\r\n\r\nNext we'll combine the the data into a single VCorpus and clean it up a bit in preparation for conversion to a DocTermMatrix. At this stage, I'm not going to perform any other cleaning/prep of the data because I'd like to assess the impact of this step when we're further along.\r\n\r\n```{r prep1}\r\n\r\n\r\nham.nSpam <- c(spam,ham,recursive=TRUE)\r\n\r\nham.nSpam <- sample(ham.nSpam,3000)\r\n\r\nham.nSpam <- tm_map(ham.nSpam,content_transformer(function(x) iconv(enc2utf8(x), sub = \"byte\")))\r\n```\r\n\r\nNow we'll create the DTM. Originally I had intended to run with the data as raw as possible for this initial attempt, however, some cleaning (as above) was required in order to get the DTM to create properly. As a side note, it appears that one can use a DTM or a TDM with similar results, so long as you make sure that the dimensions are correct when creating the \"container\" in the next step. \r\n\r\n```{r to_dtm}\r\nspam.dtm <- DocumentTermMatrix(ham.nSpam)\r\nspam.dtm\r\n\r\n#this works too, but you need to watch the dims when creating the container below.\r\n#spam.tdm <- TermDocumentMatrix(ham.nSpam)\r\n#spam.tdm\r\n\r\n```\r\n\r\nWe can see above that the maximal term length is 515, which seems unrealistically high. We'll explore that later - right now we'll create a vector with the true labels for training purposes.\r\n\r\n```{r create_labels}\r\n\r\nspam.label <- unlist(meta(ham.nSpam, \"is.Spam\")[,1])\r\nhead(spam.label,5)\r\n\r\n```\r\n\r\n## Train the Model(s)\r\n\r\nNow we'll create the container that will be used to hold our training data and parameters. \r\n\r\n```{r container}\r\n\r\nN <- length(spam.label)\r\npct <-0.25\r\nr <- round(N*pct,0)\r\n\r\n\r\ncontainer <- create_container(\r\n spam.dtm,\r\n labels = spam.label,\r\n trainSize = 1:r,\r\n testSize = (r+1):N,\r\n virgin=FALSE)\r\n\r\n```\r\n\r\nNext, we'll train on the training set and then classify the test set. I'm going to start with just SVM as the point here it to demonstrate the capability. Then we'll look at a few more things afterwards. 
\r\n\r\n```{r model}\r\n\r\nsvm.model <- train_model(container, \"SVM\")\r\nsvm.out <- classify_model(container, svm.model)\r\n\r\nkable(head(svm.out,10))\r\n```\r\n\r\nAnd finally, we check to see how accurate the model's predictions were:\r\n\r\n```{r}\r\n\r\ntrue.labels <- as.numeric(as.factor(spam.label[(r+1):N]))\r\npredicted.labels <- svm.out[,\"SVM_LABEL\"]\r\nrecall_accuracy(true.labels,predicted.labels)\r\n\r\n```\r\n\r\nNot bad at all!\r\n\r\n\r\n## Cleaning Up the Data\r\n\r\nWe'll now take a look at what happens if we run the same analysis on data that we've cleaned. Normally I would be tempted to determine the sensitivity of our results to each element of the cleaning, but I think that's beyond the scope of this project. In this case, I'm going to clean it all in one shot. We'll make the following changes:\r\n\r\n* Remove the numbers\r\n* Remove the punctuation\r\n* Remove the stop-words\r\n* Make the data uniformly lowercase\r\n* Apply stemming\r\n* Remove Sparse Terms\r\n\r\n```{r}\r\n\r\nclean.spam <- ham.nSpam\r\nclean.spam <- tm_map(clean.spam,content_transformer(tolower),lazy=TRUE)\r\nclean.spam <- tm_map(clean.spam,removePunctuation)\r\nclean.spam <- tm_map(clean.spam,removeNumbers)\r\nclean.spam <- tm_map(clean.spam,removeWords,words=stopwords(\"en\"))\r\nclean.spam <- tm_map(clean.spam,stripWhitespace,lazy=TRUE)\r\nclean.spam <- tm_map(clean.spam,stemDocument,lazy=TRUE)\r\n\r\nclean.spam <- tm_map(clean.spam ,content_transformer(function(x) iconv(enc2utf8(x), sub = \"byte\")))\r\n\r\nclean.dtm <- DocumentTermMatrix(clean.spam)\r\nclean.dtm <- removeSparseTerms(clean.dtm,0.95)\r\nclean.dtm\r\n```\r\n\r\n# Testing With the Clean Data\r\n\r\nWe've cleaned the data and reduced the sparsity of the DTM a little bit. The maximal term length is down by about 90% from the uncleaned data. Now we'll check which produces better results. We'll write a little function to train the model and assess the output on a random sample of the data, and then run a few iterations:\r\n\r\n\r\n```{r}\r\n\r\nmodelTest <- function(sampleSize, test_prop, corpus){\r\n  r<-round(sampleSize*test_prop)\r\n\r\n  \r\n  data <- sample(corpus,sampleSize)\r\n  labels <- unlist(meta(data, \"is.Spam\")[,1])\r\n  dtm <- DocumentTermMatrix(data)\r\n  \r\n  container <- create_container(\r\n    dtm,\r\n    labels = labels,\r\n    trainSize = 1:r,\r\n    testSize = (r+1):sampleSize,\r\n    virgin=FALSE) \r\n  \r\n  svm.model <- train_model(container, \"SVM\")\r\n  svm.out <- classify_model(container, svm.model)\r\n  \r\n  true.labels <- as.numeric(as.factor(labels[(r+1):sampleSize]))\r\n  predicted.labels <- svm.out[,\"SVM_LABEL\"]\r\n  out <- recall_accuracy(true.labels,predicted.labels)\r\n  return(out)\r\n  \r\n}\r\n```\r\n\r\nWe'll wrap the function in a loop and see whether we get better results for the \"original\" or the \"clean\" data.\r\n\r\n```{r}\r\nitr <-25\r\norig <- rep(0,itr)\r\nclean <- rep(0,itr)\r\n\r\n\r\nfor (i in 1:itr){\r\n  #print(i)\r\n  orig[i] <- modelTest(100,0.25,ham.nSpam) \r\n  clean[i] <- modelTest(100,0.25,clean.spam) \r\n  \r\n}\r\n\r\n\r\n```\r\n\r\nThe above is painfully slow and I assume that there is a much better way to accomplish this task. 
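A sketch of one tidier possibility (assuming the modelTest() function behaves as defined above; this tidies the code but won't speed up the underlying model training) would be to let replicate() run the trials and collect both results at once:\r\n\r\n```{r}\r\n# hypothetical alternative to the explicit loop: replicate() returns a\r\n# 2 x itr matrix of accuracies; t() flips it into itr rows of (orig, clean)\r\nruns <- t(replicate(itr, c(orig = modelTest(100, 0.25, ham.nSpam),\r\n                           clean = modelTest(100, 0.25, clean.spam))))\r\n```\r\n\r\n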
However, in this case, we've gotten the desired data.\r\n\r\n```{r}\r\ndata <- data.frame(orig=orig,clean=clean)\r\nsummary(data)\r\ntidy.data <- gather(data)\r\nggplot(tidy.data,aes(x=value,fill=key)) + geom_histogram(alpha=0.75,bins=10)\r\n\r\n```\r\n\r\nFor this particular task, the original (uncleaned) doesn't appear to produce results that are significantly different from the \"clean\" data, at least on an SVM model.\r\n\r\n# Multi-Model\r\n\r\nFinally, we'll look at a few different methods for training our models. I've chosen a few and down-scoped the data a little bit as otherwise, this step takes *forever* to run.\r\n\r\n\r\n```{r container2}\r\n\r\n#downsample the data\r\nclean.spam <- sample(clean.spam,500)\r\nclean.dtm <- DocumentTermMatrix(clean.spam)\r\nclean.dtm <- removeSparseTerms(clean.dtm,0.95)\r\nspam.label <- unlist(meta(clean.spam, \"is.Spam\")[,1])\r\n\r\nN <- length(spam.label)\r\npct <-0.25\r\nr <- round(N*pct,0)\r\n\r\ncontainer <- create_container(\r\n spam.dtm,\r\n labels = spam.label,\r\n trainSize = 1:r,\r\n testSize = (r+1):N,\r\n virgin=FALSE)\r\n\r\n```\r\n\r\n\r\nWe'll train and run using 3 different algorithms:\r\n\r\n```{r multi-model}\r\n\r\nmulti.model <- train_models(container, algorithm=c(\"SVM\",\"RF\",\"MAXENT\"))\r\nmulti.out <- classify_models(container, multi.model)\r\n```\r\n\r\n```{r show_multi}\r\nkable(head(multi.out,10))\r\n\r\n```\r\n\r\n```{r multi_plot}\r\n\r\ntrue.labels <- as.numeric(as.factor(spam.label[(r+1):N]))\r\n\r\npredicted.labels <- multi.out[,\"SVM_LABEL\"]\r\nsvm <- recall_accuracy(true.labels,predicted.labels)\r\n\r\npredicted.labels <- multi.out[,\"FORESTS_LABEL\"]\r\nrf <- recall_accuracy(true.labels,predicted.labels)\r\n\r\npredicted.labels <- multi.out[,\"MAXENTROPY_LABEL\"]\r\nmaxent <- recall_accuracy(true.labels,predicted.labels)\r\n\r\nresults <- data.frame(svm=svm,rf=rf,maxent=maxent)\r\n\r\ntidy.results <- gather(results)\r\n\r\nggplot(tidy.results,aes(x=key, y=value,fill=key)) + \r\n geom_bar(stat=\"identity\") +\r\n coord_cartesian(ylim = c(min(results)*0.95, max(results)*1.05))+\r\n xlab(\"Model\")+\r\n ylab(\"Success Rate\")+\r\n ggtitle(\"Prediction Success Rate for Various Methods\")+\r\n theme(plot.title = element_text(hjust = 0.5))\r\n\r\n```" }, { "alpha_fraction": 0.5578135848045349, "alphanum_fraction": 0.5760336518287659, "avg_line_length": 25.901960372924805, "blob_id": "b31308ef5697ab40023485b00afe385a6950beca", "content_id": "662232222cba2d979413e4c3239758b7c40e2944", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 2854, "license_type": "no_license", "max_line_length": 251, "num_lines": 102, "path": "/data_607_week1.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"Data 607 - Week 1\"\r\nauthor: \"Paul Britton\"\r\ndate: '2018-02-04'\r\noutput:\r\n html_document:\r\n df_print: paged\r\n html_notebook: default\r\n pdf_document: default\r\n---\r\n\r\nThe .rmd and raw data for this assignment can be found in my github repository located at [https://github.com/plb2018/DATA607](https://github.com/plb2018/DATA607). 
For the purposes of this exercise, I'm grabbing the data files directly from my github.\r\n\r\nThe rpubs.com version of this work can be found at [http://rpubs.com/plb_lttfer/](https://github.com/plb2018/DATA607)\r\n\r\n\r\nThe task here is to perform several manipulations of a mushroom data set, so first we'll get the data, grab the first few columns, and take a look:\r\n\r\n### Load data\r\n\r\n```{r load_data, results='hide'}\r\nlibrary(data.table)\r\nlibrary(plyr)\r\n\r\n#load the data\r\nshrooms <- fread(\"https://github.com/plb2018/DATA607/raw/master/agaricus-lepiota.data\")\r\n\r\n```\r\n\r\n### Downscope the data and inspect\r\n\r\n```{r downscope_data}\r\n\r\n#grab the first few columns \r\ndf <- data.frame(shrooms[,0:6])\r\n\r\nhead(df,5)\r\n\r\n```\r\n\r\n\r\n\r\n### Name the columns\r\n\r\nThe data looks good, so now we give the columns some names:\r\n\r\n```{r name_cols}\r\n#rename the columns\r\nnames(df) <- c(\"edible_poisonous\",\"cap_shape\",\"cap_surface\",\"cap_color\",\"bruises\",\"odor\")\r\n\r\nhead(df,5)\r\n\r\n```\r\n\r\n\r\n### Replace abbreviations\r\n\r\nThe re-naming of the columns looks good, so now we'll replace the abbreviations with the appropriate names. The source file for the names can be found [here](https://github.com/plb2018/DATA607/raw/master/agaricus-lepiota.names):\r\n\r\n```{r abbrv_to_names}\r\n\r\ndf$edible_poisonous <- mapvalues(df$edible_poisonous,\r\n                 from=c('e','p'),\r\n                 to=c('edible','poisonous'))\r\n\r\n\r\n\r\ndf$cap_shape <- mapvalues(df$cap_shape,\r\n                 from=c('b','c','x','f','k','s'),\r\n                 to=c('bell','conical','convex','flat','knobbed','sunken'))\r\n\r\n\r\n\r\ndf$cap_surface <- mapvalues(df$cap_surface,\r\n                 from=c('f','g','y','s'),\r\n                 to=c('fibrous','grooves','scaly','smooth'))\r\n\r\n\r\n\r\ndf$cap_color <- mapvalues(df$cap_color,\r\n                 from=c('n','b','c','g','r','p','u','e','w','y'),\r\n                 to=c('brown','buff','cinnamon','gray','green','pink','purple','red','white','yellow'))\r\n\r\n\r\n\r\ndf$bruises <- mapvalues(df$bruises,\r\n                 from=c('t','f'), \r\n                 to=c('bruises','no bruises'))\r\n\r\n\r\n\r\ndf$odor <- mapvalues(df$odor,\r\n                 from=c('a','l','c','y','f','m','n','p','s'),\r\n                 to=c('almond','anise','creosote','fishy','foul','musty','none','pungent','spicy'))\r\n\r\n\r\n\r\nhead(df,5)\r\n\r\n```\r\n\r\nWe now have the first few columns of the dataframe with meaningful names, and all abbreviations replaced!\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7611900568008423, "alphanum_fraction": 0.7681586742401123, "avg_line_length": 71.03921508789062, "blob_id": "1e924605ee36b8cc8ff1e8c1d15c07e4b6050acb", "content_id": "510b502fd44e816606572248ee8b2a4a877da72e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 3731, "license_type": "no_license", "max_line_length": 441, "num_lines": 51, "path": "/DATA_607_week12/DATA_607_Week_12.rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"Data 607 - Week 12 - Recommender Systems\"\r\nauthor: \"Paul Britton\"\r\noutput:\r\n  html_document:\r\n    df_print: paged\r\n    toc: yes\r\n  html_notebook:\r\n    toc: yes\r\n  pdf_document:\r\n    toc: yes\r\n---\r\n\r\n\r\n# LinkedIn - Job Recommendations\r\n\r\nLinkedIn is one of the largest professional social networking sites in existence and as such it doubles as a place for recruiters to find talent and individuals to showcase their skills. 
As I see it, there are two sides to the job-matching world, which probably require distinct scenario designs: the job recommendations and the candidate recommendations.\r\n\r\n## Scenario Design\r\n\r\n### Job Recommendations\r\nI get emails from LinkedIn on a semi-regular basis giving me a heads-up on local jobs that may be a good fit for me. Based on my experience as a \"user\" I think that the scenario design would look as follows:\r\n\r\n* Target users: Job Seekers\r\n* Key Goals: Finding new & interesting employment that is consistent with their skill-set and experience\r\n* How can We Help: Make them aware of **relevant** opportunities in terms of skill-set, experience, geography, industry, etc.\r\n\r\n### Candidate Recommendations\r\nAlthough I've never used LinkedIn in a recruiting capacity, I would imagine that it is almost the flipside of the \"job recommendations\" objective. Namely, to inform employers of candidates who match posted job-descriptions or who might otherwise be a good addition to their organizations.\r\n\r\n* Target users: Recruiters\r\n* Key Goals: Finding talent with the target skills and experience to fill desired roles.\r\n* How can We Help: Facilitate matching of **relevant** potential recruits to existing openings, and making recruiters aware of the matches.\r\n\r\n## Reverse Engineer\r\n\r\nLinkedIn relies heavily on item-based (or item-to-item) collaborative filtering. In very simple terms, they make a \"Browsemap\" for each user which captures all of their activity, then they compare the Browsemaps of all users and look at the overlap/differences. The intuition behind this is that there is wisdom in crowds (i.e. the more users that like something, the more valid that thing is) and that similar people like similar things (a toy sketch of this idea appears at the end of this note).\r\n\r\nApparently this system was originally intended for the specific task of recommending content on the basis of \"people who viewed this profile also view ____\", however, LinkedIn has expanded the framework to be more broad and generic. Presumably there is information in everything people click on, not just in the individual profile associations.\r\n\r\nRead more about it in [this .pdf!](https://github.com/plb2018/DATA607/blob/master/DATA_607_week12/rsweb2014_submission_3.pdf)\r\n\r\nBased on my personal (anecdotal) experience, my guess is that formal education / credentials are also given a heavy weight relative to these \"Browsemaps\", as I tend to get many recommendations that match my credentials, but not my experience or clicking behavior.\r\n\r\n(As an aside, this could be because I live in a government town where formal credentials may matter more than experience... in which case the algo is working just fine. Not sure how I would study this.)\r\n\r\n## Improvements\r\n\r\nFrom personal experience, I actually wish that LinkedIn would rely more heavily on Browsemaps. As mentioned previously, I get a lot of job suggestions that may line up with my formal credentials (some of which are 10 years stale by now!) but very little that lines up with my experience or with my contacts, or with what/who I tend to click on.\r\n\r\nAnother thing I would change is that it appears as though they strive to provide a full list of ~20 jobs in each email. Given my niche, most jobs aren't a match; I'd prefer that they pruned the list to include only relevant jobs.
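\r\n\r\nTo make the item-to-item idea from the Reverse Engineer section concrete, here is a toy sketch (illustrative only; not LinkedIn's actual implementation): co-occurrence counts over a user-item view matrix give a crude item-similarity table.\r\n\r\n```{r}\r\n# toy item-to-item collaborative filter (illustrative only)\r\n# rows = users, cols = items; 1 means the user viewed the item\r\nviews <- matrix(c(1, 1, 0,\r\n                  1, 0, 1,\r\n                  0, 1, 1), nrow = 3, byrow = TRUE)\r\nco.views <- t(views) %*% views   # item-item co-view counts\r\ndiag(co.views) <- 0              # drop self-matches\r\nco.views[, 1]                    # \"users who viewed item 1 also viewed...\"\r\n```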
\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5932455658912659, "alphanum_fraction": 0.6234069466590881, "avg_line_length": 23.649213790893555, "blob_id": "121730eb2c72d326831bf25f3bc193a6c49d170a", "content_id": "da238f92d11d4381d8c705be632a3c0dc3a84170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 4708, "license_type": "no_license", "max_line_length": 279, "num_lines": 191, "path": "/DATA607_week3.Rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - WEEK 3 - Assignment\"\r\nauthor: \"Paul Britton\"\r\noutput: \r\n  html_document:\r\n    theme: cerulean\r\n    highlight: pygments\r\n    toc: True\r\n---\r\n\r\n### Load Packages\r\n\r\n```{r}\r\nlibrary(stringr)\r\nlibrary(tidyr)\r\n\r\n```\r\n\r\n\r\n\r\n# Chapter 8 - Problem 3\r\n\r\nFirst we'll format the original data as in the example:\r\n\r\n```{r}\r\nraw.data <-\"555-1239Moe Szyslak(636) 555-0113Burns, C. Montgomery555-6542Rev. Timothy Lovejoy555 8904Ned Flanders636-555-3226Simpson, Homer5553642Dr. Julius Hibbert\"\r\n\r\nname <- unlist(str_extract_all(raw.data, \"[[:alpha:]., ]{2,}\" ))\r\n\r\nphone <- unlist(str_extract_all(raw.data, \"\\\\(?(\\\\d{3})?\\\\)?(-| )?\\\\d{3}(-| )?\\\\d{4}\"))\r\n\r\nphonebook <- data.frame(name = name, phone = phone)\r\n\r\nname\r\n\r\n```\r\n\r\n## 3.1) \r\nExamining the data, we can see that most of the names already adhere to the first_name last_name format. Those that don't adhere use a comma separator (Last, First). So we'll loop over the names, and when we find one with a comma, we'll split it on the comma, and re-order the names:\r\n\r\n```{r}\r\nfor (i in 1:length(name)){\r\n  if (grepl(\",\",name[i]) == TRUE){\r\n    name_split = unlist(str_split(name[i],\",\"))\r\n    first = name_split[2]\r\n    last = name_split[1]\r\n    name[i] = paste(name_split[2], name_split[1])\r\n  }\r\n}\r\n\r\nname\r\n\r\n```\r\n\r\nFirst and last names are now ordered correctly. Next we'll update our phonebook with the first and last names separated. We'll also remove titles, but we don't want to upset Mr. Burns by removing the \"C.\" from in front of his name!\r\n\r\nAnd while we're at it, we'll clean up the phone numbers a bit too... \r\n\r\n```{r}\r\n\r\n#separate first and last names\r\nphonebook$name <- name\r\nphonebook <- separate(phonebook,name, sep = \" (?=[^ ]+$)\",\r\n                     into=c(\"first_name\",\"last_name\"))\r\n\r\n#remove the titles\r\nphonebook$first_name <- gsub(\"[[:alpha:]]{2,}\\\\.\\\\s*\", \"\\\\1\", phonebook$first_name)\r\n\r\n\r\n#clean up the phone numbers \r\n#drop brackets\r\nphonebook$phone <- gsub(\"[()]\", \"\", phonebook$phone)\r\n\r\n#replace spaces with dashes\r\nphonebook$phone <- gsub(\"\\\\s\", \"-\", phonebook$phone)\r\n\r\n#add dashes where missing\r\nphonebook$phone <- gsub(\"(\\\\d{3})(\\\\d{4})$\",\"\\\\1-\\\\2\",phonebook$phone)\r\n\r\n#create a little function to add an area code where it's missing\r\nadd_area_code <- function(num,chars){\r\n  result <- num\r\n  if(grepl(chars,num) == FALSE){\r\n    result <- paste(chars,num)\r\n  }\r\n\r\n  return(result)\r\n}\r\n\r\n#I assume that Springfield is all area code 636\r\nphonebook$phone <- sapply(phonebook$phone, function(x) add_area_code(x,\"636-\") )\r\n\r\n#remove any extra spaces\r\nphonebook$phone <- gsub(\"\\\\s\", \"\", phonebook$phone)\r\n\r\nphonebook\r\n\r\n```\r\n\r\n\r\n## 3.2)\r\n\r\nHere we're looking to see if the individual has a title. 
We'll use a regular expression to look for prefixes that have >=2 characters, and that end in a \".\". If true, then the individual has a title.\r\n\r\n```{r}\r\n\r\nhas_title <- str_detect(name,\"[[:alpha:]]{2,}\\\\.\\\\s*\")\r\n\r\ntitle <- data.frame(name,has_title)\r\n\r\ntitle\r\n\r\n \r\n```\r\n\r\n\r\n## 3.3)\r\n\r\nNow we're looking to see if a character has a second name. We'll use almost the same regular expression as for the last question. Here, we're looking for a single occurrence of an initial, followed by a \".\", as in the case of Mr. Burns.\r\n\r\n```{r}\r\n\r\nhas_second_name <- str_detect(phonebook$first_name,\"[[:alpha:]]?\\\\.\\\\s*\")\r\n\r\nsecond_name <- data.frame(name,has_second_name)\r\n\r\nsecond_name\r\n```\r\n\r\n \r\n\r\n\r\n# Chapter 8 - Problem 4\r\n\r\nSee below for code examples for all of these:\r\n\r\n## 4.1) \r\nmatch a string that contains any number of digits and ends in a \"$\"\r\n\r\n## 4.2)\r\nmatch words containing 1-4 lowercase letters in the range a-z\r\n\r\n## 4.3) \r\nmatch any string (or lack thereof) preceding \".txt\" where the string ends with \".txt\"\r\n\r\n## 4.4) \r\nmatches strings in the following common date formats \"dd/mm/yyyy\" and/or \"mm/dd/yyyy\"\r\n\r\n## 4.5)\r\nmatches strings in the format <tag>some text</tag>\r\n\r\nExamples below:\r\n\r\n```{r}\r\n\r\n#the regex\r\none <- \"[0-9]+\\\\$\"\r\ntwo <- \"\\\\b[a-z]{1,4}\\\\b\"\r\nthree <- \".*?\\\\.txt$\"\r\nfour <- \"\\\\d{2}/\\\\d{2}/\\\\d{4}\"\r\nfive <- \"<(.+?)>.+?</\\\\1>\"\r\n\r\n#an example match, and no-match case.\r\nt1 <- c(\"123456789$\",\"money$\")\r\nt2 <- \"Match this But Not THIS\"\r\nt3 <- c(\"match_me.txt\",\"dont_match_me.txt \")\r\nt4 <- c(\"valentines day is 14/02/2018\", \"not 2018-02-15\")\r\nt5 <- c(\"example html tag = <h>words</h>\", \"example garbage <h>garbage</q>\")\r\n\r\nunlist(str_extract_all(t1,one))\r\nunlist(str_extract_all(t2,two))\r\nunlist(str_extract_all(t3,three))\r\nunlist(str_extract_all(t4,four))\r\nunlist(str_extract_all(t5,five))\r\n\r\n\r\n```\r\n\r\n# Chapter 8 - Problem 9\r\n\r\n```{r}\r\n\r\nsecret_code <- \"clcopCow1zmstc0d87wnkig7OvdicpNuggvhryn92Gjuwczi8hqrfpRxs5Aj5dwpn0TanwoUwisdij7Lj8kpf03AT5Idr3coc0bt7yczjatOaootj55t3Nj3ne6c4Sfek.r1w1YwwojigO\r\nd6vrfUrbz2.2bkAnbhzgv4R9i05zEcrop.wAgnb.SqoU65fPa1otfb7wEm24k6t3sR9zqe5\r\nfy89n6Nd5t9kc4fE905gmc4Rgxo5nhDk!gr\" \r\n\r\nanswer <- unlist(str_extract_all(secret_code,\"[[:upper:]]\"))\r\n\r\nanswer\r\n\r\n```\r\n\r\n\"Congratulations you are a super nerd\"... yeah... that sounds about right. :)\r\n" }, { "alpha_fraction": 0.7241489291191101, "alphanum_fraction": 0.744193434715271, "avg_line_length": 62.020408630371094, "blob_id": "746ded2bb592be36c2829142fcd66074f9348008", "content_id": "1e8884c3ad64245b3373e4968b72d9aedf1ae6a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 3143, "license_type": "no_license", "max_line_length": 485, "num_lines": 49, "path": "/DATA_607_FinalProject/DATA_607_Final_Proposal.rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 Project Proposal\"\r\nauthor: \"Paul Britton\"\r\noutput:\r\n  html_document:\r\n    df_print: paged\r\n    toc: yes\r\n  html_notebook:\r\n    toc: yes\r\n---\r\n\r\n\r\n# Final Project Proposal\r\n\r\n## Task \r\n\r\n### Big Task\r\n\r\nMy end-goal is to extract futures quotes from old (pre-1930s) newspaper data. There is an abundance of free/public-domain newspapers available from various sources. 
I think that may be too big a task for this project (quick inspection suggests that the OCR data is not that good in some places!). So I'll settle on an immediate task: \r\n\r\n### Immediate task\r\n\r\nMy immediate task (i.e. for this project) is to build a model to identify which newspaper pages are likely to contain the data that I care about. I figure that the data set I'm looking at contains > 10M pages and that the data I'm interested in is likely confined to < 1% of them. \r\n\r\n### Motivation\r\n\r\nMy motivation is three-fold:\r\n \r\n* I'm interested in financial market history, particularly old, obscure data sets. This data set is rich and provides ample opportunities for cross-validation and error checking (i.e. multiple papers should report the same data-points every day... we'll see...)\r\n* I'm interested in learning more about text processing. I found project four extremely interesting and I'd like to build on the momentum that I feel I have right now.\r\n* I think that there are a lot more interesting things that could be done with the proposed data set in the future and as such, my intention is to become more familiar with it.\r\n\r\n## Data\r\n\r\nI intend to use data from The Library of Congress [Chronicling America](https://chroniclingamerica.loc.gov/) project. Specifically, I'm going to use the [API](https://chroniclingamerica.loc.gov/about/api/) to collect bulk samples. They have hi-res images, OCR and XML data available. I'll use the OCR for certain, possibly the XML, and in a cursory way, the images. \r\n\r\n## Methods\r\n\r\n1. Write code to grab data from the API \r\n\r\n2. Manually tag a sample of .txt documents (a few hundred, maybe) as \"containing\" or \"not containing\" the data of interest. This will facilitate the supervised learning approach which will be my primary plan of attack. I will, however, likely look into unsupervised methods as part of the process as well.\r\n\r\n3. Train and tune a model to detect pages of interest. I suspect that this is where much of the work will lie as the OCR data is messy. As an example here's an image from the [1861 New York Herald](https://chroniclingamerica.loc.gov/lccn/sn83030313/1861-12-11/ed-1/seq-2/) and here is the corresponding [text file](https://chroniclingamerica.loc.gov/lccn/sn83030313/1861-12-11/ed-1/seq-2/ocr/). The OCR process really seems to struggle with the 150 year old printing press output! \r\n\r\n4. Use the model to identify which papers I can ignore entirely (not all of them have financial data)\r\n\r\n5. For the relevant papers (i.e. those not ignored), try to identify which page(s) contain the relevant info such that I have at least 1 observation per business day.\r\n\r\n6. 
Download images for all pages identified and visually confirm that I got what I wanted.\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6587796807289124, "alphanum_fraction": 0.6769148707389832, "avg_line_length": 32.74074172973633, "blob_id": "f82a4cfec29026a93514457729d7741caf01d12d", "content_id": "20bc8b7f821c999b2e63597ab220e02fe804363e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 5627, "license_type": "no_license", "max_line_length": 740, "num_lines": 162, "path": "/DATA_607_Project1/DATA_607_Project1.rmd", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"DATA 607 - Project 1\"\r\nauthor: \"Paul Britton\"\r\noutput: \r\n  html_document:\r\n    toc: TRUE\r\n\r\n---\r\n\r\n\r\nAll related data and files for this project can be found in my [github](https://github.com/plb2018/DATA607/tree/master/DATA_607_Project1). Additionally, I've published it to [rpubs.com](http://rpubs.com/plb_lttfer/361563). For this week's assignment, all the data used is contained within this document.\r\n\r\n\r\n\r\n### Load Libraries\r\n\r\nLoad a few libraries that we will use along the way.\r\n\r\n```{r} \r\nlibrary(stringr) \r\nlibrary(tidyr)\r\nlibrary(ggplot2)\r\n``` \r\n\r\n\r\n### Load the Data\r\n\r\nFirst we load the data and take a look.\r\n\r\n```{r} \r\n\r\nsource_data<- \"https://raw.githubusercontent.com/plb2018/DATA607/master/DATA_607_Project1/tournamentinfo.txt\" \r\nrankings <- unlist(read.table(source_data,header=FALSE,sep=\"\\n\")) \r\nhead(rankings,10)\r\n```\r\n\r\nWe can see immediately that the data needs a bit of processing before we can do anything with it. We'll try to parse it with some regular expressions.\r\n\r\n### Parse the Data\r\n\r\nIt looks like we can extract what we need using regular expressions. The data that we need to grab is spread across 2 rows for each player. In both cases, the fields are consistently separated by pipes, making things a bit easier. The first row (r1) that we want to grab begins with the player number followed by a space and a \"|\" delimiter. The second row (r2) that we want begins with the state-code (2 uppercase letters), followed by a space and a \"|\" delimiter. This information should be sufficient for us to grab and recombine the rows in a more usable format. I sometimes find regular expressions hard to decipher, particularly if I haven't worked with them in a while, so I tend to explicitly explain them in my comments.\r\n\r\n\r\n```{r} \r\n\r\n\r\n#if we see a 1-4 digit # followed by a space and a pipe, grab it + the rest of the row\r\nr1 <- unlist(str_extract_all(rankings[5:length(rankings)],\"\\\\d{1,4}\\\\s\\\\|.+\")) \r\n\r\n#if we see 2 uppercase letters followed by a space and a pipe, grab it + the rest of the row\r\nr2 <- unlist(str_extract_all(rankings[5:length(rankings)],\"[[:upper:]]{2}\\\\s\\\\|.+\")) \r\n\r\n#while we're at it, we'll replace the \"W's\" \"L's\" \"D's\" and \"B's\" in r1... we just want the #s\r\nr1 <- unlist(str_replace_all(r1,\"\\\\|(W|L|B|D|H|U)\\\\s\",\"|\"))\r\n\r\n\r\nhead(r1,5)\r\nhead(r2,5)\r\n\r\n\r\n```\r\n\r\nThe regex appears to have worked as intended, so now we'll split the rows on the \"|\"'s while loading the data to a dataframe. 
We'll then down-select to just the cols that we need.\r\n\r\n```{r} \r\n\r\n#create the df\r\ndf <- data.frame(str_split_fixed(r1,\"\\\\|\",n=11),str_split_fixed(r2,\"\\\\|\",n=10))\r\n\r\n\r\n#grab the data we need\r\ncols <- c(1:10,12:13)\r\ndf <- df[,cols]\r\n\r\n#re-order the cols \r\ncols <- c(1,2,11,12,3:10)\r\ndf <- df[,cols]\r\n\r\n\r\n\r\n#we'll also rename all the cols that we're going to keep while we're at it\r\nnames(df) <- c(\"PlayerNum\",\"PlayerName\",\"PlayerState\",\r\n               \"USCF_Pre_Post\",\"TotalPts\",\"r1\",\"r2\",\r\n               \"r3\",\"r4\",\"r5\",\"r6\",\"r7\")\r\n\r\n#trim the whitespace\r\ndf <- as.data.frame(apply(df,2,function(x)gsub(\"^\\\\s+|\\\\s+$\", \"\", x)))\r\n\r\nhead(df,5)\r\n``` \r\n\r\n\r\nNow the data looks reasonably close to where we want it to be, however, we still need to deal with that last column (USCF_Pre_Post). Because the columns appear to be well formatted, we're going to split the column into 3 using separate() from tidyr. We're really only after the pre_rating column here. Another thing of note is that the pre-rating column sometimes contains a \"p\" with the ranking that we want consistently on the left-hand side of the \"p\".\r\n\r\n```{r warning=FALSE} \r\n\r\n#split the columns \r\ndf <- separate(data=df,col=USCF_Pre_Post,sep=\" / R: \",into = c(\"USCF ID\",\"Pre_Post\"))\r\ndf <- separate(data=df,col=Pre_Post,sep=\" ->\",into = c(\"Pre_Rating\",\"Post_Rating\"))\r\n\r\n#pre_rating sometimes contains a \"p\". The resultant \"etc\" column is a throw-away\r\ndf <- separate(data=df,col=Pre_Rating,sep=\"P\",into = c(\"Player_Pre_Rating\",\"etc\"))\r\n\r\nhead(df,5)\r\n\r\n``` \r\n\r\n### Compute the Pre-Chess Rating\r\n\r\n\r\nThe table looks good - all I have to do now is compute the average pre-chess score and drop all the unwanted columns. To compute the average score, I'm going to loop over each player, figure out who they played against (cols 9:15) and use that data to look up the pre-game ratings. Using that data, I can compute the averages.\r\n\r\n\r\n```{r warning=FALSE} \r\n\r\npre_chess_rating = c()\r\n\r\nfor (i in 1:nrow(df)){\r\n\r\n  #get all players who played against this player\r\n  opponent_nums <- df[i,9:15]\r\n  opponent_nums <- as.numeric(levels(unlist(opponent_nums)))[unlist(opponent_nums)]\r\n  opponent_nums <- opponent_nums[!is.na(opponent_nums)]\r\n  \r\n  #pull scores of the opponents and compute the avg\r\n  pre_chess_rating[i] <- trunc(mean(as.numeric(df[opponent_nums,5]))) \r\n  \r\n}\r\n\r\ndf$Player_Pre_Rating <- as.numeric(df$Player_Pre_Rating )\r\n\r\ndf$Avg_Opponent_Rating = pre_chess_rating\r\n\r\n\r\n```\r\n\r\n### Write the File\r\n\r\nNow we drop all the unwanted cols and write the data to file:\r\n\r\n```{r warning=FALSE} \r\n\r\ndf <- df[c(2:3,8,5,16)]\r\n\r\nhead(df)\r\n\r\nwrite.csv(df, file = \"data_607_project1.csv\")\r\n\r\n``` \r\n\r\n\r\n### Bonus Plot\r\n\r\nI just wanted to take a look and see whether there were any obvious relationships in the data. We see below that there appears to be a positive relationship between total points and player_pre_rating. I also wanted to see if I could extract any info by adding an additional dimension to the plot (avg_opponent_rating) via dot-color. 
\r\n\r\n```{r warning=FALSE} \r\n\r\nggplot(df, aes(Player_Pre_Rating,TotalPts)) + geom_point(aes(color=Avg_Opponent_Rating)) +\r\nggtitle(\"\")\r\n\r\n\r\n``` " }, { "alpha_fraction": 0.6578994393348694, "alphanum_fraction": 0.6820679903030396, "avg_line_length": 33.79740524291992, "blob_id": "de1541f2fc877e8c33e48afbab2d7d11c27837ef", "content_id": "b5a238d34d1ff6832dd77f3e4d514d5f784d33b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 21486, "license_type": "no_license", "max_line_length": 539, "num_lines": 617, "path": "/DATA_607_FinalProject/DATA_607_Final_Project.rmd", "repo_name": "plb2018/DATA607", "src_encoding": "WINDOWS-1252", "text": "---\r\ntitle: \"DATA 607 - Final Project\"\r\nauthor: \"Paul Britton\"\r\noutput: \r\n  html_document:\r\n    theme: cerulean\r\n    highlight: pygments\r\n    toc: TRUE\r\n---\r\n\r\n\r\n# Project Overview\r\n\r\nFor this project, I'm attempting to classify documents from the Library of Congress's [Chronicling America](https://chroniclingamerica.loc.gov/) initiative as either containing specific financial data, or not. I have several goals for this project:\r\n\r\n1. Classify pages (and thus newspapers) according to whether or not they contain specific financial content\r\n2. Familiarize myself with this data set with a goal to facilitate future work. There is A LOT of interesting stuff that could be done here \r\n3. Improve my text-mining skills\r\n\r\n\r\nAll of the work associated with this project can be found in my [github](https://github.com/plb2018/DATA607/tree/master/DATA_607_FinalProject) including some sample data. The output itself can be found on [rpubs](http://rpubs.com/plb_lttfer/388909). Given the size and location of the data set that I used, I have decided to provide tools that allow any user to collect them from the source themselves. The Library of Congress seems committed to maintaining availability of data so I don't have any concerns here regarding reproducibility. \r\n\r\n\r\n## About the Data\r\n\r\nChronicling America currently has about 13.3M pages archived and seems to be continually expanding their data set. The data is available in a few formats: \r\n\r\n* [Images](https://chroniclingamerica.loc.gov/lccn/sn83025287/1883-12-29/ed-1/seq-7/). They can also be downloaded as hi-res .jp2 files.\r\n* [Text](https://chroniclingamerica.loc.gov/lccn/sn83025287/1883-12-29/ed-1/seq-7/ocr/). This was created using [OCR](https://chroniclingamerica.loc.gov/ocr/) and thus is far from perfect, especially for older or messier newspapers.\r\n* [XML](https://github.com/plb2018/DATA607/raw/master/DATA_607_FinalProject/ocr.xml). The XML appears to be an enriched version of the OCR which contains word coordinates within the page and OCR confidence scores. Probably beyond the scope of this project, but something that I hope to work with in the future to automate the extraction of data. \r\n\r\n\r\nI estimate that it takes me about 10 seconds to manually load and visually scan a page for content of interest, so it would take about 1,540 days (working 24/7) to scan all the pages manually. I also roughly estimate that only about 0.5% of pages contain content that is relevant in the context of this project. 
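\r\n\r\nThe arithmetic behind that estimate, for reference:\r\n\r\n```{r}\r\n# pages * seconds-per-page, converted to days of nonstop scanning\r\n13.3e6 * 10 / (60 * 60 * 24)\r\n```\r\n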
\r\n\r\nA few examples of the kind of data that we're looking for here; both images and corresponding OCR data.\r\n\r\nDaily price quotes for various futures markets:\r\n\r\n### Example 1\r\n\r\n#### Image\r\n\r\n![Daily Futures Quotes - Circa Dec 29, 1883](https://github.com/plb2018/DATA607/raw/master/DATA_607_FinalProject/images/futures_sample.PNG)\r\n\r\n#### OCR:\r\n\r\n```\r\nCOMMISSION MERCHANTS,\r\nBoom 4, Mann helper Building, Southeast comer\r\nThird and Minnesota street?. Direct wire to\r\nChicago and Milwaukee Boards of Trad*.\r\n(Operator is our office.)\r\nBt. Paul, Friday, Dec. 23, 1833.\r\nFollowing is to-day's rango of prices on the\r\nMilwaukee and Chicago boards:\r\n???\"\"\">\" ! I I I ' fit\r\n1 w a I- s S s\r\nI a ? ? f 111\r\n: ? : . : c 3 :\r\nMilwaukee, j\r\nWheat- \"\r\nJanuary.... 95 i 94% S5Ji Si% 94%' 92%\r\nFebruary... 96% 95& Oo^'i 95% S5& 98%\r\nMay 1C3% 103% 103% 108% ....\r\nChicago,\r\nWheat-\r\nJanuary.... S6>i 95%' 96% Sssi 36 92%\r\nFebruary.. 97% 98% 91% S6s£ 96% 93 %\r\nMarch ;\r\nMay 1G4% 1C8& 10*34 1«3% ICS^ »5f\r\n```\r\n\r\n### Example 2\r\n\r\n#### Image\r\n\r\nHourly Price quotes for various futures market:\r\n\r\n![Hourly Futures Quotes - Circa Dec 29, 1883](https://github.com/plb2018/DATA607/raw/master/DATA_607_FinalProject/images/futures_sample2.PNG)\r\n\r\n\r\n#### OCR\r\n```\r\nThe following quotations, glrti tt the r>n^« of\r\nthe 2&rkets durissthe day, wars nesi - by XL.\r\nDoran, Co2tj»ion Merchant:\r\n. WHEAT.\r\nJan. May. Jaa. Kay.\r\n9-10 A.M. 84% 10SH SSK- 103Ji\r\n9^5 - t4si lOS-^. 9JJi 108H\r\n10:00 \" 95 103 V toft lUSK\r\n10a5 \" 85 IC3S B*£ 104 V\r\n10*0 M 95 X 108% 96 IMU\r\nLQAi \" 85# 113 % 91)? 104 %\r\n11*0 ??? 95 10J% H¥ 10: V\r\n11:15 \" . 95 l«»i 9»« 104\r\n11*) \" 95 10»»i »5% 10SK\r\nHHa \" .84% 108* 95% 199%\r\n13.-0O \" 95 108% 86 104\r\n12:15 \" »5 ~?????? 103% 96^ 104^\r\n11:10 M t*% 1C3% 96% K-4 j\r\n1345 \" 84« 103%- C-6 10S5<\r\nIsOQ \" 84% ' l'J3% 86 ltSXj\r\n```\r\n\r\n\r\nAs can be seen from the above, the quality of the OCR leaves something to be desired. My hope here is that, despite the translation being either incorrect or totally illegible to humans, there are still enough similarities between financial pages for them to be grouped together and that they are distinct from non-financial pages.\r\n\r\n\r\n\r\n```{r cleanup}\r\nrm(list = ls())\r\n```\r\n\r\n```{r setup, warning=FALSE, message=FALSE}\r\nlibrary(jsonlite)\r\nlibrary(data.tree)\r\nlibrary(stringr)\r\nlibrary(dplyr)\r\nlibrary(tidyr)\r\nlibrary(lubridate)\r\nlibrary(tm)\r\nlibrary(qdap)\r\nlibrary(ggplot2)\r\nlibrary(httr)\r\nlibrary(RTextTools)\r\nlibrary(knitr)\r\n\r\n```\r\n\r\n\r\n## Collect the Data Dictionary\r\n\r\nChronicling America provides a well organized data dictionary and bulk-downloading facility. The first thing that we'll do is get a list of files to download.\r\n\r\n```{r get_data_list}\r\nurl <- \"http://chroniclingamerica.loc.gov/ocr.json\"\r\nocr <- fromJSON(url)\r\nocr <- data.frame(ocr['ocr'])\r\n\r\nkable(head(select(ocr,ocr.url,ocr.size),5))\r\n```\r\n\r\n## Download the Data\r\n\r\nThe list above looks usable so now we can proceed to downloading some data. Note that some of these files are pretty large (1GB compressed) so I've included a \"test\" mode in my download function that grabs the 5 smallest files. 
Data is downloaded to the CWD unless otherwise specified.\r\n\r\n\r\n```{r download, eval = FALSE}\r\ngetArchives <- function(ocr.df,outPath = NULL, testMode = TRUE){\r\n  if(testMode){ocr.df<- head(ocr.df[order(ocr.df$ocr.size),],5)}\r\n\r\n  for (row in 1:nrow(ocr.df)){\r\n    data.url <- as.character(ocr.df[row,\"ocr.url\"])\r\n    print(paste(\"Downloading: \",data.url,sep=\" \"))\r\n    download.file(data.url,\r\n                  destfile=paste(outPath,\r\n                                 tail(unlist(strsplit(data.url,\"\\\\/\")),1)))\r\n  }\r\n}\r\n\r\n\r\n#if testMode = True, the function only downloads the 5 smallest files\r\ngetArchives(ocr,testMode=TRUE)\r\n\r\n#if testMode = False the function downloads everything. Takes forever!\r\n#getArchives(ocr,testMode=FALSE)\r\n\r\n\r\n```\r\n\r\n\r\n## Uncompressing\r\n\r\nOnce the data is downloaded, it will need to be manually unzipped from .bz2 format. While R seems to have good support for bz2, I couldn't seem to get it to work seamlessly with .tar.bz2 files containing a large tree-structure within. \r\n\r\nAfter we have unzipped, we can untar the rest as below. Note that given some of the file sizes, this can take a while to run, so I've included a sample unzip from github. Note that these files contain both .txt and .xml, but no images.\r\n\r\n\r\n```{r uncompress, eval=FALSE}\r\n\r\n#I saved some small sample files on my github for demonstration purposes\r\nuntar(\"https://github.com/plb2018/DATA607/blob/master/DATA_607_FinalProject/sampleData/batch_in_alford_ver01.tar\",\r\n      compressed = \"bzip2\",\r\n      exdir = \"test\")\r\n\r\n\r\n#A local file that I was working with\r\n#untar(\"batch_mnhi_anoka_ver02.tar\",\r\n#      compressed = \"bzip2\",\r\n#      exdir = \"test\")\r\n\r\n\r\n```\r\n\r\n\r\n## Inspect\r\n\r\nLet's take a look at the file structure contained within the compressed files.\r\n\r\n```{r inspect, eval=TRUE}\r\npath <- list.dirs(\"test/paper_name\")\r\n\r\nfiles.tree <- data.tree::as.Node(data.frame(pathString = path))\r\n\r\nfiles.tree\r\n```\r\n\r\n\r\nWe see that basically, every paper, year, month, day, edition and page gets its own folder. This means that we have to crawl these paths to get to all the actual files. I'm tempted to sidestep this file structure by saving all the files in a single directory and encoding the file names with the same info as provided by the dir tree... but we won't do that right now.\r\n\r\n## Get the Files\r\n\r\nWe'll get a list of all the files within the tree structure:\r\n\r\n```{r, getFiles}\r\n\r\nocr.names <- list.files(\"test/cat/sn83025287\",\r\n                        full.names = T,\r\n                        recursive = T)\r\n\r\nocr.names <- as.data.frame(ocr.names,\r\n                           stringsAsFactors = FALSE)\r\n\r\n\r\nkable(head(ocr.names,6))\r\n```\r\n\r\n\r\n## Transform\r\n\r\nNow we'll transform the data. Essentially, I'm going to chop up the file-paths and put all that data into a df to be used as meta-data in my corpus further along. 
We'll concatenate the year, month and day into a proper date, add a column that contains the day of the week (potentially useful in finance) and a column to flag whether the document contains financial data or not.\r\n\r\nIt's also a logical place to separate the .txt and .xml files at this point.\r\n\r\n\r\n```{r, transform}\r\n\r\nnew.cols <- c(\"rootfolder\",\"cat\",\"id\",\"YYYY\",\"mm\",\"dd\",\"edition\",\"pageNum\",\"fname\") \r\n\r\n\r\nocr.names %>%\r\n  filter(str_detect(ocr.names,\".txt\")) %>%\r\n  separate(ocr.names,\r\n           ocr.names,\r\n           into=new.cols,\r\n           sep=\"/\",\r\n           remove=F,\r\n           convert=F,\r\n           extra=\"warn\",\r\n           fill=\"warn\")-> ocr.txt\r\n\r\nocr.names %>%\r\n  filter(str_detect(ocr.names,\".xml\")) %>%\r\n  separate(ocr.names,\r\n           ocr.names,\r\n           into=new.cols,\r\n           sep=\"/\",\r\n           remove=F,\r\n           convert=F,\r\n           extra=\"warn\",\r\n           fill=\"warn\")-> ocr.xml\r\n\r\n\r\n\r\nocr.txt$edition <- gsub(\"ed-\",\"\",ocr.txt$edition)\r\nocr.txt$pageNum <- gsub(\"seq-\",\"\",ocr.txt$pageNum)\r\nocr.txt$date <- with(ocr.txt, ymd(sprintf('%04s%02s%02s', YYYY, mm, dd)))\r\nocr.txt$wDay <- wday(ocr.txt$date,label=T)\r\nocr.txt$hasData <- \"0\" \r\nocr.txt$text <- \"\"\r\ncolnames(ocr.txt)[1] <- \"doc_id\"\r\n\r\nkable(head(ocr.txt,5))\r\n\r\n\r\n\r\n\r\n```\r\n\r\n\r\n## Build a Corpus\r\n\r\nNext we'll take the dataframe that we've just created and add the actual text content from the files. Once again, this can take a while.\r\n\r\n```{r, corpus}\r\n\r\nocr.txt$text <- sapply(ocr.txt$doc_id, readChar,nchars=99999999)\r\nocr.corpus <- Corpus(DataframeSource(ocr.txt))\r\n\r\n#we'll add the doc_id as meta just in case we need it for some reason...\r\nmeta(ocr.corpus,tag=\"doc_id\") <-ocr.txt$doc_id\r\n\r\nkable(head(meta(ocr.corpus),5))\r\n\r\n```\r\n\r\n\r\n## Bag O' Words... (or something like it)\r\n\r\nNow we're going to employ a \"bag of words\"-like method to pare down the data for manual classification. \r\n\r\nOne of the problems that I face is that I need a sample of classified documents in order to train the SVM process that I'm planning on applying here. My idea is to short-list the docs using a bag-of-words approach and then manually classify.\r\n\r\nCursory inspection tells me that certain words are more common within target pages than non-target pages. What I do here is remove everything but those relevant words and work with that (significantly reduced) output. This speeds things up a lot.\r\n\r\n```{r, bagOwords}\r\n\r\n\r\nkeepWords<-content_transformer(function(x,words) {\r\n  regmatches(x, \r\n             gregexpr(paste0(\"\\\\b(\", paste(words,collapse=\"|\"),\"\\\\b)\"), x)\r\n             , invert=T)<-\" \"\r\n  x\r\n})\r\n\r\nkeep<-c(\"oats\",\"corn\",\"bushel\",\"wheat\",\"rye\",\"barley\")\r\n\r\n\r\nbagOwords.corpus <- ocr.corpus %>%\r\n  tm_map(content_transformer(tolower)) %>%\r\n  tm_map(removePunctuation) %>%\r\n  tm_map(removeNumbers) %>%\r\n  tm_map(removeWords,words=stopwords(\"english\")) %>%\r\n  tm_map(stripWhitespace) %>%\r\n  tm_map(stemDocument,language = \"english\") %>%\r\n  tm_map(keepWords,keep)\r\n  \r\n\r\nocr.dtm <- DocumentTermMatrix(bagOwords.corpus)\r\n\r\n```\r\n\r\nNow I subset all the documents that contain an abundance of my \"keep\" words. All I did here was make a simple rule that picks out documents that contain > 70 occurrences of the special words. 
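\r\n\r\nIn hindsight, a proportion of total page words might have been more robust than a raw count. A minimal sketch of that variant (assuming the keep-word ocr.dtm built above; the 1% threshold is purely illustrative, and as.matrix() is memory-hungry on big corpora):\r\n\r\n```{r}\r\n# hypothetical proportion-based filter: keep-word count / total word count\r\nfull.dtm <- DocumentTermMatrix(ocr.corpus)\r\nkeep.ratio <- rowSums(as.matrix(ocr.dtm)) / rowSums(as.matrix(full.dtm))\r\ntarget.prop <- meta(ocr.corpus[which(keep.ratio > 0.01)], tag = \"doc_id\")\r\n```\r\n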
That said, I suspect that the number of words on a newspaper page is relatively constant for both business & practical reasons, so a hard limit probably isn't all that dangerous.\r\n\r\n\r\n\r\n```{r, moreWords}\r\nt <- as.data.frame(as.matrix(ocr.dtm),stringsAsFactors = F)\r\nt$sum <- rowSums(t)\r\n\r\ntarget <- meta(ocr.corpus[which(t$sum >70)],tag=\"doc_id\")\r\n\r\nkable(head(target,5))\r\n\r\nggplot(t,aes(x=seq(1, nrow(t), by=1),y=t$sum)) + \r\n  geom_line() +\r\n  ylab(\"Important Term Freq\") +\r\n  xlab(\"Doc Num\") +\r\n  ggtitle(\"Term Count by Doc\")+\r\n  theme(plot.title = element_text(hjust = 0.5))\r\n\r\nqplot(t$sum, geom=\"histogram\") + \r\n  xlab(\"Important Term Freq\") +\r\n  ggtitle(\"Term Count Freq\")+\r\n  theme(plot.title = element_text(hjust = 0.5))\r\n\r\n```\r\n\r\nWe can see that most of the documents contain <25 occurrences of the \"keep\" words but that there is a bit of a jump around the 75-count mark. To be clear, my intention with this \"bag of words\" approach is to identify documents that have a higher probability of containing financial data for manual classification, but I'm not overly concerned with being wrong.\r\n\r\n\r\n# Get Images\r\n\r\nSo we've identified a handful of files that contain relevant words. Next we're going to download images (.pdfs) so that we can manually inspect and classify them. Once again, even the .pdfs are about 5 MB each, so this takes a while.\r\n\r\nWe basically parse the file names to reconstruct the image URL at Chronicling America.\r\n\r\n```{r, getImages, eval=FALSE}\r\n\r\n#the base URL \r\nimage.base <- \"https://chroniclingamerica.loc.gov/lccn/\"\r\n\r\nfor (i in unlist(target)){\r\n  \r\n  str <- unlist(strsplit(i,split='/'))\r\n  dt <- paste(str[[3]],str[[4]],str[[5]],sep=\"-\")\r\n  fname <- paste(str[7],\".pdf\",sep=\"\")\r\n  img.url <- paste(str[2],dt,str[6],fname,sep=\"/\")\r\n  \r\n  \r\n  #print(paste(image.base,img.url,sep=\"\"))\r\n  \r\n  #I've commented this out so I don't accidentally end up downloading tons of images\r\n  #GET(paste(image.base,img.url,sep=\"\"),\r\n  #    write_disk(paste(\"test/images/\",gsub(\"/\",\"_\",img.url),sep=\"\"),overwrite=F))\r\n  \r\n} \r\n\r\n```\r\n\r\n\r\n# Add Classification Info\r\n\r\nSo I've manually classified about 220 documents and that information can now be added to the corpus. We'll use the \"hasData\" meta-tag that we previously created when transforming the data. \r\n\r\nFor the manual classification, rather than classifying every document as either \"TRUE\" or \"FALSE\", I've created a list that contains documents for which my condition is \"TRUE\". On a date-by-date basis, this is valid as newspapers (particularly of this vintage) generally have futures quotes confined to one single page per date. \r\n\r\nWe go through my manually classified items; a simple text file containing paper id, date and page num. 
We identify the index for each id+date+pageNum combo in the corpus, and then change that flag from FALSE to TRUE.\r\n\r\n\r\n```{r, addFlagInfo, warning=FALSE}\r\n\r\nflags <- read.csv(\"https://raw.githubusercontent.com/plb2018/DATA607/master/DATA_607_FinalProject/market_data.txt\",header = T, stringsAsFactors = F)\r\n\r\n#create a vector to store our updated info\r\nhasData <- rep(FALSE,nrow(meta(ocr.corpus)))\r\n\r\nfor (f in 1:nrow(flags)){\r\n  \r\n  id <- ymd(flags[f,1])\r\n  dt <- flags[f,2]\r\n  pg <- flags[f,3]\r\n  \r\n  #print(paste(id,dt,pg))\r\n  \r\n  idx_id <- (meta(ocr.corpus,tag = \"id\") == \"sn83025287\")\r\n  idx_dt <- (meta(ocr.corpus,tag = \"date\") == dt)\r\n  idx_pg <- (meta(ocr.corpus,tag = \"pageNum\") == pg)\r\n\r\n  #idx <- data.frame(id=idx_id,dt=idx_dt,pg=idx_pg)\r\n  idx <- data.frame(dt=idx_dt,pg=idx_pg)\r\n\r\n  idx <- rowSums(idx) == 2\r\n\r\n  #meta(ocr.corpus[idx],tag=\"hasData\") <-1\r\n  \r\n  hasData[idx] <- TRUE\r\n  \r\n  \r\n}\r\n\r\n#update our corpus classification\r\nmeta(ocr.corpus,tag=\"hasData\") <-hasData\r\n\r\nkable(head(meta(ocr.corpus,tag=\"hasData\"),5))\r\n\r\n```\r\n\r\n\r\n# Build a Model\r\n\r\nNow that we have some classified data, we can build a model. Recall that previously we'd used a stripped-down version of the corpus. Here, we're going to revert to the original, full corpus.\r\n\r\nWe'll start by cleaning the original corpus. Then we'll build a container for the SVM.\r\n\r\n```{r container}\r\n\r\nocr.corpus <- ocr.corpus %>%\r\n  tm_map(content_transformer(tolower)) %>%\r\n  tm_map(removePunctuation) %>%\r\n  tm_map(removeNumbers) %>%\r\n  tm_map(removeWords,words=stopwords(\"english\")) %>%\r\n  tm_map(stripWhitespace) %>%\r\n  tm_map(stemDocument,language = \"english\")\r\n\r\n\r\n#down sample for testing\r\ndata <- sample(ocr.corpus,2500) \r\nocr.dtm <- DocumentTermMatrix(data)\r\n\r\n#setup for building our container\r\nlabels <- unlist(meta(data, \"hasData\")[,1])\r\nN <- length(unlist(meta(data, \"hasData\")[,1]))\r\npct <-0.80\r\nr<-round(N*pct,0)\r\n\r\n\r\n\r\ncontainer <- create_container(\r\n  ocr.dtm,\r\n  labels = labels,\r\n  trainSize = 1:r,\r\n  testSize = (r+1):N,\r\n  virgin=FALSE)\r\n\r\n```\r\n\r\n## Train the Model\r\n\r\nNext we'll train the actual model on some data. I've chosen SVM as it appears to be a reasonable approach for problems with high dimensionality, which this is.\r\n\r\n```{r SVM}\r\n\r\nsvm.model <- train_model(container, \"SVM\")\r\nsvm.out <- classify_model(container, svm.model)\r\n\r\n\r\n```\r\n\r\n## Inspect the Model\r\n\r\nNow we examine the results of the model. Below we see a table with all of the predicted values alongside the real TRUE values. I think it's a good way to show the data as the real TRUE observations are few relative to the whole sample.\r\n\r\nWe also look at the accuracy using the \"recall_accuracy()\" function. In this case, I think that this function provides a misleading result. Given that I expect only about 0.5% of docs to contain info that I am interested in, and that all my docs are pre-coded as false, we'd expect to get a high score here. 
I'm much more concerned with the proportion of TRUE documents that I was able to identify as such.\r\n\r\n```{r SVM_out}\r\n\r\ntrue.labels <- labels[(r+1):N]\r\npredicted.labels <- svm.out[,\"SVM_LABEL\"]\r\nsvm.out$real <- true.labels \r\n\r\nkable(head(svm.out[svm.out$real == TRUE,],20))\r\n\r\nrecall_accuracy(true.labels,predicted.labels)\r\n\r\nlength(which(predicted.labels == TRUE & true.labels == TRUE)) / length(which(true.labels )) \r\n\r\n```\r\n\r\nSo the model appears to be able to do a reasonable job of classifying which documents meet my criteria and which do not. I find this impressive given that the training set is relatively small and that the documents are so messy.\r\n\r\n\r\n## Outsample Attempt\r\n\r\nFinally, we'll try to apply the model that we've just trained on some outsample data and take a quick look at the results.\r\n\r\nWe'll start by loading up some uncategorized data and formatting it.\r\n\r\n```{r, outsample}\r\n\r\n#load uncategorized data\r\nuncat.names <- list.files(\"test/uncat/sn83025287\",\r\n                          full.names = T,\r\n                          recursive = T)\r\n\r\nuncat.names <- as.data.frame(uncat.names,\r\n                             stringsAsFactors = FALSE)\r\n\r\n\r\n\r\nuncat.names %>%\r\n  filter(str_detect(uncat.names,\".txt\")) %>%\r\n  separate(uncat.names,\r\n           uncat.names,\r\n           into=new.cols,\r\n           sep=\"/\",\r\n           remove=F,\r\n           convert=F,\r\n           extra=\"warn\",\r\n           fill=\"warn\")-> uncat.txt\r\n\r\nuncat.txt$edition <- gsub(\"ed-\",\"\",uncat.txt$edition)\r\nuncat.txt$pageNum <- gsub(\"seq-\",\"\",uncat.txt$pageNum)\r\nuncat.txt$date <- with(uncat.txt, ymd(sprintf('%04s%02s%02s', YYYY, mm, dd)))\r\nuncat.txt$wDay <- wday(uncat.txt$date,label=T)\r\nuncat.txt$hasData <- FALSE \r\nuncat.txt$text <- \"\"\r\ncolnames(uncat.txt)[1] <- \"doc_id\"\r\n\r\n\r\nuncat.txt <- tail(uncat.txt,50)\r\n\r\nuncat.txt$text <- sapply(uncat.txt$doc_id, readChar,nchars=99999999)\r\nuncat.corpus <- Corpus(DataframeSource(uncat.txt))\r\n\r\n\r\n```\r\n\r\n\r\nWe'll then create a dtm. Note that a slightly different method is required here as the model appears to be picky about inputs. As such, I had to give it some info about the matrix originally used to train the model.\r\n\r\n```{r, outsample2}\r\n\r\nuncat.labels <- unlist(meta(uncat.corpus, \"hasData\")[,1])\r\n\r\n#IMPORTANT NOTE: the create_matrix function seems to throw an error only when knitting.\r\n#This is an ugly (but effective) fix. When running, an edit window will pop-up. \r\n#Change \"Acronym\" to \"acronym\" in line ~42 and it works. Obscure, yet effective.\r\n\r\ntrace(\"create_matrix\", edit=T)\r\n\r\ndtm <- create_matrix(uncat.txt,\r\n                     originalMatrix=ocr.dtm,\r\n                     toLower = TRUE,\r\n                     removePunctuation = TRUE,\r\n                     removeNumbers=TRUE,\r\n                     removeStopwords = TRUE ,\r\n                     stemWords = TRUE)\r\n```\r\n\r\nWe then create the container and run the model:\r\n\r\n```{r, predict}\r\n\r\nuncat.container <- create_container(\r\n  dtm,\r\n  labels = labels,\r\n  testSize = 1:50,\r\n  virgin=FALSE)\r\n\r\n\r\nuncat.out <- classify_model(uncat.container,svm.model)\r\n\r\nkable(head(uncat.out,10))\r\n\r\nuncat.out[uncat.out$SVM_LABEL == TRUE,]\r\n\r\n```\r\n\r\nWe see that according to the model, none of the 50 out-sample pages appear to have any data of interest. A cursory investigation of the associated image files suggests that this is reasonably, although not perfectly, accurate. Either way, my suspicion is that the model needs a larger training set, and I intend to continue working on that.\r\n\r\n# Summary\r\n\r\nIn this project, I was able to:\r\n\r\n1. 
Acquire and unpack the desired data\r\n2. Manipulate and transform the data\r\n3. Significantly reduce manual efforts using \"bag of words\", including automated download of image files for manual inspection\r\n4. Manually classify, then automatically tag the data\r\n5. Build an SVM model, which was surprisingly accurate based on the input / intuition\r\n6. Run the model on totally \"out of sample\" data.\r\n\r\n\r\n# Future Work\r\n\r\nI intend to continue to explore this model and the data set in general. Once I have located a significant number of pages which contain the content that I am interested in, I plan to take a crack at automatic extraction of that data.\r\n" }, { "alpha_fraction": 0.5267835855484009, "alphanum_fraction": 0.53423011302948, "avg_line_length": 35.827274322509766, "blob_id": "68fa6b8b5467d3a02bbb17d2f1973f7259d2d3ae", "content_id": "43085bbe6a2e2a8a875205475b4b39fe6316f08a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4163, "license_type": "no_license", "max_line_length": 114, "num_lines": 110, "path": "/Project 3/indeed_scraper.py", "repo_name": "plb2018/DATA607", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n# based on \"First attempt: Original Code\" by Iden\r\n# Web Scraper for Indeed job postings\r\n# Source: Michael Salmon's Medium post\r\n# URL: https://medium.com/@msalmon00/web-scraping-job-postings-from-indeed-96bd588dcb4b\r\n\r\n\r\nimport requests\r\nimport bs4\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport pandas as pd\r\nimport time\r\n\r\n\r\nmax_results_per_city = 100\r\n\r\n\r\ncity_set = [\"New+York+NY\", \"Seattle+WA\", \"San+Francisco+CA\", \"Washington+DC\", \"Atlanta+GA\",\r\n \"Boston+MA\", \"Austin+TX\", \"Cincinnati+OH\", \"Pittsburgh+PA\"]\r\n\r\ncolumns = [\"city\", \"job_title\", \"company_name\", \"location\", \"summary\", \"salary\"]\r\n\r\nsample_df = pd.DataFrame(columns = columns)\r\n\r\ndf = pd.DataFrame(columns = columns)\r\n\r\nlinks = []\r\n\r\nbase_url = \"https://www.indeed.com/\"\r\n\r\nfor city in city_set:\r\n print city\r\n for start in range(0, max_results_per_city, 10):\r\n page = requests.get(\"http://www.indeed.com/jobs?q=data+scientist&l=\" + str(city) + \"&start=\" + str(start))\r\n time.sleep(1)\r\n soup = BeautifulSoup(page.text, \"lxml\", from_encoding = \"utf-8\")\r\n\r\n #collect links into a list - will be used later to get summary_full\r\n for div in soup.find_all(name = \"div\", attrs = {\"class\":\"row\"}):\r\n for a in div.find_all(name = \"a\", attrs = {\"data-tn-element\":\"jobTitle\"}):\r\n links.append(base_url+a[\"href\"])\r\n \r\n\r\n #pull the details\r\n for div in soup.find_all(name = \"div\", attrs = {\"class\":\"row\"}):\r\n num = (len(sample_df) + 1) # Row num for df\r\n job_post = [] # New job posting\r\n job_post.append(city) # Append city from city_set \r\n for a in div.find_all(name = \"a\", attrs = {\"data-tn-element\":\"jobTitle\"}):\r\n job_post.append(a[\"title\"]) # Append job title\r\n company = div.find_all(name = \"span\", attrs = {\"class\":\"company\"})\r\n if len(company) > 0: # Get company name\r\n for b in company:\r\n job_post.append(b.text.strip())\r\n else:\r\n sec_try = div.find_all(name = \"span\", attrs = {\"class\":\"result-link-source\"})\r\n for span in sec_try:\r\n job_post.append(span.text)\r\n c = div.findAll(\"span\", attrs = {\"class\":\"location\"})\r\n for span in c:\r\n job_post.append(span.text) # Append location of job\r\n d = div.findAll(\"span\", attrs = 
{\"class\":\"summary\"})\r\n for span in d: # Append job summary\r\n job_post.append(span.text.strip())\r\n try: # Get job salary, if any\r\n job_post.append(div.find(\"nobr\").text)\r\n except:\r\n try:\r\n div_two = div.find(name = \"div\", attrs = {\"class\":\"sjcl\"})\r\n div_three = div_two.find(\"div\")\r\n job_post.append(div_three.text.strip())\r\n except:\r\n job_post.append(\"\")\r\n sample_df.loc[num] = job_post\r\n\r\n#add the links to the df\r\nsample_df['link'] = links\r\n\r\n#add a placeholder col for the full summary\r\nsample_df['summary_full'] = \"\" \r\n\r\n\r\n#iterate over the sample_df and access the save links\r\nfor i,row in sample_df.iterrows():\r\n\r\n #a progress report...\r\n if i%10 == 0:\r\n print str(i) + \"of\" + str(sample_df.shape[0])\r\n\r\n summary = []\r\n page2 = requests.get(row['link'])\r\n soup2 = BeautifulSoup(page2.text, \"html.parser\")\r\n time.sleep(1)\r\n d = soup2.findAll(\"span\", attrs = {\"class\":\"summary\"})\r\n for span in d:\r\n #clean up the data a bit (remove spaces & whitespace etc)\r\n data = span.text.strip()\r\n data = os.linesep.join([s for s in data.splitlines() if s])\r\n data =data.replace(\"\\r\",\"\")\r\n data = data.replace(\"\\n\",\"\")\r\n summary.append(data)\r\n\r\n\r\n #add the full summary to the sample_df\r\n sample_df['summary_full'].loc[i] = summary \r\n\r\n#write it all to file using \"|\" delim\r\nsample_df.to_csv(\"indeed_sample.csv\",sep=\"|\",encoding = \"utf-8\")\r\n\r\n" } ]
15
annekwong/swimming-pool-booking
https://github.com/annekwong/swimming-pool-booking
f0abd4f6656983f4822182221c2d7aa69a0d4433
76d0d5cafcfec890590daa13f2925997aeed75c4
661baf4d15efdf27fb9f145e77685a23e9758973
refs/heads/master
2023-07-01T07:16:53.323715
2021-07-26T11:00:30
2021-07-26T11:00:30
389,599,050
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4829694330692291, "alphanum_fraction": 0.48733624815940857, "avg_line_length": 19.836362838745117, "blob_id": "bcd8c25cc93b49260d7a2cd7b0873edc44ad16b4", "content_id": "b023e137e5e4950e716e2c85dbfc1b70dac32a71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 60, "num_lines": 55, "path": "/config.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "import json\nfrom time import sleep\n\ndef ParseConfig_v0(filename):\n j = json.load(open(filename, \"r\"))\n \n c_type = j['type']\n c_time = j['time']\n \n try:\n c_first = int(j['first'])\n except ValueError:\n c_first = None\n \n try:\n c_last = int(j['last'])\n except ValueError:\n c_last = None\n \n return (c_type, c_time, c_first, c_last);\ndef ParseConfig(filename):\n j = json.load(open(filename, \"r\"))\n \n d = {}\n \n d['type'] = j['type']\n d['time'] = j['time']\n \n try:\n d['start'] = int(j['start'])\n except ValueError:\n d['start'] = None\n \n try:\n d['stop'] = int(j['stop'])\n except ValueError:\n d['stop'] = None\n \n try:\n d['step'] = int(j['step'])\n except ValueError:\n d['step'] = None\n \n return d\n\nif __name__ == \"__main__\":\n d = ParseConfig(\"test.json\")\n \n l = list(range(10))\n # print(\"{:s}, {:s}\".format(control_type, control_time))\n # print(l[control_first:control_last])\n print(\"{:s}, {:s}\".format(d['type'], d['time']))\n print(l[d['start']:d['stop']])\n \n sleep(10)" }, { "alpha_fraction": 0.5625524520874023, "alphanum_fraction": 0.5717884302139282, "avg_line_length": 23.77083396911621, "blob_id": "1508ef1d012ed8d3f8613f2c04fb126146ffb1ff", "content_id": "ee4fce11945d6b5e91d216560bb4541e66c5e1bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 122, "num_lines": 48, "path": "/test_date.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\ndef SuffixedDate(s):\n if(s[-1] == '1'):\n return 'st'\n elif(s[-1] == '2'):\n return 'nd'\n elif(s[-1] == '3'):\n return 'rd'\n else:\n return 'th'\n\ndef NextMonday(d):\n offset = 0\n for i in range(1,8):\n if((d+timedelta(i)).strftime(\"%a\") == 'Mon'):\n offset = i\n \n # day = (d+timedelta(offset)).strftime(\"%a %b %d\")\n day = (d+timedelta(offset))\n # print(day.strftime(\"%a %b %d\"))\n \n return day\n\n# let's find the offset by formatting next Monday's date, then getting index of that in the list of dates\n\ndef FormatDate(d):\n s = d.strftime(\"%a %b %d\")\n \n _, m, n = s.split(' ')\n n = str(int(n))\n num = SuffixedDate(n)\n return \"{:s} {:s}{:s}\".format(m, n, num)\n\n# FormatDate(MondayNextWeek())\n\n# ! had a variable named 'd', made a list 'days' from it, by adding timedelta; printing `for d in days` changed d. 
Spooky.\n\n\n# dates = [x['date'] for x in slot_nodes]\n# nextm = FormatDate(NextMonday(today))\n# offset = daystamps.index(nextm)\n# daystamps[off:off+7]\n\n# - later down the line:\n# days = [x['link'] for x in slot_nodes[off:off+7]]\n\n" }, { "alpha_fraction": 0.5664335489273071, "alphanum_fraction": 0.5678321719169617, "avg_line_length": 22.09677505493164, "blob_id": "09ae8bb070b9966ecf9ef34db1c9b2007a3478ee", "content_id": "d64b36d73a52f6d2206f5901eb2bbe735997d85c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 715, "license_type": "no_license", "max_line_length": 55, "num_lines": 31, "path": "/pack.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "from zipfile import ZipFile\nfrom glob import glob\nfrom datetime import datetime\nimport os\n\ndef Timestamp():\n return datetime.now().strftime(\"%d-%m-%y_%H-%M-%S\")\n\ndef PackTemp():\n to_zip = glob(\".\\\\temp\\\\*\")\n try:\n to_zip.append(glob(\"log_*.txt\")[0])\n except:\n pass\n \n # daystamp = datetime.now().strftime(\"%d-%m-%y\")\n # filename = \"session_{:s}.zip\".format(daystamp)\n filename = \"session_{:s}.zip\".format(Timestamp())\n \n zf = ZipFile(filename, \"w\")\n for tz in to_zip:\n zf.write(tz)\n zf.close()\n print(\"> wrote {:s}\".format(filename))\n \n # delete everything in temp\n for t in to_zip:\n os.remove(t)\n\nif __name__ == \"__main__\":\n PackTemp()" }, { "alpha_fraction": 0.5975253582000732, "alphanum_fraction": 0.6038604378700256, "avg_line_length": 36.962406158447266, "blob_id": "04e0d31b54a49ff8754bc8178daee6c75cfa9496", "content_id": "433bc829f399fed301d218cbc9cc5538b9cb0941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20205, "license_type": "no_license", "max_line_length": 249, "num_lines": 532, "path": "/script.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoSuchWindowException\n\nfrom time import time, sleep\nfrom datetime import datetime\nimport json\nimport os\n\nfrom sys import argv\n\nimport regex\nfrom glob import glob\n\n\ndriver_path = 'K:\\\\[Newest Core]\\\\Tools\\\\chromedriver.exe'\ndef get_chrome():\n options = Options()\n options.add_argument(\"--log-level=3\");\n \n driver = webdriver.Chrome(driver_path, options=options)\n driver.execute_script(\"Object.defineProperty(navigator, 'webdriver', {get: () => false})\");\n return driver\ndef headless_chrome():\n options = Options()\n options.add_argument(\"--headless\")\n options.add_argument('window-size=1920x1080');\n options.add_argument(\"--log-level=3\");\n \n driver = webdriver.Chrome(executable_path=driver_path, options=options)\n driver.execute_script(\"Object.defineProperty(navigator, 'webdriver', {get: () => false})\");\n return driver\n\n\ndef WaitPageLoad(driver):\n # print(\"Wait starting...\")\n page_ready = ''\n while(page_ready != 'complete'):\n # print(\"Wait continuing...\")\n sleep(0.5)\n page_ready 
= driver.execute_script(\"return document.readyState\")\n # print(\"Wait finished!\")\n \ndef xpath(driver, x):\n return driver.find_element_by_xpath(x)\ndef xpaths(driver, x):\n return driver.find_elements_by_xpath(x)\n\n\ndef slow_type(element, text, delay=0.05):\n for character in text:\n element.send_keys(character)\n sleep(delay)\n\ndef CheckServerTime(url):\n requests.get(url).headers['Date']\n\nurl = \"https://myrichmond.richmond.ca\"\ncredentials = {\n # \"my_login\": \"[email protected]\",\n # \"my_password\": \"PoolTest7\",\n \"login\": \"[email protected]\",\n \"password\": \"Richmond1\"\n}\nclick_prefix = \"https://richmondcity.perfectmind.com\"\n\n# --- --- --- --- --- ---\n\n\n\nbig_timeout = 60\nmedium_timeout = 10\ntimeout = 5\n\ndef login(driver):\n # if(test):\n # cred_login = credentials['my_login']\n # cred_pass = credentials['my_password']\n # else:\n cred_login = credentials['login']\n cred_pass = credentials['password']\n \n login = WebDriverWait(driver, big_timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//input[@type='text']\")))\n sleep(0.5)\n login.click()\n sleep(0.25)\n slow_type(login, cred_login)\n \n sleep(0.5)\n \n password = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//input[@type='password']\")))\n password.click()\n sleep(0.25)\n slow_type(password, cred_pass)\n \n login_button = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//input[@id='loginButton_0']\")))\n sleep(0.5)\n login_button.click()\n \n \ndef Minoru(driver):\n login(driver)\n \n flag = True\n # sleep_wait = 10\n while(flag):\n try:\n # sleep(sleep_wait)\n activities_button = WebDriverWait(driver, 60).until(EC.element_to_be_clickable((By.XPATH, \"*//div/ul/a[contains(@href, 'richmondcity.perfectmind.com')]\")))\n except ElementClickInterceptedException:\n # print(\"Click intercepted\")\n pass\n except:\n print(\"Probably maintenance... F5-ing\")\n # sleep_wait += 2\n driver.refresh() # this block should be sufficient against maintenance... 
COULDA TESTED IT THEN\n else:\n flag = False\n sleep(1)\n activities_button.click()\n \n # switch to another tab\n driver.switch_to.window(driver.window_handles[1])\n WaitPageLoad(driver)\n # print(\"Perfect mind!\")\n \n minoru_centre = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//h2[contains(text(), 'Registered Visits')]/following-sibling::ul/li/a[contains(text(), 'Minoru Centre for Active Living')]\")))\n minoru_centre.click()\n\n\n\n\n# - wait for table[@id='classes'] to load\n# - got it.\n# register_buttons = xpaths(driver, \"*//span[contains(text(), 'REGISTERED VISIT - LANE SWIMMING')]/ancestor::div[@class='bm-class-container'] / *//span[contains(text(),'9:00am - ')]/ancestor::div[@class='bm-class-container']\")\n# xpath(register_buttons[0], \".//input[@type='button']\").click()\n\n\n# def go(i):\n # global driver\n # global days\n # global register_buttons\n # days = register_buttons[-7:]\n # driver.get(days[i])\n \n# def s():\n # global driver\n # body_elements = xpaths(driver, \"*//body\")\n # for b in body_elements:\n # count = len(glob(\"*.html\"))\n # name = \"{:02d}.html\".format(count)\n # open(\"{:02d}.html\".format(count), \"w\", encoding='utf8').write(b.get_attribute('outerHTML'))\n # print(\"> wrote {:s}\".format(name))\n \n# are these necessary?...\ndef clickalert(driver):\n # what speed do these appear with?\n try:\n # xpath(driver, \"*//div[@class='message'][@role='alert']\").click()\n WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, \"*//div[@class='message'][@role='alert']\"))).click()\n except: \n pass\n else:\n pass\n # print(\"Alert found and clicked away\")\n sleep(0.5)\n\n\"\"\"\ndef register():\n register_flag = True\n while(register_flag):\n try:\n button = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//a[contains(@class,'book-button')]\")))\n # xpath(driver, \"*//a[contains(@class,'book-button')]\").click()\n button.click()\n except KeyboardInterrupt:\n register_flag = False\n break\n except:\n sleep(1)\n finally:\n register_flag = False\n\"\"\"\n\ndef next0(driver):\n # print(\"Going next0\")\n xpath(driver, \"*//span[text()='Next']\").click()\n\ndef next1(driver):\n # print(\"Going next1\")\n xpath(driver, \"*//a[@title='Next']\").click()\n\ndef Timestamp():\n return datetime.now().strftime(\"%d-%m-%y_%H-%M-%S\")\n\n\n# - test up to questionnaire\n# --- go the the page, THEN launch this. 
That way can test irrespective of where we are\ndef process_v0(driver):\n clickalert(driver)\n \n reg_flag = True\n while(reg_flag):\n try:\n register_button = WebDriverWait(driver, 1.0).until(EC.element_to_be_clickable((By.XPATH, \"*//a[contains(@class,'book-button')]\")))\n except TimeoutException:\n driver.refresh()\n else:\n reg_flag = False\n # register_button = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//a[contains(@class,'book-button')]\")))\n clickalert(driver)\n \n register_button.click()\n \n sleep(0.5)\n \n # --- ATTENDEES\n checkbox = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \".//table//label[contains(text(), '(You)')]/ancestor::tr//input[@type='checkbox']\")))\n clickalert()\n if not checkbox.is_selected():\n checkbox.click()\n next0(driver)\n \n sleep(0.5)\n \n \n # --- QUESTIONNAIRE\n start = time()\n WaitPageLoad(driver)\n end = time()\n # print(\"> load wait: {:3.2f}\".format(end-start))\n \n consent_check = xpath(driver, \"*//*[@class='reg-form']//div[@class='questionField']//input\")\n consent_press = xpath(driver, \"*//*[@class='reg-form']//div[@class='questionField']//label\")\n if not consent_check.is_selected:\n consent_press.click()\n sleep(0.25)\n next1(driver)\n \n \n # --- FEES AND EXTRAS\n pick_fee = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//div[@class='fee-section']//span[contains(text(), 'Aquatic & Fitness Membership')]/preceding-sibling::span[@class='outer-circle']\")))\n # clickalert()\n pick_fee.click()\n next0(driver)\n \n \n # --- PAYMENT\n # - oh, this has an iframe. How quaint!\n WaitPageLoad(driver)\n WebDriverWait(driver, medium_timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//iframe[@name='iframe']\")))\n driver.switch_to.frame(xpath(driver, \"*//iframe[@name='iframe']\")) # can wait until it loads\n order_button = WebDriverWait(driver, medium_timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//button[@class='process-now'][contains(text(), 'Place My Order')]\")))\n # print(\"Got to the order button\")\n try:\n order_button = WebDriverWait(driver, medium_timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//button[@class='process-now'][contains(text(), 'Place My Order')]\")))\n except:\n pass\n # print(\"Can't find order button\")\n else:\n order_button.click()\n \n \n \n # --- session confirmation\n WaitPageLoad(driver)\n \n # session = xpath(driver, \"*//div[@id='main-content']//div[@class='bm-event-info']//h2/span\").text\n # book_date = xpath(driver, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-date']\").text.split(\", \")[-1]\n # book_time = xpath(driver, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-subject']\").text\n session_el = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//div[@id='main-content']//div[@class='bm-event-info']//h2/span\")))\n bookdate_el = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-date']\")))\n booktime_el = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-subject']\")))\n \n session = session_el.text\n book_date = bookdate_el.text.split(\", \")[-1]\n book_time = booktime_el.text\n \n d = {\n \"session\" : session,\n \"date\" : book_date,\n \"time\" : book_time\n 
}\n \n # - not the best place for a folder declaration, boa\n save_folder = \".\\\\temp\\\\\"\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n \n # timestamp = datetime.now().strftime(\"%H-%M-%S\")\n timestamp = datetime.now().strftime(\"%d-%m-%y %H-%M-%S\")\n json.dump(d, open(save_folder+\"session_results-{:s}.json\".format(timestamp), \"w\"), indent=4)\n xpath(driver, \"*//body\").screenshot(save_folder+\"screenshot-{:s}.png\".format(timestamp))\ndef process(driver):\n try:\n WebDriverWait(driver, 10.0).until(EC.element_to_be_clickable((By.XPATH, \"*//div[@class='message'][@role='alert']\")))\n clickalert(driver)\n except TimeoutException:\n pass\n \n reg_flag = True\n while(reg_flag):\n try:\n register_button = WebDriverWait(driver, 1.0).until(EC.element_to_be_clickable((By.XPATH, \"*//a[contains(@class,'book-button')]\")))\n except TimeoutException:\n print(\"Waiting for the button...\")\n driver.refresh()\n else:\n reg_flag = False\n \n register_button.click()\n \n sleep(0.5)\n \n # --- ATTENDEES\n # if the checkbox is blocked, it's probably because we can't \n try:\n checkbox = WebDriverWait(driver, 2.5).until(EC.element_to_be_clickable((By.XPATH, \".//table//label[contains(text(), '(You)')]/ancestor::tr//input[@type='checkbox']\")))\n except TimeoutException:\n try:\n xpath(driver, \"*//tr[contains(@title, 'Already Registered')]\")\n print(\"Already registered, moving on\")\n return\n except NoSuchElementException:\n pass\n \n clickalert(driver)\n if not checkbox.is_selected():\n checkbox.click()\n next0(driver)\n \n sleep(0.5)\n \n \n # --- QUESTIONNAIRE\n start = time()\n WaitPageLoad(driver)\n end = time()\n # print(\"> load wait: {:3.2f}\".format(end-start))\n \n consent_check = xpath(driver, \"*//*[@class='reg-form']//div[@class='questionField']//input\")\n consent_press = xpath(driver, \"*//*[@class='reg-form']//div[@class='questionField']//label\")\n if not consent_check.is_selected:\n consent_press.click()\n sleep(0.25)\n next1(driver)\n \n \n # --- FEES AND EXTRAS\n pick_fee = WebDriverWait(driver, timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//div[@class='fee-section']//span[contains(text(), 'Aquatic & Fitness Membership')]/preceding-sibling::span[@class='outer-circle']\")))\n # clickalert()\n pick_fee.click()\n next0(driver)\n \n \n # --- PAYMENT\n # - oh, this has an iframe. 
How quaint!\n WaitPageLoad(driver)\n WebDriverWait(driver, medium_timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//iframe[@name='iframe']\")))\n driver.switch_to.frame(xpath(driver, \"*//iframe[@name='iframe']\")) # can wait until it loads\n order_button = WebDriverWait(driver, medium_timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//button[@class='process-now'][contains(text(), 'Place My Order')]\")))\n # print(\"Got to the order button\")\n try:\n order_button = WebDriverWait(driver, medium_timeout).until(EC.element_to_be_clickable((By.XPATH, \"*//button[@class='process-now'][contains(text(), 'Place My Order')]\")))\n except:\n pass\n # print(\"Can't find order button\")\n else:\n order_button.click()\n \n \n \n # --- session confirmation\n WaitPageLoad(driver)\n \n # session = xpath(driver, \"*//div[@id='main-content']//div[@class='bm-event-info']//h2/span\").text\n # book_date = xpath(driver, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-date']\").text.split(\", \")[-1]\n # book_time = xpath(driver, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-subject']\").text\n \n flag = True\n while(flag):\n try:\n session_el = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//div[@id='main-content']//div[@class='bm-event-info']//h2/span\")))\n flag = False\n except NoSuchWindowException:\n print(\"Window exception.\")\n sleep(1)\n \n bookdate_el = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-date']\")))\n booktime_el = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//div[@id='main-content']//div[@class='bm-event-info']//span/span[@class='bm-subject']\")))\n \n session = session_el.text\n book_date = bookdate_el.text.split(\", \")[-1]\n book_time = booktime_el.text\n \n d = {\n \"session\" : session,\n \"date\" : book_date,\n \"time\" : book_time\n }\n \n # - not the best place for a folder declaration, boa\n save_folder = \".\\\\temp\\\\\"\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n \n # timestamp = datetime.now().strftime(\"%H-%M-%S\")\n # timestamp = datetime.now().strftime(\"%d-%m-%y %H-%M-%S\")\n timestamp = Timestamp()\n \n json.dump(d, open(save_folder+\"session_results-{:s}.json\".format(timestamp), \"w\"), indent=4)\n xpath(driver, \"*//body\").screenshot(save_folder+\"screenshot-{:s}.png\".format(timestamp))\n print(\"Booked {:s}, {:s}, {:s}\".format(session, book_date, book_time))\n \n\n# register_buttons = xpaths(driver, \"*//span[contains(text(), 'REGISTERED VISIT - LANE SWIMMING')]/ancestor::div[@class='bm-class-container'] / *//span[contains(text(),'9:00am - ')]/ancestor::div[@class='bm-class-container']//input[@type='button']\")\ndef RegisterButtons_v0(type_str, time_str):\n register_buttons = xpaths(driver, \"*//span[contains(text(), '{:s}')]/ancestor::div[@class='bm-class-container'] / *//span[contains(text(),'{:s} ')]/ancestor::div[@class='bm-class-container']//input[@type='button']\".format(type_str, time_str))\n return [click_prefix+regex.compile(r\"\\(\\'(.*)\\'\\)\").search(x.get_attribute('onclick'))[1] for x in register_buttons if x.get_attribute('value')=='REGISTER']\n# renamed\ndef SlotNodes(driver):\n # date = \"./preceding-sibling::tr[@class='bm-marker-row'][1]\").text\n \n classes_table = xpath(driver, \"*//table[@id='classes']\")\n # class_type = xpath(_, 
\".//span[@class='bm-event-description']\").get_attribute('innerHTML')\n \n # date = xpath(entries[0], \"./preceding-sibling::tr[@class='bm-marker-row'][1]\").text\n # entries = xpaths(classes_table, \".//div[@class='bm-class-header-wrapper']//span[contains(text(), '{:s}')]/ancestor::tr / .//div[@class='bm-group-item-desc']//span[contains(text(), '{:s}')]/ancestor::tr\".format('LANE SWIMMING', '9:00am -'))\n # control = {\n # \"type\" : \"REGISTERED VISIT - LANE SWIMMING\",\n # \"time\" : \"9:00 am\"\n # }\n \n # control = json.load(open(\"control_test.json\",\"r\"))\n control = json.load(open(\"control_release.json\",\"r\"))\n print(\"Book type: {:s}\".format(control['type']))\n print(\"Book time: {:s}\".format(control['time']))\n \n nodes = xpaths(classes_table, \".//div[@class='bm-class-header-wrapper']//span[contains(text(), '{:s}')]/ancestor::tr / .//div[@class='bm-group-item-desc']//span[contains(text(), '{:s} -')]/ancestor::tr\".format(control['type'], control['time']))\n # return [click_prefix+regex.compile(r\"\\(\\'(.*)\\'\\)\").search(xpath(x, \".//input\").get_attribute('onclick'))[1] for x in entries]\n \n links = [click_prefix+regex.compile(r\"\\(\\'(.*)\\'\\)\").search(xpath(x, \".//input\").get_attribute('onclick'))[1] for x in nodes]\n \n infos = []\n # for each entry, let's grab type, \n for i in range(len(nodes)):\n d = {}\n d['element'] = nodes[i]\n d['link'] = links[i]\n \n swim_type = xpath(nodes[i], \".//div[@class='bm-class-header-wrapper']//span[@class='bm-event-description']\").get_attribute('innerHTML')\n d['type'] = swim_type\n \n date = xpath(nodes[i], \"./preceding-sibling::tr[@class='bm-marker-row'][1]\").text\n d['date'] = date\n \n slot_time = xpath(nodes[i], \".//div[@class='bm-group-item-desc']//span[contains(@aria-label, 'Event time')]\").text\n d['time'] = slot_time\n \n try:\n spots = xpath(nodes[i], \".//div[@class='bm-spots-left-label']/span[@aria-label]\").text\n except NoSuchElementException:\n spots = 'None'\n \n d['spots'] = spots\n \n # - fuck printing's slow...\n print(\"{:s}, {:s}, {:s}, {:s}\".format(swim_type, date, slot_time, spots))\n \n infos.append(d)\n \n return infos\n\n\nif __name__ == \"__main__\":\n \n \n ### ----- FLOW -----\n driver = get_chrome()\n driver.get(url)\n \n start = time()\n Minoru(driver)\n end = time()\n print(\"> Login+Minoru: {:3.2f} seconds\".format(end-start))\n \n # - wait for the table to appear\n start = time()\n WebDriverWait(driver, big_timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//table[@id='classes']\")))\n end = time()\n print(\"> Classes: {:3.2f} seconds\".format(end-start))\n \n \n slot_nodes = SlotNodes(driver, settings)\n \n print(\"{}:{}:{}\".format(settings['start'], settings['stop'], settings['step']))\n for s in slot_nodes[settings['start']:settings['stop']:settings['step']]:\n print(\"{:s}, {:s}, {:s}, {:s}\".format(s['type'], s['date'], s['time'], s['spots']))\n \n for sn in slot_nodes[settings['start']:settings['stop']:settings['step']]:\n # process full\n if(sn['spots'] != 'Full'):\n driver.get(sn['link'])\n \n # process full\n try:\n full_el = xpath(driver, \"*//label[@class='spots']/span\").text\n except NoSuchElementException:\n full_el = None\n \n if(full_el != 'Full'):\n process(driver)\n sleep(1)\n \n \n # - do some sort of check, THEN decide to close; possibly go through them again\n driver.close()\n driver.quit()\n \n # scoop up \"temp\" folder, archive contents, send in an email\n \n \n exit(0)\n \n " }, { "alpha_fraction": 0.6436119079589844, "alphanum_fraction": 
0.651296854019165, "avg_line_length": 21.65217399597168, "blob_id": "aab9a3c1f347a4dc1fd1e0846771d33a720324fd", "content_id": "17e7cde1560a99ea4550f865a8726a4b5129f039", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 113, "num_lines": 46, "path": "/pools_release.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "from script import *\nfrom test_date import *\n\n# driver = get_chrome()\ndriver = headless_chrome()\ndriver.get(url)\nWaitPageLoad(driver)\n\nstart = time()\nMinoru(driver)\nend = time()\nprint(\"> Login+Minoru: {:3.2f} seconds\".format(end-start))\n\n# - wait for the table to appear\nstart = time()\nWebDriverWait(driver, big_timeout).until(EC.visibility_of_element_located((By.XPATH, \"*//table[@id='classes']\")))\nend = time()\nprint(\"> Classes: {:3.2f} seconds\".format(end-start))\n\n\nslot_nodes = SlotNodes(driver)\n\ndates = [x['date'] for x in slot_nodes]\ntoday = datetime.now().date()\nnextm = FormatDate(NextMonday(today))\noffset = dates.index(nextm)\n\n\nprint(\"\\nWeek and days of interest: \")\nfor s in slot_nodes[offset:offset+7]:\n print(\"{:s}, {:s}, {:s}, {:s}\".format(s['type'], s['date'], s['time'], s['spots']))\n\ndays = [x['link'] for x in slot_nodes[offset:offset+7]] # release\n\n\nfor i in range(len(days)):\n driver.get(days[i])\n process(driver)\n sleep(1)\nprint(\"> All booked\")\n\n\n# --- bow out.\ndriver.quit()\nprint(\"> All done.\")\nexit(0)" }, { "alpha_fraction": 0.6022576093673706, "alphanum_fraction": 0.6082337498664856, "avg_line_length": 24.116666793823242, "blob_id": "c91aed426541010501b4ab75bc1a8e3f5e5635d0", "content_id": "1b654afe33ca7f44e871efd18680e68a83d29907", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1506, "license_type": "no_license", "max_line_length": 85, "num_lines": 60, "path": "/email_script.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "import smtplib\nimport json\nimport os\nfrom datetime import datetime\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nfrom glob import glob\n\n\ndef DayStamp():\n return datetime.now().strftime(\"%d-%m-%y\") \n\ndef SimpleEmail(recipient, message):\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.starttls()\n j = json.load(open(\"email_sgm.json\", \"r\"))\n login = j['email']\n passw = j['passw']\n s.login(login, passw)\n s.sendmail(login, recipient, message)\n s.quit()\n\ndef ContentEmail(toaddr, files):\n j = json.load(open(\"email_sgm.json\", \"r\"))\n fromaddr = j['email']\n \n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"{:s}'s session\".format(DayStamp())\n body = ''\n msg.attach(MIMEText(body, 'plain'))\n \n for f in files:\n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(f, 'rb').read())\n encoders.encode_base64(part)\n part.add_header(\"Content-Disposition\", \"attachment; filename={:s}\".format(f))\n msg.attach(part)\n \n SimpleEmail(toaddr, msg.as_string())\n\ndef SendZips():\n zips = glob(\"*.zip\")\n \n toaddr0 = '[email protected]'\n toaddr1 = '[email protected]'\n \n ContentEmail(toaddr0, zips)\n ContentEmail(toaddr1, zips)\n \n for z in zips:\n os.remove(z)\n\n# send the zip, and the log.\nif __name__ == \"__main__\":\n SendZips()" }, { "alpha_fraction": 
0.5922330021858215, "alphanum_fraction": 0.5922330021858215, "avg_line_length": 16.33333396911621, "blob_id": "99ea8a957b3bea63dbfbe82d1f73a13eb3221116", "content_id": "16b19772bec303e9c2476fe2603118f123625369", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/packsend.py", "repo_name": "annekwong/swimming-pool-booking", "src_encoding": "UTF-8", "text": "from email_script import *\nfrom pack import *\n\nif __name__ == \"__main__\":\n PackTemp()\n SendZips()" } ]
7
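The booking scripts in the record above are driven by a small JSON control file: `ParseConfig()` in config.py returns the session type, time, and a start/stop/step slice that script.py applies to the list of matching slots. A minimal sketch of that contract follows; it assumes the repo's config.py is importable and that control_release.json exists as in the scripts above, and the slot strings themselves are made up for illustration.

```python
# Sketch of the config-driven slot selection used by script.py's __main__ block.
from config import ParseConfig

settings = ParseConfig("control_release.json")  # {'type', 'time', 'start', 'stop', 'step'}
slots = ["Mon 9:00am", "Tue 9:00am", "Wed 9:00am", "Thu 9:00am", "Fri 9:00am"]

# Same slicing pattern as script.py applies to the scraped slot nodes;
# None values fall back to Python's default slice behavior.
for slot in slots[settings['start']:settings['stop']:settings['step']]:
    print("would book:", slot)
```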
ma7moudelfeky/OdooApps
https://github.com/ma7moudelfeky/OdooApps
15d4086320cf67fd55fb40103daf0f6c97585aa7
0494eb6bd270f8402a9ad2eadca36778b0ab23cf
ef8809b48dc0b94fc3d7863112e5c7ca8389906d
refs/heads/master
2022-04-15T20:08:33.447859
2020-04-12T12:21:19
2020-04-12T12:21:19
255,061,044
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6831682920455933, "alphanum_fraction": 0.6831682920455933, "avg_line_length": 26.272727966308594, "blob_id": "da0d5840f8ce61af6bd9a9edfc71be66c90809b4", "content_id": "78f439b484709f842c5f845f0686d6b4ec95e7c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/leave_limit/models/leave_type.py", "repo_name": "ma7moudelfeky/OdooApps", "src_encoding": "UTF-8", "text": "\"\"\" Leave Type \"\"\"\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, Warning, ValidationError\n\n\nclass HrLeaveType(models.Model):\n \"\"\" inherit Hr Leave Type \"\"\"\n _inherit = 'hr.leave.type'\n\n monthly_leave_limit = fields.Boolean()\n leave_limit = fields.Float()\n\n\n\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 17.25, "blob_id": "afe2b8b69b7816151ffaf5018049dec4be8c2f61", "content_id": "304d46d27b7092efb8d5378c9959a60b24707cdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/leave_limit/models/__init__.py", "repo_name": "ma7moudelfeky/OdooApps", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom . import leave_type\nfrom . import hr_leave" }, { "alpha_fraction": 0.5875152945518494, "alphanum_fraction": 0.588739275932312, "avg_line_length": 39.900001525878906, "blob_id": "0600ee4cd4768592381809486ccef9d947e6d6c2", "content_id": "1930de4cd6e11985f6bdbaab68b4f41ca1cdf06e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 124, "num_lines": 20, "path": "/leave_limit/controllers/controllers.py", "repo_name": "ma7moudelfeky/OdooApps", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom odoo import http\n\n# class JcbLeaveLimit(http.Controller):\n# @http.route('/jcb_leave_limit/jcb_leave_limit/', auth='public')\n# def index(self, **kw):\n# return \"Hello, world\"\n\n# @http.route('/jcb_leave_limit/jcb_leave_limit/objects/', auth='public')\n# def list(self, **kw):\n# return http.request.render('jcb_leave_limit.listing', {\n# 'root': '/jcb_leave_limit/jcb_leave_limit',\n# 'objects': http.request.env['jcb_leave_limit.jcb_leave_limit'].search([]),\n# })\n\n# @http.route('/jcb_leave_limit/jcb_leave_limit/objects/<model(\"jcb_leave_limit.jcb_leave_limit\"):obj>/', auth='public')\n# def object(self, obj, **kw):\n# return http.request.render('jcb_leave_limit.object', {\n# 'object': obj\n# })" }, { "alpha_fraction": 0.48915988206863403, "alphanum_fraction": 0.5, "avg_line_length": 23.600000381469727, "blob_id": "642a470846625fd89872822484fe24832ba68168", "content_id": "bb9b4ff003b77e0fa1a3b1bced6689c68cd8459e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/leave_limit/__manifest__.py", "repo_name": "ma7moudelfeky/OdooApps", "src_encoding": "UTF-8", "text": "# pylint: disable=missing-docstring,manifest-required-author\n{\n 'name': 'JCB Leave Limit',\n 'summary': 'JCB Leave Limit',\n 'author': 'Mahmoud Elfeky , AJIG CAIRO',\n 'website': 'i.jcbvip.com',\n 'version': '12.0.1.0.0',\n 'category': 'Humman Resources',\n 'license': 'AGPL-3',\n 'sequence': 1,\n 'depends': [\n 
'base',\n 'document',\n 'hr',\n 'hr_holidays',\n ],\n 'data': [\n # 'security/ir.model.access.csv',\n # 'report/',\n # 'wizard/',\n 'views/leave_type_view.xml',\n # 'views/hr_leave_view.xml',\n # 'views/menu_view.xml',\n # 'data/salry_rule.xml',\n ],\n 'demo': [],\n 'installable': True,\n 'application': True,\n 'auto_install': False,\n}\n" }, { "alpha_fraction": 0.46742725372314453, "alphanum_fraction": 0.46808862686157227, "avg_line_length": 48.9752082824707, "blob_id": "63cc0c140c52a8061e4393ce4e18ab190c73f7ca", "content_id": "a318e394637cdcde970d73802a000ff59032a079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6048, "license_type": "no_license", "max_line_length": 92, "num_lines": 121, "path": "/leave_limit/models/hr_leave.py", "repo_name": "ma7moudelfeky/OdooApps", "src_encoding": "UTF-8", "text": "\"\"\" Hr Leave \"\"\"\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, Warning, ValidationError\nfrom datetime import datetime, date, time\nfrom dateutil.relativedelta import relativedelta\n\nclass HrLeave(models.Model):\n \"\"\" inherit Hr Leave \"\"\"\n _inherit = 'hr.leave'\n\n holiday_take = fields.Float(compute='_compute_holiday_take')\n holiday_take_plus_duration = fields.Float(compute='_compute_holiday_take_plus_duration')\n remain_leave = fields.Float(compute='_compute_remain_leave')\n leave_limit = fields.Float(related='holiday_status_id.leave_limit', store=True)\n first_day_month = fields.Date(compute='_compute_first_day_month')\n last_day_month = fields.Date(compute='_compute_first_day_month')\n\n @api.depends('holiday_take', 'number_of_days_display')\n def _compute_holiday_take_plus_duration(self):\n \"\"\" Compute holiday_take_plus_duration value \"\"\"\n for rec in self:\n rec.holiday_take_plus_duration = rec.holiday_take + rec.number_of_days_display\n\n @api.depends('holiday_status_id')\n def _compute_holiday_take(self):\n \"\"\" Compute holiday_take value \"\"\"\n for rec in self:\n if rec.holiday_status_id.monthly_leave_limit:\n holiday = rec.env['hr.leave'].search([\n ('employee_id', '=', rec.employee_id.id),\n ('holiday_status_id', '=', rec.holiday_status_id.id),\n ('request_date_from', '>=', rec.first_day_month),\n ('request_date_to', '<=', rec.last_day_month),\n ('state', '=', 'validate')\n ])\n for h in holiday:\n rec.holiday_take += h.number_of_days_display\n\n @api.depends('request_date_from')\n def _compute_first_day_month(self):\n \"\"\" Compute first_day_month value \"\"\"\n for rec in self:\n rec.first_day_month = rec.request_date_from.replace(day=1)\n rec.last_day_month = (rec.request_date_from + relativedelta(months=+1, day=1,\n days=-1))\n\n @api.depends('leave_limit', 'holiday_take')\n def _compute_remain_leave(self):\n \"\"\" Compute remain_leave value \"\"\"\n for rec in self:\n rec.remain_leave = rec.leave_limit - rec.holiday_take\n\n # @api.onchange('holiday_status_id')\n # def _onchange_holiday_status_id(self):\n # \"\"\" holiday_status_id \"\"\"\n # for rec in self:\n # if rec.holiday_status_id.monthly_leave_limit:\n # if rec.holiday_take_plus_duration > rec.leave_limit:\n # raise ValidationError('Your Leave Limit By Month Is : ' +\n # str(rec.leave_limit) +\n # '\\n' + 'You Take : ' +\n # str(rec.holiday_take) +\n # '\\n' + 'Your Available Leaves Is : ' +\n # str(rec.remain_leave))\n #\n # @api.onchange('request_date_from')\n # def _onchange_request_date_from(self):\n # \"\"\" request_date_from \"\"\"\n # for rec in self:\n # if 
rec.holiday_status_id.monthly_leave_limit:\n # if rec.holiday_take_plus_duration > rec.leave_limit:\n # raise ValidationError('Your Leave Limit By Month Is : ' +\n # str(rec.leave_limit) +\n # '\\n' + 'You Take : ' +\n # str(rec.holiday_take) +\n # '\\n' + 'Your Available Leaves Is : ' +\n # str(rec.remain_leave))\n #\n # @api.onchange('request_date_to')\n # def _onchange_request_date_to(self):\n # \"\"\" request_date_to \"\"\"\n # for rec in self:\n # if rec.holiday_status_id.monthly_leave_limit:\n # if rec.holiday_take_plus_duration > rec.leave_limit:\n # raise ValidationError('Your Leave Limit By Month Is : ' +\n # str(rec.leave_limit) +\n # '\\n' + 'You Take : ' +\n # str(rec.holiday_take) +\n # '\\n' + 'Your Available Leaves Is : ' +\n # str(rec.remain_leave))\n\n @api.model\n def create(self, vals):\n \"\"\" Override create() \"\"\"\n # vals ={'field': value} -> dectionary contains only new filled fields\n res = super(HrLeave, self).create(vals)\n for rec in res:\n if rec.holiday_status_id.monthly_leave_limit:\n if rec.holiday_take_plus_duration > rec.leave_limit:\n raise ValidationError('Your Leave Limit By Month Is : ' +\n str(rec.leave_limit) +\n ' .\\n' + 'You Take : ' +\n str(rec.holiday_take) +\n ' .\\n' + 'Your Available Leaves Is : ' +\n str(rec.remain_leave)+\n ' .\\n' )\n return res\n \n @api.multi\n def action_approve(self):\n \"\"\" inherit action_approve() \"\"\"\n for rec in self:\n if rec.holiday_take_plus_duration > rec.leave_limit:\n raise ValidationError('Your Leave Limit By Month Is : ' +\n str(rec.leave_limit) +\n ' .\\n' + 'You Take : ' +\n str(rec.holiday_take) +\n ' .\\n' + 'Your Available Leaves Is : ' +\n str(rec.remain_leave)+\n ' .\\n')\n res = super(HrLeave, self).action_approve()\n\n" } ]
5
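Stripped of the Odoo ORM, the monthly-limit rule enforced by hr_leave.py in the record above is simple arithmetic: remaining = limit − taken, and a request is refused when taken + requested exceeds the limit. Below is a hedged, framework-free sketch of that check; the numbers are illustrative only, and the real module reads these values from computed fields on the hr.leave record.

```python
# Framework-free sketch of the validation in HrLeave.create()/action_approve();
# this is not the module's actual code, just the same arithmetic.
def check_leave_request(leave_limit, holiday_take, requested_days):
    remaining = leave_limit - holiday_take
    if holiday_take + requested_days > leave_limit:
        raise ValueError(
            "Your Leave Limit By Month Is: {}. You Take: {}. "
            "Your Available Leaves Is: {}.".format(leave_limit, holiday_take, remaining))
    return remaining - requested_days

print(check_leave_request(leave_limit=5.0, holiday_take=3.0, requested_days=1.0))  # -> 1.0
```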
lvcc2018/Style2Paints_V3
https://github.com/lvcc2018/Style2Paints_V3
638fb483b50bc05bc0574cc12ff31dddc4e799a2
4b4485f082d4a3b76d072c7c411a49ba159464f9
bb4c22667b72428f9c93be8bc9810ffc5e8c9f7f
refs/heads/master
2021-10-07T16:15:09.812920
2021-09-26T12:57:15
2021-09-26T12:57:15
219,532,076
0
0
null
2019-11-04T15:20:41
2019-11-02T15:05:46
2019-04-19T09:07:29
null
[ { "alpha_fraction": 0.6347092986106873, "alphanum_fraction": 0.6635596752166748, "avg_line_length": 27.087499618530273, "blob_id": "f0df9f890b3c0ed6a47d1c017975ef1d8c81fda2", "content_id": "c38eeac78ff58572d91519ad0881fe91b92c3569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2253, "license_type": "no_license", "max_line_length": 103, "num_lines": 80, "path": "/Extract_edge.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport tensorflow as tf\ngpu_options = tf.GPUOptions(allow_growth=True)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nfrom PIL import Image\nimport os.path as osp\nimport glob \nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\nfrom keras.models import load_model\nfrom helper import *\n\nmod = load_model('mod.h5')\n\n\n# In[ ]:\n\n\ndef edge_detecton(path):\n '''\n get sketch\n '''\n from_mat = cv2.imread(path)\n width = float(from_mat.shape[1])\n height = float(from_mat.shape[0])\n new_width = 0\n new_height = 0\n if (width > height):\n from_mat = cv2.resize(from_mat, (512, int(512 / width * height)), interpolation=cv2.INTER_AREA)\n new_width = 512\n new_height = int(512 / width * height)\n else:\n from_mat = cv2.resize(from_mat, (int(512 / height * width), 512), interpolation=cv2.INTER_AREA)\n new_width = int(512 / height * width)\n new_height = 512\n from_mat = from_mat.transpose((2, 0, 1))\n light_map = np.zeros(from_mat.shape, dtype=np.float)\n for channel in range(3):\n light_map[channel] = get_light_map_single(from_mat[channel])\n light_map = normalize_pic(light_map)\n light_map = resize_img_512_3d(light_map)\n line_mat = mod.predict(light_map, batch_size=1)\n line_mat = line_mat.transpose((3, 1, 2, 0))[0]\n line_mat = line_mat[0:int(new_height), 0:int(new_width), :]\n \n line_mat = np.amax(line_mat, 2)\n\n sketchKeras = show_active_img_and_save_denoise('sketchKeras', line_mat, 'sketchKeras.jpg')\n\n return sketchKeras\n\n\n# In[ ]:\n\n\nsource_data_path = \"original image path\"#\"/data4/wangpengxiao/danbooru2017/original\"\nsource_img_path = glob.glob(osp.join(source_data_path,'*/*.jpg'))\nsource_img_path += glob.glob(osp.join(source_data_path,'*/*.png'))\nsource_img_path = sorted(source_img_path)\n\n\n# In[ ]:\n\n\n#get sketch\nsketch_path = \"sketch save path\"#\"/data4/wangpengxiao/danbooru2017/original_sketch\"\nos.mkdir(sketch_path)\nfor path in tqdm(source_img_path):\n sketch_img = edge_detecton(path)\n cv2.imwrite(osp.join(sketch_path, osp.basename(path)), sketch_img) \n\n" }, { "alpha_fraction": 0.5584014654159546, "alphanum_fraction": 0.6054496169090271, "avg_line_length": 31.96407127380371, "blob_id": "2dce8047e081fdf0548c430ebe8f56259133d09b", "content_id": "a67c54f17470126d47209af09c7266c1f1c10754", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5517, "license_type": "no_license", "max_line_length": 143, "num_lines": 167, "path": "/README.md", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "# Style2Paints_V3\nPytorch Reimplementation of Style2Paints V3 ([https://github.com/lllyasviel/style2paints/blob/master/papers/sa.pdf]())\n\n## Results\n\n<table>\n <tr>\n <td ><center><img src=\"./example/1_sketch.png\" width=\"200px\">figure 1_1 </center></td>\n <td ><center><img 
src=\"./example/1_color.png\" width=\"200px\">figure 1_2 </center></td>\n </tr>\n <tr>\n \t<td ><center><img src=\"./example/2_sketch.png\" width=\"200px\">figure 2_1 </center></td>\n \t<td ><center><img src=\"./example/2_color.png\" width=\"200px\">figure 2_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/3_sketch.png\" width=\"200px\">figure 3_1 </center></td>\n \t<td ><center><img src=\"./example/3_color.png\" width=\"200px\">figure 3_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/4_sketch.png\" width=\"200px\">figure 4_1 </center></td>\n \t<td ><center><img src=\"./example/4_color.png\" width=\"200px\">figure 4_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/5_sketch.png\" width=\"200px\">figure 5_1 </center></td>\n \t<td ><center><img src=\"./example/5_color.png\" width=\"200px\">figure 5_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/6_sketch.png\" width=\"200px\">figure 6_1 </center></td>\n \t<td ><center><img src=\"./example/6_color.png\" width=\"200px\">figure 6_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/7_sketch.png\" width=\"200px\">figure 7_1 </center></td>\n \t<td ><center><img src=\"./example/7_color.png\" width=\"200px\">figure 7_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/8_sketch.png\" width=\"200px\">figure 8_1 </center></td>\n \t<td ><center><img src=\"./example/8_color.png\" width=\"200px\">figure 8_2 </center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/9_sketch.png\" width=\"200px\">figure 9_1 </center></td>\n \t<td ><center><img src=\"./example/9_color.png\" width=\"200px\">figure 9_2 </center></td>\n\t</tr>\n</table>\n\n\n\n## Step1 : Dataset Simulation\n\n#### ​\tOne should modify simulate_step\\*.ipynd or simulate_step\\*.py with your own data path before runing this script. \n\n### Simulate_step1 : Random Region Proposal and Pasting\n\n<center><img src=\"./example/Random Region Proposal and Pasting.png\" width=\"400px\"></center>\n\n​\tSee script : simulate_step1.ipynb \n\n### Simulate_step2 : Random transform\n\n<center>\n <img src=\"./example/Random transform.png\" width=\"400px\">\n</center>\n\n​\tSee script : simulate_step2.ipynb \n\n### Simulate_step3 : Random color spray\n\n<center>\n <img src=\"./example/Random color spray.png\" width=\"400px\">\n</center>\n\n​\tI merged this part with the Pytorch data-loader. 
Refer to ./Pytorch-Style2paints/dataset_multi.py\n\n### Effect Picture\n\n<table>\n <tr>\n <td ><center>Ground truth</center></td>\n <td ><center>Color draft</center></td>\n </tr>\n <tr>\n \t<td ><center><img src=\"./example/gt_1.png\" width=\"200px\"></center></td>\n \t<td ><center><img src=\"./example/df_1.png\" width=\"200px\"></center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/gt_2.png\" width=\"200px\"></center></td>\n \t<td ><center><img src=\"./example/df_2.png\" width=\"200px\"></center></td>\n\t</tr>\n</table>\n\n## User hints\n\n#### \tI chose [0, 20) user hints from the ground truth randomly, and pasted them on a hint map with 3 channels( RGB while the paper used RGBA )\n\n<table>\n <tr>\n <td ><center>Ground truth</center></td>\n <td ><center>User hints</center></td>\n </tr>\n <tr>\n \t<td ><center><img src=\"./example/uh_gt_1.png\" width=\"200px\"></center></td>\n \t<td ><center><img src=\"./example/uh_1.png\" width=\"200px\"></center></td>\n\t</tr>\n <tr>\n \t<td ><center><img src=\"./example/uh_gt_2.png\" width=\"200px\"></center></td>\n \t<td ><center><img src=\"./example/uh_2.png\" width=\"200px\"></center></td>\n\t</tr>\n</table>\n\n## Network structure\n\n<table>\n <td >\n <center><img src=\"./example/network.png\" width=\"800px\"></center>\t\t</td>\n</table>\n\n## Train \n\n​\tJust run train.py. Don't forget to modify the easydict in the train.py script!\n\n​ All the hyper-parameters are chosen as same as the original paper.\n\n```python\nargs = easydict.EasyDict({\n 'epochs' : 100,\n 'batch_size' : 16,\n 'train_path' : 'train data path'#'./your train data path/train',\n 'val_path' : 'val data path'#'./your val data path/val',\n 'sketch_path' : 'sketch path'#\"./your sketch data path/sketch\",\n 'draft_path' : 'STL path'#\"./your STL data path/STL\",\n 'save_path' : 'result path'#\"./your save path/results\" ,\n 'img_size' : 270,\n 're_size' : 256,\n 'learning_rate' : 1e-5,\n 'gpus' : '[0,1,2,3]',\n 'lr_steps' : [5, 10, 15, 20],\n \"lr_decay\" : 0.1,\n 'lamda_L1' : 0.01,\n 'workers' : 16,\n 'weight_decay' : 1e-4\n})\n```\n\n## Output of the data-loader & I/O of the Model\n\n### Data-loader:\n\n```python\nfor i, (input, df, gt) in enumerate(train_loader):\n```\n\nInput : (batch_size, 4, 256, 256) : sketch(1 channel) + hint map(3 channels) \n\ndf : (batch_size, 3, 224, 224) : simulated data which is the input of Inception V1\n\ngt: (batch_size, 3, 256, 256) : ground truth\n\n### I/O of the Inception V1:\n\nINPUT: df : (batch_size, 3, 224, 224) : simulated data which is the input of Inception V1\n\nOUTPUT: latent code (batch_size, 1024, 1, 1)\n\n### I/O of the Style2Paints_V3:\n\nINPUT: Input : (batch_size, 4, 256, 256) : sketch(1 channel) + hint map(3 channels) \n\nOUTPUT: output : (batch_size, 3, 256, 256) : colorized manuscripts\n" }, { "alpha_fraction": 0.6047254204750061, "alphanum_fraction": 0.6270753741264343, "avg_line_length": 24.434959411621094, "blob_id": "38415c869264df0490c205cf675a68f44abd1179", "content_id": "692312c1d6bf265e09b9ef70a37744587e21b650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6332, "license_type": "no_license", "max_line_length": 143, "num_lines": 246, "path": "/simulate_step1.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport tensorflow as tf\ngpu_options = 
tf.GPUOptions(allow_growth=True)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nfrom PIL import Image\nimport os.path as osp\nimport glob \nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\nfrom keras.models import load_model\nfrom helper import *\n\nmod = load_model('mod.h5')\n\n\n# In[ ]:\n\n\ndef RandomCenterCrop(path, min_size, max_size):\n    '''\n    simulate dataset step 1: Crop Randomly\n    '''\n    size = np.random.randint(min_size, max_size)\n    \n    img = cv2.imread(path)\n    h, w, _ = img.shape\n\n    top = np.random.randint(0, h - size)\n    left = np.random.randint(0, w - size)\n\n    return img[top:size+top, left:size+left, :]\n\n\n# In[ ]:\n\n\ndef get_patch(path, min_patch_size, max_patch_size):\n    '''\n    get patch from clothes\n    '''\n    patch_size = np.random.randint(min_patch_size, max_patch_size)\n    \n    img = cv2.imread(path)\n    h, w, _ = img.shape\n    \n    center_h = h/2\n    center_w = w/2\n    \n    patch = img[int(center_h - patch_size/2):int(center_h + patch_size/2), int(center_w - patch_size/2):int(center_w + patch_size/2), :]\n    \n    return patch\n\n\n# In[ ]:\n\n\ndef edge_detecton(path):\n    '''\n    get sketch\n    '''\n    from_mat = cv2.imread(path)\n    width = float(from_mat.shape[1])\n    height = float(from_mat.shape[0])\n    new_width = 0\n    new_height = 0\n    if (width > height):\n        from_mat = cv2.resize(from_mat, (512, int(512 / width * height)), interpolation=cv2.INTER_AREA)\n        new_width = 512\n        new_height = int(512 / width * height)\n    else:\n        from_mat = cv2.resize(from_mat, (int(512 / height * width), 512), interpolation=cv2.INTER_AREA)\n        new_width = int(512 / height * width)\n        new_height = 512\n    from_mat = from_mat.transpose((2, 0, 1))\n    light_map = np.zeros(from_mat.shape, dtype=np.float)\n    for channel in range(3):\n        light_map[channel] = get_light_map_single(from_mat[channel])\n    light_map = normalize_pic(light_map)\n    light_map = resize_img_512_3d(light_map)\n    line_mat = mod.predict(light_map, batch_size=1)\n    line_mat = line_mat.transpose((3, 1, 2, 0))[0]\n    line_mat = line_mat[0:int(new_height), 0:int(new_width), :]\n    \n    line_mat = np.amax(line_mat, 2)\n\n    sketchKeras = show_active_img_and_save_denoise('sketchKeras', line_mat, 'sketchKeras.jpg')\n\n    return sketchKeras\n\n\n# In[ ]:\n\n\ndef get_mask(path):\n    '''\n    Extract the clothes mask.\n    Returns a numpy array.\n    '''\n    from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map\n    from linefiller.thinning import thinning\n\n    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n    ret, binary = cv2.threshold(im, 220, 255, cv2.THRESH_BINARY)\n\n    fills = []\n    result = binary\n\n    fill = trapped_ball_fill_multi(result, 3, method='max')\n    fills += fill\n    result = mark_fill(result, fill)\n\n    fill = trapped_ball_fill_multi(result, 2, method=None)\n    fills += fill\n    result = mark_fill(result, fill)\n\n    fill = trapped_ball_fill_multi(result, 1, method=None)\n    fills += fill\n    result = mark_fill(result, fill)\n\n    fill = flood_fill_multi(result)\n    fills += fill\n\n    fillmap = build_fill_map(result, fills)\n\n    fillmap = merge_fill(fillmap)\n\n\n    for i in range(len(fillmap[:,0])):\n        for j in range(len(fillmap[0,:])):\n            if fillmap[i,j] == 1:\n                fillmap[i,j] = 0\n            else:\n                fillmap[i,j] = 1\n    \n    return fillmap\n\n\n# In[ ]:\n\n\nsource_data_path = \"original image path\"#\"/data4/wangpengxiao/danbooru2017/original\"\nsource_img_path = glob.glob(osp.join(source_data_path,'*/*.jpg'))\nsource_img_path += glob.glob(osp.join(source_data_path,'*/*.png'))\nsource_img_path = 
sorted(source_img_path)\n\n\n# In[ ]:\n\n\n#simulate step1 of the paper:1 \n\n\n# In[ ]:\n\n\nrandom_crop_path = \"random crop save path\"#\"/data4/wangpengxiao/zalando_random_crop\"\npatch_path = \"small path save path\"#\"/data4/wangpengxiao/zalando_center_patch\"\nfor path in tqdm(source_img_path):\n    try:\n        #step1_1: make randomly cropped rectangular patches \n        r_im = RandomCenterCrop(path, 64, 256)\n        cv2.imwrite(osp.join(random_crop_path, osp.basename(path)), r_im)\n        #step1_2: make center-cropped square patches \n        p_im = get_patch(path, 64, 256)\n        cv2.imwrite(osp.join(patch_path, osp.basename(path)), p_im)\n    except:\n        os.system(\"rm \"+path)\n\n\n# In[ ]:\n\n\n#simulate step1 of the paper:2\n\n\n# In[ ]:\n\n\nfrom linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map\nfrom linefiller.thinning import thinning\ndef get_region_picture(path):\n    '''\n    Get an irregularly shaped region picture; the background is black (0), which makes rotation easy\n    '''\n    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n    ret, binary = cv2.threshold(im, 200, 255, cv2.THRESH_BINARY)\n\n    fills = []\n    result = binary\n\n    fill = trapped_ball_fill_multi(result, 3, method='max')\n    fills += fill\n    result = mark_fill(result, fill)\n\n    fill = trapped_ball_fill_multi(result, 2, method=None)\n    fills += fill\n    result = mark_fill(result, fill)\n\n    fill = trapped_ball_fill_multi(result, 1, method=None)\n    fills += fill\n    result = mark_fill(result, fill)\n\n    fill = flood_fill_multi(result)\n    fills += fill\n\n    fillmap = build_fill_map(result, fills)\n\n    fillmap = merge_fill(fillmap)\n\n    fillmap = thinning(fillmap)\n\n    # build the region mask\n    for i in range(len(fillmap[:,0])):\n        for j in range(len(fillmap[0,:])):\n            if fillmap[i,j] == 0:\n                fillmap[i,j] = 1\n            else:\n                fillmap[i,j] = 0\n    # build the region picture \n    im = cv2.imread(path)\n    rgb_fillmap = np.zeros(im.shape)\n    rgb_fillmap[:,:,0] = fillmap\n    rgb_fillmap[:,:,1] = fillmap\n    rgb_fillmap[:,:,2] = fillmap\n    im = im * rgb_fillmap\n    \n    return im.astype('uint8')\n\n\n# In[ ]:\n\n\nregion_picture_path = \"region patch save path\"#\"/data4/wangpengxiao/danbooru2017/original_region_picture\"\nfor path in tqdm(source_img_path):\n    rp_im = get_region_picture(path)\n    cv2.imwrite(osp.join(region_picture_path, osp.basename(path)), rp_im)\n    \n\n" }, { "alpha_fraction": 0.5498644113540649, "alphanum_fraction": 0.5745706558227539, "avg_line_length": 26.18852424621582, "blob_id": "e9db618d72971422529d26948a62a918c86a7e8", "content_id": "92d13d6fb03f54d03df078b34df55516f329c48b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3357, "license_type": "no_license", "max_line_length": 106, "num_lines": 122, "path": "/simulate_step2.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport 
tensorflow as tf\ngpu_options = tf.GPUOptions(allow_growth=True)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nimport glob\nimport os.path as osp\nimport random\nfrom PIL import Image\nfrom scipy import ndimage\nimport tensorflow as tf\nfrom spatial_transformer import transformer\nimport numpy as np\nfrom tf_utils import weight_variable, bias_variable, dense_to_one_hot\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndef get_STL(path, num_batch):\n    h = 384\n    w = 384\n    im = cv2.imread(path[0])\n    im = im / 255.\n    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_CUBIC)\n    \n    im = im.reshape(1, h, w, 3)\n    im = im.astype('float32')\n    \n    batch = np.append(im, im, axis=0)\n    for p in path: \n        im = cv2.imread(p)\n        im = im / 255.\n        im = cv2.resize(im, (w, h), interpolation=cv2.INTER_CUBIC)\n        im = im.reshape(1, h, w, 3)\n        im = im.astype('float32')\n        batch = np.append(batch, im, axis=0)\n    \n    batch = batch[2:,:,:,:]\n\n    out_size = (h, w)\n\n    # %% Simulate batch\n    x = tf.placeholder(tf.float32, [None, h, w, 3])\n    x = tf.cast(batch, 'float32')\n\n    # %% Create localisation network and convolutional layer\n    with tf.variable_scope('spatial_transformer_0'):\n\n        # %% Create a fully-connected layer with 6 output nodes\n        n_fc = 6\n        W_fc1 = tf.Variable(tf.zeros([h * w * 3, n_fc]), name='W_fc1')\n\n        # %% Zoom into the image\n        a, b, c, d, e, f = np.random.random(6)/10\n\n        initial = np.array([[1-a, b, c], [d, 1-e, f]])\n        initial = initial.astype('float32')\n        initial = initial.flatten()\n\n        b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')\n        h_fc1 = tf.matmul(tf.zeros([num_batch, h * w * 3]), W_fc1) + b_fc1\n        h_trans = transformer(x, h_fc1, out_size)\n\n    # %% Run session\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n        y = sess.run(h_trans, feed_dict={x: batch})\n        sess.close()\n    \n    return y\n\n\n# In[ ]:\n\n\nsource_data_path = \"source data path\"#\"/data4/wangpengxiao/danbooru2017/original\"\nSTL_path = \"STL result path\"#\"/data4/wangpengxiao/danbooru2017/original_STL\"\n\nsource_img_path = glob.glob(osp.join(source_data_path,'*/*.jpg'))\nsource_img_path += glob.glob(osp.join(source_data_path,'*/*.png'))\nsource_img_path = sorted(source_img_path)\n\nbatch_size = 16\n\nos.makedirs(STL_path,exist_ok=True)\nq = []\ncount = 0\nc = 0\nfor path in tqdm(source_img_path):\n    c += 1\n    if c != 0 :\n        if count == batch_size-1 :\n            q.append(path)\n            tf.reset_default_graph()\n            im = get_STL(q, batch_size)\n            tf.get_default_graph().finalize()\n            for j in range(len(im)):\n                img = im[j]\n                amin, amax = img.min(), img.max() # get the min and max values\n                img = (img-amin)/(amax-amin) # (element - min) / (max - min)\n                \n                cv2.imwrite(osp.join(STL_path, osp.basename(q[j])), (img*255).astype('uint8')) \n                \n            count = 0\n            q = []\n        else:\n            count += 1\n            q.append(path)\n    else:\n        continue\n\n" }, { "alpha_fraction": 0.5263515710830688, "alphanum_fraction": 0.555026650428772, "avg_line_length": 32.93461608886719, "blob_id": "6afd800c0ab469fec096e5bc0fddc92ceed3f8b2", "content_id": "b84a7782d6ebf600115d9b1bf8746cb23c79f32f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9079, "license_type": "no_license", "max_line_length": 106, "num_lines": 260, "path": "/Pytorch-Style2paints/dataset_multi.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "\"\"\"\nDefinition of a PyTorch \"Dataset\" that iterates through sketch / ground-truth\nimage pairs and returns simulated color drafts 
and simulated color drafts\nwith ground-truth images for training or testing.\n\"\"\"\n\n\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport os\nimport random\nimport glob \nimport os.path as osp\nfrom PIL import Image\nimport cv2\nimport matplotlib.pyplot as plt\nimport torchvision.transforms as transforms\n\nclass ClothDataSet(data.Dataset):\n def __init__(self, data_path, sketch_path, STL_path, img_size, re_size, is_train):\n\n region_picture_path = "/data4/wangpengxiao/zalando_region_picture"\n region_img_path = glob.glob(osp.join(region_picture_path,'*.jpg')) \n region_img_path = sorted(region_img_path)\n\n self._data_path = data_path\n self._sketch_path = sketch_path\n self._STL_path = STL_path\n self._img_size = img_size\n self._re_size = re_size\n self._is_train = is_train\n self.region_img_path = region_img_path\n\n self._get_ground_truth()\n self._inception_transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))\n ])\n\n def _get_ground_truth(self):\n\n gt_img = glob.glob(osp.join(self._data_path,'*.jpg'))\n gt_img += glob.glob(osp.join(self._data_path,'*.png'))\n self.gt_img = sorted(gt_img)\n\n def __getitem__(self, index):\n\n p = self.gt_img[index]\n\n gt = Image.open(p).convert('RGB')\n sk = Image.open(osp.join(self._sketch_path, osp.basename(p))).convert('L')\n #learn the colorization of gt\n #df = gt.copy()\n STL = Image.open(osp.join(self._STL_path, osp.basename(p))).convert('RGB')\n df = make_draft(STL, self.region_img_path)\n\n if self._is_train:\n gt = gt.resize((self._img_size, self._img_size), Image.BICUBIC)\n sk = sk.resize((self._img_size, self._img_size), Image.BICUBIC)\n else:\n gt = gt.resize((self._re_size, self._re_size), Image.BICUBIC)\n sk = sk.resize((self._re_size, self._re_size), Image.BICUBIC)\n df = df.resize((224, 224), Image.BICUBIC)\n\n #make point map\n gt = np.array(gt)\n point_map = np.zeros(gt.shape)\n\n #coordinate = np.where(np.sum(gt,axis=2) < np.sum(np.array([255,255,255])))\n \n num_of_point = np.random.randint(0, 20)\n x = random.sample(range(0,gt.shape[0]),num_of_point)\n y = random.sample(range(0,gt.shape[1]),num_of_point)\n\n for i in range(len(x)): \n r,g,b = gt[x[i],y[i],:]\n cv2.circle(point_map,(y[i],x[i]),1,(int(r),int(g),int(b)),-1) \n\n #finish making point map\n gt = Image.fromarray(gt)\n point_map = Image.fromarray(point_map.astype('uint8'))\n\n #transform sk,point_map,gt\n #to tensor\n sk = transforms.ToTensor()(sk)\n point_map = transforms.ToTensor()(point_map)\n gt = transforms.ToTensor()(gt)\n #random crop\n if self._is_train:\n w_offset = random.randint(0, max(0, self._img_size - self._re_size - 1))\n h_offset = random.randint(0, max(0, self._img_size - self._re_size - 1))\n\n sk = sk[:, h_offset:h_offset + self._re_size, w_offset:w_offset + self._re_size]\n point_map = point_map[:, h_offset:h_offset + self._re_size, w_offset:w_offset + self._re_size]\n gt = gt[:, h_offset:h_offset + self._re_size, w_offset:w_offset + self._re_size]\n\n #normalize\n sk = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(sk)\n point_map = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(point_map)\n gt = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(gt)\n #flip image\n# if self._is_train and random.random() < 0.5:\n# idx = [i for i in range(gt.size(2) - 1, -1, -1)]\n# idx = torch.LongTensor(idx)\n# sk = sk.index_select(2, idx)\n# point_map = point_map.index_select(2, idx)\n# gt = gt.index_select(2, idx)\n\n\n\n input = torch.cat((sk,point_map),0) \n df = 
self._inception_transform(df)\n\n return input, df, gt \n\n def __len__(self):\n return len(self.gt_img)\n\n\ndef make_draft(STL, region_img_path):\n #first step: step2 of paper: STL\n ori_img = STL.copy()\n\n #second step: step1 of paper: paste\n color = get_dominant_color(ori_img)#but get color first \n\n region_num = np.random.randint(1, 3)\n for i in range(region_num):\n region_img = Image.open(random.choice(region_img_path))\n ori_img = Random_paste_region_img(ori_img, region_img)\n\n #third step: step3 of paper: spray\n img = np.array(ori_img)\n h = int(img.shape[0]/30)\n w = int(img.shape[1]/30)\n a_x = np.random.randint(0, h)\n a_y = np.random.randint(0, w)\n b_x = np.random.randint(0, h)\n b_y = np.random.randint(0, w)\n begin_point = np.array([min(a_x,b_x),a_y])\n end_point = np.array([max(a_x,b_x),b_y])\n tan = (begin_point[1] - end_point[1]) / (begin_point[0] - end_point[0]+0.001)\n\n\n center_point_list = []\n for i in range(begin_point[0],end_point[0]+1):\n a = i\n b = (i-begin_point[0])*tan + begin_point[1]\n center_point_list.append(np.array([int(a),int(b)]))\n center_point_list = np.array(center_point_list) \n\n\n lamda = random.uniform(0.01, 10) #a hyperparameter\n paper = np.zeros((h,w,3))\n mask = np.zeros((h,w))\n center = [int(h/2),int(w/2)]\n paper[center[0],center[1],:] = color\n for i in range(h):\n for j in range(w):\n dis = min_dis([i, j],center_point_list)\n paper[i,j,:] = np.array(color)/np.exp(lamda*dis)#*lamda/dis\n mask[i,j] = np.array([255])/np.exp(lamda*dis)#*lamda/dis\n\n paper = (paper).astype('uint8')\n mask = (mask).astype('uint8')\n\n mask = cv2.resize(mask, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)\n im = cv2.resize(paper, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)\n imq = Image.fromarray(im)\n imp = ori_img.copy()\n\n imp.paste(imq,(0, 0, imp.size[0], imp.size[1]),mask = Image.fromarray(mask))\n\n return imp \n\n\n\nimport colorsys\n \ndef get_dominant_color(image):#get the dominant color of the image\n \n#convert the color mode so RGB values can be output\n image = image.convert('RGBA')\n \n#generate a thumbnail to reduce computation and CPU load\n image.thumbnail((200, 200))\n \n max_score = 0#the original code used None here\n dominant_color = 0#the original code used None here, but that crashed; 0 works because the score > max_score comparison below needs an initial value of a definite type\n \n for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):\n # skip fully transparent pixels\n if a == 0:\n continue\n \n saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]\n \n y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)\n \n y = (y - 16.0) / (235 - 16)\n \n # ignore very bright colors\n if y > 0.9:\n continue\n \n # ignore white backgrounds\n if ((r>230)&(g>230)&(b>230)):\n continue\n \n # Calculate the score, preferring highly saturated colors.\n # Add 0.1 to the saturation so we don't completely ignore grayscale\n # colors by multiplying the count by zero, but still give them a low\n # weight.\n score = (saturation + 0.1) * count\n \n if score > max_score:\n max_score = score\n dominant_color = (r, g, b)\n \n return dominant_color\n\n\n\ndef Random_paste_region_img(ori_img, region_img):\n\n paste_x = np.random.randint(0, ori_img.size[0])\n paste_y = np.random.randint(0, ori_img.size[1])\n rotate_angle = np.random.randint(1, 359)\n resize_x = np.random.randint(64, 384)\n resize_y = np.random.randint(64, 384)\n region_img = region_img.resize((resize_x,resize_y))\n tem = ori_img.copy()\n tem.paste(region_img.rotate(rotate_angle),(paste_x,paste_y))\n tem = np.array(tem)\n ori_img = np.array(ori_img)\n# for i in range(ori_img.shape[0]):\n# for j in range(ori_img.shape[1]):\n# if (tem[i,j,:] == np.array([0,0,0])).all():\n# tem[i,j,:] = 
ori_img[i,j,:]\n coordinate = np.where(tem == np.array([0,0,0]))\n for i in range(len(coordinate[0])):\n tem[coordinate[0][i],coordinate[1][i],:] = ori_img[coordinate[0][i],coordinate[1][i],:]\n ori_img = np.array(tem)\n ori_img = Image.fromarray(ori_img)\n# plt.imshow(ori_img)\n \n return ori_img\n\n\ndef min_dis(point, point_list):\n dis = []\n for p in point_list:\n dis.append(np.sqrt(np.sum(np.square(np.array(point)-np.array(p)))))\n \n return min(dis) " }, { "alpha_fraction": 0.5608495473861694, "alphanum_fraction": 0.5913918018341064, "avg_line_length": 29.14447593688965, "blob_id": "0b3828ea72ae6bfd0cf78cfb457f78e96239680e", "content_id": "b0a711660a4cd9428fa840bf2d872c9955bf03fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10913, "license_type": "no_license", "max_line_length": 136, "num_lines": 353, "path": "/utils.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "from helper import *\n\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nimport glob\nimport os.path as osp\nimport random\nfrom PIL import Image\nfrom scipy import ndimage\nimport tensorflow as tf\nfrom spatial_transformer import transformer\nimport numpy as np\nfrom tf_utils import weight_variable, bias_variable, dense_to_one_hot\nimport matplotlib.pyplot as plt\n\n\ndef RandomCenterCrop(path, min_size, max_size):\n '''\n simulate dataset step 1: Crop Randomly\n '''\n size = np.random.randint(min_size, max_size)\n \n img = cv2.imread(path)\n h, w, _ = img.shape\n\n top = np.random.randint(0, h - size)\n left = np.random.randint(0, w - size)\n\n return img[top:size+top, left:size+left, :]\n\n\ndef get_patch(path, min_patch_size, max_patch_size):\n '''\n get patch from clothes\n '''\n patch_size = np.random.randint(min_patch_size, max_patch_size)\n \n img = cv2.imread(path)\n h, w, _ = img.shape\n \n center_h = h/2\n center_w = w/2\n \n patch = img[int(center_h - patch_size/2):int(center_h + patch_size/2), int(center_w - patch_size/2):int(center_w + patch_size/2), :]\n \n return patch\n\n\ndef edge_detecton(path):\n '''\n get sketch\n '''\n from_mat = cv2.imread(path)\n width = float(from_mat.shape[1])\n height = float(from_mat.shape[0])\n new_width = 0\n new_height = 0\n if (width > height):\n from_mat = cv2.resize(from_mat, (512, int(512 / width * height)), interpolation=cv2.INTER_AREA)\n new_width = 512\n new_height = int(512 / width * height)\n else:\n from_mat = cv2.resize(from_mat, (int(512 / height * width), 512), interpolation=cv2.INTER_AREA)\n new_width = int(512 / height * width)\n new_height = 512\n from_mat = from_mat.transpose((2, 0, 1))\n light_map = np.zeros(from_mat.shape, dtype=np.float)\n for channel in range(3):\n light_map[channel] = get_light_map_single(from_mat[channel])\n light_map = normalize_pic(light_map)\n light_map = resize_img_512_3d(light_map)\n line_mat = mod.predict(light_map, batch_size=1)\n line_mat = line_mat.transpose((3, 1, 2, 0))[0]\n line_mat = line_mat[0:int(new_height), 0:int(new_width), :]\n #sketchKeras_colored = show_active_img_and_save('sketchKeras_colored', line_mat, 'sketchKeras_colored.jpg')\n line_mat = np.amax(line_mat, 2)\n #sketchKeras_enhanced = show_active_img_and_save_denoise_filter2('sketchKeras_enhanced', line_mat, 'sketchKeras_enhanced.jpg')\n #sketchKeras_pured = show_active_img_and_save_denoise_filter('sketchKeras_pured', line_mat, 'sketchKeras_pured.jpg')\n sketchKeras = show_active_img_and_save_denoise('sketchKeras', line_mat, 'sketchKeras.jpg')\n 
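# NOTE: `mod` (the sketch-extraction Keras model used by mod.predict above) must already exist at module scope; test.py loads it with load_model.\n 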
cv2.waitKey(0)\n return sketchKeras\n\n\ndef get_mask(path):\n '''\n Extract the mask of the clothes\n Returns a numpy array\n '''\n from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \\\n show_fill_map\n from linefiller.thinning import thinning\n\n im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n ret, binary = cv2.threshold(im, 220, 255, cv2.THRESH_BINARY)\n\n fills = []\n result = binary\n\n fill = trapped_ball_fill_multi(result, 3, method='max')\n fills += fill\n result = mark_fill(result, fill)\n\n fill = trapped_ball_fill_multi(result, 2, method=None)\n fills += fill\n result = mark_fill(result, fill)\n\n fill = trapped_ball_fill_multi(result, 1, method=None)\n fills += fill\n result = mark_fill(result, fill)\n\n fill = flood_fill_multi(result)\n fills += fill\n\n fillmap = build_fill_map(result, fills)\n\n fillmap = merge_fill(fillmap)\n\n\n for i in range(len(fillmap[:,0])):\n for j in range(len(fillmap[0,:])):\n if fillmap[i,j] == 1:\n fillmap[i,j] = 0\n else:\n fillmap[i,j] = 1\n \n return fillmap\n\n\nfrom linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \\\n show_fill_map\nfrom linefiller.thinning import thinning\ndef get_region_picture(path):\n '''\n Get an irregularly shaped region image; the background is black (0), which makes rotation easy\n '''\n im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n ret, binary = cv2.threshold(im, 220, 255, cv2.THRESH_BINARY)\n\n fills = []\n result = binary\n\n fill = trapped_ball_fill_multi(result, 3, method='max')\n fills += fill\n result = mark_fill(result, fill)\n\n fill = trapped_ball_fill_multi(result, 2, method=None)\n fills += fill\n result = mark_fill(result, fill)\n\n fill = trapped_ball_fill_multi(result, 1, method=None)\n fills += fill\n result = mark_fill(result, fill)\n\n fill = flood_fill_multi(result)\n fills += fill\n\n fillmap = build_fill_map(result, fills)\n\n fillmap = merge_fill(fillmap)\n\n fillmap = thinning(fillmap)\n\n #get the region mask\n for i in range(len(fillmap[:,0])):\n for j in range(len(fillmap[0,:])):\n if fillmap[i,j] == 0:\n fillmap[i,j] = 1\n else:\n fillmap[i,j] = 0\n #get the region picture \n im = cv2.imread(path)\n# plt.imshow(im)\n rgb_fillmap = np.zeros(im.shape)\n rgb_fillmap[:,:,0] = fillmap\n rgb_fillmap[:,:,1] = fillmap\n rgb_fillmap[:,:,2] = fillmap\n im = im * rgb_fillmap\n \n return im.astype('uint8')\n\ndef Random_paste_patch_img(ori_img, patch_img):\n\n paste_x = np.random.randint(0, ori_img.size[0] - patch_img.size[0])\n paste_y = np.random.randint(0, ori_img.size[1] - patch_img.size[1])\n rotate_angle = np.random.randint(1, 359)\n resize_x = np.random.randint(64, 384)\n resize_y = np.random.randint(64, 384)\n patch_img = patch_img.resize((resize_x,resize_y))\n tem = ori_img.copy()\n tem.paste(patch_img.rotate(rotate_angle),(paste_x,paste_y))\n tem = np.array(tem)\n ori_img = np.array(ori_img)\n# for i in range(ori_img.shape[0]):\n# for j in range(ori_img.shape[1]):\n# if (tem[i,j,:] == np.array([0,0,0])).all():\n# tem[i,j,:] = ori_img[i,j,:]\n coordinate = np.where(tem == np.array([0,0,0]))\n for i in range(len(coordinate[0])):\n tem[coordinate[0][i],coordinate[1][i],:] = ori_img[coordinate[0][i],coordinate[1][i],:]\n ori_img = np.array(tem)\n ori_img = Image.fromarray(ori_img)\n# plt.imshow(ori_img)\n \n return ori_img\n\n\ndef Random_paste_region_img(ori_img, region_img):\n\n paste_x = np.random.randint(0, ori_img.size[0])\n paste_y = np.random.randint(0, ori_img.size[1])\n rotate_angle = np.random.randint(1, 359)\n resize_x = np.random.randint(64, 384)\n resize_y = 
np.random.randint(64, 384)\n region_img = region_img.resize((resize_x,resize_y))\n tem = ori_img.copy()\n tem.paste(region_img.rotate(rotate_angle),(paste_x,paste_y))\n tem = np.array(tem)\n ori_img = np.array(ori_img)\n# for i in range(ori_img.shape[0]):\n# for j in range(ori_img.shape[1]):\n# if (tem[i,j,:] == np.array([0,0,0])).all():\n# tem[i,j,:] = ori_img[i,j,:]\n coordinate = np.where(tem == np.array([0,0,0]))\n for i in range(len(coordinate[0])):\n tem[coordinate[0][i],coordinate[1][i],:] = ori_img[coordinate[0][i],coordinate[1][i],:]\n ori_img = np.array(tem)\n ori_img = Image.fromarray(ori_img)\n# plt.imshow(ori_img)\n \n return ori_img\n\n\ndef get_STL(path, num_batch):\n h = 1000\n w = 700\n im = cv2.imread(path[0])\n im = im / 255.\n# h = im.shape[0]\n# w = im.shape[1]\n im = cv2.resize(im, (w, h), interpolation=cv2.INTER_CUBIC)\n \n im = im.reshape(1, h, w, 3)\n im = im.astype('float32')\n \n batch = np.append(im, im, axis=0)\n for p in path: \n im = cv2.imread(p)\n im = im / 255.\n # h = im.shape[0]\n # w = im.shape[1]\n im = cv2.resize(im, (w, h), interpolation=cv2.INTER_CUBIC)\n im = im.reshape(1, h, w, 3)\n im = im.astype('float32')\n batch = np.append(batch, im, axis=0)\n \n# print(batch.shape)\n batch = batch[2:,:,:,:]\n# print(batch.shape)\n\n out_size = (h, w)\n\n # %% Simulate batch\n# batch = np.append(im, im, axis=0)\n # batch.append(im)\n # batch = np.append(batch, im, axis=0)\n# num_batch = 1\n\n x = tf.placeholder(tf.float32, [None, h, w, 3])\n x = tf.cast(batch, 'float32')\n\n # %% Create localisation network and convolutional layer\n with tf.variable_scope('spatial_transformer_0'):\n\n # %% Create a fully-connected layer with 6 output nodes\n n_fc = 6\n W_fc1 = tf.Variable(tf.zeros([h * w * 3, n_fc]), name='W_fc1')\n\n # %% Zoom into the image\n a = np.random.randint(5, 10)/10\n b = np.random.randint(0, 3)/10\n c = np.random.randint(0, 3)/10\n d = np.random.randint(5, 10)/10 \n# initial = np.array([[s, 0, tx], [0, s,ty]])\n initial = np.array([[a, b, 0], [b, d, 0]])\n initial = initial.astype('float32')\n initial = initial.flatten()\n\n b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')\n h_fc1 = tf.matmul(tf.zeros([num_batch, h * w * 3]), W_fc1) + b_fc1\n h_trans = transformer(x, h_fc1, out_size)\n\n # %% Run session\n sess = tf.Session()\n sess.run(tf.initialize_all_variables())\n y = sess.run(h_trans, feed_dict={x: batch})\n# y = batch\n \n return y\n\n\n#extract the dominant color of an image\nimport colorsys\n \ndef get_dominant_color(image):\n \n#convert the color mode so RGB values can be output\n image = image.convert('RGBA')\n \n#generate a thumbnail to reduce computation and CPU load\n image.thumbnail((200, 200))\n \n max_score = 0#the original code used None here\n dominant_color = 0#the original code used None here, but that crashed; 0 works because the score > max_score comparison below needs an initial value of a definite type\n \n for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):\n # skip fully transparent pixels\n if a == 0:\n continue\n \n saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]\n \n y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)\n \n y = (y - 16.0) / (235 - 16)\n \n # ignore very bright colors\n if y > 0.9:\n continue\n \n # ignore white backgrounds\n if ((r>230)&(g>230)&(b>230)):\n continue\n \n # Calculate the score, preferring highly saturated colors.\n # Add 0.1 to the saturation so we don't completely ignore grayscale\n # colors by multiplying the count by zero, but still give them a low\n # weight.\n score = (saturation + 0.1) * count\n \n if score > max_score:\n max_score = score\n dominant_color = (r, g, b)\n \n return dominant_color\n\n\ndef min_dis(point, point_list):\n dis = []\n for p in 
point_list:\n dis.append(np.sqrt(np.sum(np.square(np.array(point)-np.array(p)))))\n \n return min(dis) " }, { "alpha_fraction": 0.7519999742507935, "alphanum_fraction": 0.7760000228881836, "avg_line_length": 124, "blob_id": "37beb5bf88e3fbb5e60a1219c66ca089be54fea1", "content_id": "422bb0744c97b286734b6c02dacdf8c2217170d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 124, "num_lines": 1, "path": "/Pytorch-Style2paints/weights/README.md", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "### Weights of google net(inception v1) are available at [Release](https://github.com/Pengxiao-Wang/Style2Paints_V3/releases)\n" }, { "alpha_fraction": 0.557652473449707, "alphanum_fraction": 0.5784360766410828, "avg_line_length": 31.38249969482422, "blob_id": "93638e005930b5048b4e24f975daf768d160ca3e", "content_id": "08564160ea9425665f054ddfabba178dd155fc11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12568, "license_type": "no_license", "max_line_length": 146, "num_lines": 400, "path": "/Pytorch-Style2paints/train.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1,2,3\"\n\nimport sys\nimport time\nfrom optparse import OptionParser\nimport numpy as np\nimport glob \nimport os.path as osp\nfrom PIL import Image\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\nimport torchvision\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.autograd import Variable\nimport torchvision.utils as vutils\n\nfrom transforms import GroupRandomCrop\nfrom transforms import GroupScale\n\nfrom eval import eval_net\nfrom unet import UNet\nfrom unet import Discriminator\nfrom utils import get_ids, split_ids, split_train_val, get_imgs_and_masks, batch\nimport easydict\n\nfrom dataset_multi import ClothDataSet\n\nfrom tensorboardX import SummaryWriter\nwriter = SummaryWriter()\n\n\nargs = easydict.EasyDict({\n 'epochs' : 100,\n 'batch_size' : 16,\n 'train_path' : 'train data path'#'/data4/wangpengxiao/danbooru2017/original/train',\n 'val_path' : 'val data path'#'/data4/wangpengxiao/danbooru2017/original/val',\n 'sketch_path' : 'sketch path'#\"/data4/wangpengxiao/danbooru2017/original_sketch\",\n 'draft_path' : 'STL path'#\"/data4/wangpengxiao/danbooru2017/original_STL\",\n 'save_path' : 'result path'#\"/data4/wangpengxiao/danbooru2017/result\" ,\n 'img_size' : 270,\n 're_size' : 256,\n 'learning_rate' : 1e-5,#changed\n 'gpus' : '[0,1,2,3]',\n 'lr_steps' : [5, 10, 15, 20],\n \"lr_decay\" : 0.1,\n 'lamda_L1' : 0.01,#changed\n 'workers' : 16,\n 'weight_decay' : 1e-4\n})\n\n\nUnet = UNet(in_channels=4, out_channels=3)\nD = Discriminator(in_channels=3, out_channels=1)\n\n\nwriter.add_graph(Unet, (Variable(torch.randn(1,2,4,256,256), requires_grad=True)[0], Variable(torch.randn(1,2,3,224,224), requires_grad=True)[0]))\n\nUnet = torch.nn.DataParallel(Unet, device_ids=eval(args.gpus)).cuda()\n\nD = torch.nn.DataParallel(D, device_ids=eval(args.gpus)).cuda()\n\ncudnn.benchmark = True # faster convolutions, but more memory\n\n\ntrain_loader = torch.utils.data.DataLoader(\n ClothDataSet(\n args.train_path,\n args.sketch_path,\n args.draft_path,\n args.img_size,\n args.re_size,\n is_train = True\n ),\n 
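# shuffle only for training; the val loader below uses a quarter of the batch size and no shuffling\n 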
batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\nval_loader = torch.utils.data.DataLoader(\n ClothDataSet(\n args.val_path,\n args.sketch_path,\n args.draft_path,\n args.img_size,\n args.re_size,\n is_train = False\n ),\n batch_size=int(args.batch_size/4), shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n\nG_optimizer = torch.optim.Adam(\n Unet.parameters(),\n weight_decay=args.weight_decay,\n lr = args.learning_rate,\n betas=(0.9, 0.99),\n eps=0.001)\n\nD_optimizer = torch.optim.Adam(\n D.parameters(),\n weight_decay=args.weight_decay,\n lr = args.learning_rate,\n betas=(0.9, 0.99),\n eps=0.001)\n\n\n\ncriterionD = torch.nn.BCEWithLogitsLoss().cuda()\ncriterionG = torch.nn.L1Loss().cuda()\n\n\n\nSAVE_FREQ = 40\nPRINT_FREQ = 20\nVAL_NUM = 30\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n# In[ ]:\n\n\ndef adjust_learning_rate(optimizer, epoch, lr_steps, lr_decay):\n decay = lr_decay ** (sum(epoch >= np.array(lr_steps)))\n lr = args.learning_rate * decay\n wd = args.weight_decay\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr \n param_group['weight_decay'] = wd\n return lr\n\n\n# In[ ]:\n\n\ndef train_net(args, train_loader, Unet, D, epoch, save_epoch, last_count_train, cur_lr):\n batch_time = AverageMeter()\n lossg = AverageMeter()\n lossd = AverageMeter()\n\n end = time.time()\n \n \n \n for i, (input, df, gt) in enumerate(train_loader):\n #import pdb;pdb.set_trace()\n input=input.float()\n df=df.float()\n gt=gt.float()\n input.size\n df.size\n\n input = Variable(input)\n df = Variable(df)\n gt = Variable(gt)\n label = Variable(torch.ones(input.size(0),int(gt.size(2)/8),int(gt.size(3)/8))) # 1 for real\n \n input = input.cuda()\n df = df.cuda()\n gt = gt.cuda()\n # ----- train netd -----\n D.zero_grad() \n ## train netd with real img\n #import pdb;pdb.set_trace()\n output=D(gt)\n error_real=criterionD(output.squeeze(),label.cuda().squeeze())\n ## train netd with fake img\n fake_pic=Unet(input,df)\n output2=D(fake_pic)\n label.data.fill_(0) # 0 for fake\n error_fake=criterionD(output2.squeeze(),label.cuda().squeeze())\n error_D=(error_real + error_fake)*0.5\n error_D.backward()\n D_optimizer.step()\n\n # ------ train netg -------\n Unet.zero_grad()\n label.data.fill_(1)\n #import pdb;pdb.set_trace()\n fake_pic = Unet(input,df)\n output = D(fake_pic)\n error_G = criterionD(output.squeeze(),label.cuda().squeeze())\n error_L1 = criterionG(fake_pic.cuda(),gt.cuda())\n error_G = error_G*args.lamda_L1 + error_L1\n# error_G.backward(retain_graph=True)\n \n error_G.backward()\n G_optimizer.step()\n \n lossg.update(error_G.item(), input.size(0))\n lossd.update(error_D.item(), input.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n \n last_count_train += 1\n writer.add_scalar('Tdata/batch_time_val', batch_time.val, last_count_train)\n writer.add_scalar('Tdata/batch_time_avg', batch_time.avg, last_count_train)\n writer.add_scalar('Tdata/lossG_val', lossg.val, last_count_train)\n writer.add_scalar('Tdata/lossG_avg', lossg.avg, last_count_train) \n writer.add_scalar('Tdata/lossD_val', lossd.val, last_count_train)\n writer.add_scalar('Tdata/lossD_avg', lossd.avg, last_count_train) \n \n \n \n if i % PRINT_FREQ == 0:\n 
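# push sample grids (sketch, hints, draft, fake, ground truth) to TensorBoard at the print interval\n 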
tb_view_pic(input, df, gt, fake_pic)\n #print('max = '+str(output.max().item())+' '+'min = '+str(output.min().item()))\n print(('Epoch: [{0}][{1}/{2}], lr: {lr:.7f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'LossG {lossG.val:.4f} ({lossG.avg:.4f})\\t'\n 'LossD {lossD.val:.4f} ({lossD.avg:.4f})'.format(\n epoch, i, len(train_loader),\n batch_time=batch_time,\n lossG=lossg,\n lossD=lossd,\n lr=cur_lr)))\n #save Unet\n torch.save({\n 'epoch': epoch,\n 'arch': 'Unet',\n 'state_dict': Unet.state_dict(),\n }, osp.join(save_epoch,str(epoch)+'_Unet_'+'checkpoint.pth.tar'))\n #save D\n torch.save({\n 'epoch': epoch,\n 'arch': 'D',\n 'state_dict': D.state_dict(),\n }, osp.join(save_epoch,str(epoch)+'_D_'+'checkpoint.pth.tar'))\n \n return last_count_train\n\n\n# In[ ]:\n\n\ndef val_net(args, val_loader, Unet, D, epoch, save_epoch, last_count_val):\n batch_time_val = AverageMeter()\n lossg_val = AverageMeter()\n lossd_val = AverageMeter()\n \n Unet.eval()\n \n end = time.time()\n for i, (input, df, gt) in enumerate(val_loader):\n if i >= int(VAL_NUM):\n break\n input=input.float()\n df=df.float()\n gt=gt.float()\n with torch.no_grad(): \n input_var = input\n df_var = df\n gt_var = gt\n label = torch.ones(input.size(0),int(gt.size(2)/8),int(gt.size(3)/8)) # 1 for real\n\n# input_var = input_var.cuda()\n# df_var = df_var.cuda()\n# gt_var = gt_var.cuda()\n\n # ------ val netg -------\n label.data.fill_(1)\n fake_pic = Unet(input_var,df_var)\n output = D(fake_pic)\n error_GAN_G = criterionD(output.squeeze(),label.cuda().squeeze())\n error_L1 = criterionG(fake_pic.cuda(),gt_var.cuda())\n error_G = error_GAN_G*args.lamda_L1 + error_L1\n \n lossg_val.update(error_G.item(), input.size(0))\n batch_time_val.update(time.time() - end)\n end = time.time()\n \n save_pic(save_epoch, i, input_var, df_var, gt_var, fake_pic)\n \n last_count_val += 1\n writer.add_scalar('Vdata/batch_time_val', batch_time_val.val, last_count_val)\n writer.add_scalar('Vdata/batch_time_avg', batch_time_val.avg, last_count_val)\n writer.add_scalar('Vdata/lossG_val', lossg_val.val, last_count_val)\n writer.add_scalar('Vdata/lossG_avg', lossg_val.avg, last_count_val) \n if i % PRINT_FREQ == 0:\n #print('max = '+str(output.max().item())+' '+'min = '+str(output.min().item()))\n print(('Epoch: [{0}][{1}/{2}], lr: {lr:.7f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'LossG {lossG.val:.4f} ({lossG.avg:.4f})'.format(\n epoch, i, len(val_loader),\n batch_time=batch_time_val,\n lossG=lossg_val,\n lr=args.learning_rate)))\n# return np.transpose(fake_pic.detach().cpu().numpy(), (0,2,3,1))\n return last_count_val\n\n\n# In[ ]:\n\n\ndef mkdir(path):\n \n isExists=os.path.exists(path)\n \n if not isExists:\n os.makedirs(path) \n\n return True\n else:\n print (path+' directory already exists')\n return False\n\n\n# In[ ]:\n\n\ndef tb_view_pic(input, df, gt, fake_pic):\n \n sketch = input[:,0,:,:].view(input[:,0,:,:].shape[0],1,input[:,0,:,:].shape[1],input[:,0,:,:].shape[2])\n point_map = input[:,1:,:,:]\n draft = df\n fake = fake_pic\n ground_truth = gt\n \n sketch = vutils.make_grid(sketch, normalize=True, scale_each=True)\n point_map = vutils.make_grid(point_map, normalize=True, scale_each=True)\n draft = vutils.make_grid(draft, normalize=True, scale_each=True)\n fake = vutils.make_grid(fake, normalize=True, scale_each=True)\n ground_truth = vutils.make_grid(ground_truth, normalize=True, scale_each=True)\n \n writer.add_image('sketch', sketch, 1)\n writer.add_image('point_map', point_map, 2)\n writer.add_image('draft', draft, 3)\n 
writer.add_image('fake', fake, 4)\n writer.add_image('ground_truth', ground_truth, 5)\n \n return \n\n\n# In[ ]:\n\n\ndef save_image(image_path, image_numpy):\n image_pil = Image.fromarray(image_numpy)\n image_pil.save(image_path)\n \n \ndef save_pic(save_epoch, i, input_var, df_var, gt_var, fake_pic):\n fake_pic = (np.transpose(fake_pic.detach().cpu().numpy(), (0,2,3,1))+1) / 2.0 * 255.0\n point_map = (np.transpose(input_var.detach().cpu().numpy(), (0,2,3,1))[:,:,:,1:]+1) / 2.0 * 255.0\n sketch = (np.transpose(input_var.detach().cpu().numpy(), (0,2,3,1))[:,:,:,0]+1) / 2.0 * 255.0\n df_var = (np.transpose(df_var.detach().cpu().numpy(), (0,2,3,1))+1) / 2.0 * 255.0\n gt_var = (np.transpose(gt_var.detach().cpu().numpy(), (0,2,3,1))+1) / 2.0 * 255.0\n \n p = osp.join(save_epoch, str(i))\n mkdir(p)\n \n for j in range(len(fake_pic[:,0,0,0])):\n save_image(osp.join(p, str(j)+'fake.jpg'),fake_pic[j].astype('uint8'))\n save_image(osp.join(p, str(j)+'input_sketch.jpg'),sketch[j].astype('uint8'))\n save_image(osp.join(p, str(j)+'input_pointmap.jpg'),point_map[j].astype('uint8'))\n save_image(osp.join(p, str(j)+'df.jpg'),df_var[j].astype('uint8'))\n save_image(osp.join(p, str(j)+'gt.jpg'),gt_var[j].astype('uint8'))\n\n\n# In[ ]:\n\n\nlast_count_train = 0\nlast_count_val = 0\n\nfor epoch in range(args.epochs):\n \n cur_lr = adjust_learning_rate(G_optimizer, epoch, args.lr_steps, args.lr_decay)\n cur_lr = adjust_learning_rate(D_optimizer, epoch, args.lr_steps, args.lr_decay)\n \n save_epoch = osp.join(args.save_path,str(epoch))\n mkdir(save_epoch)\n \n \n last_count_train = train_net(args, train_loader, Unet, D, epoch, save_epoch, last_count_train, cur_lr)\n last_count_val = val_net(args, val_loader, Unet, D, epoch, save_epoch, last_count_val) \n \n# for i in range(len(g_pic[:,0,0,0])-1):\n# cv2.imwrite(osp.join(save_epoch, str(i)+'.jpg'),g_pic[i])\nwriter.close() \n\n" }, { "alpha_fraction": 0.6113041043281555, "alphanum_fraction": 0.64323890209198, "avg_line_length": 28.2265625, "blob_id": "b682d530b1457e4ba932595559b9797adc991a95", "content_id": "27140b2dfa188c92254fc318fbf3e7e09ef86e6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7512, "license_type": "no_license", "max_line_length": 130, "num_lines": 256, "path": "/Pytorch-Style2paints/test.py", "repo_name": "lvcc2018/Style2Paints_V3", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"6\"\nimport tensorflow as tf\ngpu_options = tf.GPUOptions(allow_growth=True)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nimport sys\nimport time\nfrom optparse import OptionParser\nimport numpy as np\nimport glob \nimport os.path as osp\nfrom PIL import Image\nimport cv2\nimport matplotlib.pyplot as plt\nimport random\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\nimport torchvision\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.autograd import Variable\nimport torchvision.utils as vutils\nimport torchvision.transforms as transforms\n\nfrom transforms import GroupRandomCrop\nfrom transforms import GroupScale\n\nfrom eval import eval_net\nfrom unet import UNet\nfrom unet import Discriminator\nfrom utils import get_ids, split_ids, split_train_val, get_imgs_and_masks, batch\nimport easydict\n\nfrom dataset_multi import ClothDataSet\n\n\n# In[2]:\n\n\nargs = easydict.EasyDict({\n 'epochs' : 100,\n 
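# NOTE: the paths in this dict are machine-specific and must be adapted before running\n 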
'batch_size' : 12,\n 'train_path' : '/data6/wangpengxiao/zalando_dataset2/train',\n 'val_path' : '/data6/wangpengxiao/zalando_dataset2/val',\n 'sketch_path' : \"/data6/wangpengxiao/zalando_sketch2\",\n 'draft_path' : \"/data6/wangpengxiao/zalando_final2\",\n 'save_path' : \"/data6/wangpengxiao/style2paint2\" ,\n 'weight_path' : \"/data6/wangpengxiao/style2paint3/25/25_Unet_checkpoint.pth.tar\",\n 'hanfu_path' : \"/data6/wangpengxiao/hanfu\",\n 'img_size' : 300,\n 're_size' : 256,\n 'learning_rate' : 1e-3,\n 'gpus' : '[0]',\n 'lr_steps' : [5, 10, 15, 20, 25],\n \"lr_decay\" : 0.1,\n 'lamda_L1' : 100,\n 'workers' : 8,\n 'weight_decay' : 1e-4\n})\n\n\n# In[3]:\n\n\nUnet = UNet(in_channels=4, out_channels=3)\ncheckpoint = torch.load(args.weight_path)\n# print(\"model epoch {} best prec@1: {}\".format(checkpoint['epoch'], checkpoint['best_prec1']))\nbase_dict = {'.'.join(k.split('.')[1:]): v for k,v in list(checkpoint['state_dict'].items())}\n# model.load_state_dict(base_dict)\n\n\n# In[4]:\n\n\nUnet.load_state_dict(base_dict)\nUnet.eval()\n\n\n# In[5]:\n\n\nfrom keras.models import load_model\nmod = load_model('/data2/wangpengxiao/GANs/style2paints3/V4/mod.h5')\nfrom helper import *\ndef edge_detecton(path):\n '''\n get sketch\n '''\n from_mat = cv2.imread(path)\n width = float(from_mat.shape[1])\n height = float(from_mat.shape[0])\n new_width = 0\n new_height = 0\n if (width > height):\n from_mat = cv2.resize(from_mat, (512, int(512 / width * height)), interpolation=cv2.INTER_AREA)\n new_width = 512\n new_height = int(512 / width * height)\n else:\n from_mat = cv2.resize(from_mat, (int(512 / height * width), 512), interpolation=cv2.INTER_AREA)\n new_width = int(512 / height * width)\n new_height = 512\n from_mat = from_mat.transpose((2, 0, 1))\n light_map = np.zeros(from_mat.shape, dtype=np.float)\n for channel in range(3):\n light_map[channel] = get_light_map_single(from_mat[channel])\n light_map = normalize_pic(light_map)\n light_map = resize_img_512_3d(light_map)\n line_mat = mod.predict(light_map, batch_size=1)\n line_mat = line_mat.transpose((3, 1, 2, 0))[0]\n line_mat = line_mat[0:int(new_height), 0:int(new_width), :]\n #sketchKeras_colored = show_active_img_and_save('sketchKeras_colored', line_mat, 'sketchKeras_colored.jpg')\n line_mat = np.amax(line_mat, 2)\n #sketchKeras_enhanced = show_active_img_and_save_denoise_filter2('sketchKeras_enhanced', line_mat, 'sketchKeras_enhanced.jpg')\n #sketchKeras_pured = show_active_img_and_save_denoise_filter('sketchKeras_pured', line_mat, 'sketchKeras_pured.jpg')\n sketchKeras = show_active_img_and_save_denoise('sketchKeras', line_mat, 'sketchKeras.jpg')\n# cv2.waitKey(0)\n return sketchKeras\n\n\n# In[6]:\n\n\n#preprocess the input images\nhanfu_img = glob.glob(osp.join(args.hanfu_path,'*.jpg'))\nhanfu_img = sorted(hanfu_img)\ngt_img = glob.glob(osp.join(args.val_path,'*.jpg'))\ngt_img = sorted(gt_img)\n\np = gt_img[335]\ngt = Image.open(p).convert('RGB')\n#sk = Image.open(osp.join(args.sketch_path, osp.basename(p))).convert('L')\ndf = Image.open(osp.join(args.draft_path, osp.basename(p))).convert('RGB')\n# plt.imshow(gt)\nsketch = edge_detecton(hanfu_img[5])\nsketch = Image.fromarray(sketch).convert('L')\n\n\n# In[7]:\n\n\n# sk = Image.open(hanfu_img[5]).convert('L')\n# plt.imshow(Image.open(hanfu_img[5]).convert('RGB'))\n\n\n# In[8]:\n\n\n# style_path = '/data6/wangpengxiao/hanfu/style'\n# sty = glob.glob(osp.join(style_path,'*.jpg'))\n# sty = sorted(sty)\n# df = Image.open(sty[1]).convert('RGB')\n# plt.imshow(df)\n\n\n# In[ ]:\n\n\ndef test(Unet,args,gt,sk,df):\n 
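# resize the inputs, sample random color-hint points from gt, normalize, and run the generator once\n 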
gt = gt.resize((args.re_size, args.re_size), Image.BICUBIC)\n sk = sk.copy()\n sk = sk.resize((args.re_size, args.re_size), Image.BICUBIC)\n # df = gt.copy()\n df = df.resize((299, 299), Image.BICUBIC)\n\n gt = np.array(gt)\n point_map = np.zeros(gt.shape)\n #\n coordinate = np.where(np.sum(gt,axis=2) < np.sum(np.array([240,240,240])))\n num_of_point = np.random.randint(1, 6)\n a = random.sample(range(0,max(num_of_point,len(coordinate[0]))),10)\n\n for i in range(len(a)): \n r,g,b = gt[coordinate[0][a[i]],coordinate[1][a[i]],:]\n cv2.circle(point_map,(coordinate[1][a[i]],coordinate[0][a[i]]),4,(int(r),int(g),int(b)),-1) \n #\n gt = Image.fromarray(gt)\n point_map = Image.fromarray(point_map.astype('uint8'))\n pm = point_map.copy()\n\n sk = transforms.ToTensor()(sk)\n point_map = transforms.ToTensor()(point_map)\n gt = transforms.ToTensor()(gt)\n df = transforms.ToTensor()(df)\n sk = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(sk)\n point_map = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(point_map)\n gt = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(gt)\n df = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(df)\n\n\n input = torch.cat((sk,point_map),0) \n\n input = input.view(1,input.shape[0],input.shape[1],input.shape[2])\n df = df.view(1,df.shape[0],df.shape[1],df.shape[2])\n\n\n input=input.float()\n df=df.float()\n with torch.no_grad(): \n input_var = input\n df_var = df\n\n fake_pic = Unet(input_var,df_var)\n fake = vutils.make_grid(fake_pic, normalize=True, scale_each=True)\n fake = np.transpose(fake.detach().numpy(),(1,2,0))\n \n return fake,pm\n\n\n# In[ ]:\n\n\n#preprocess the input images\nfrom tqdm import tqdm\nsave_path = '/data6/wangpengxiao/hanfu/result'\nhanfu_img = glob.glob(osp.join(args.hanfu_path,'*.jpg'))\nhanfu_img = sorted(hanfu_img)\ngt_img = glob.glob(osp.join(args.val_path,'*.jpg'))\ngt_img = sorted(gt_img)\nfor i in range(len(hanfu_img)):\n sketch = edge_detecton(hanfu_img[i])\n sketch = Image.fromarray(sketch).convert('L')\n for j in tqdm(range(30)):\n p = gt_img[j]\n gt = Image.open(p).convert('RGB')\n df = Image.open(p).convert('RGB')\n\n\n pic,pm = test(Unet,args,gt,sketch,df)\n pm.save(osp.join(save_path,str(i)+'_'+str(j)+'point_map.jpg'))\n pic = Image.fromarray((pic*255).astype('uint8'))\n plt.imshow(pic)\n gt.save(osp.join(save_path,str(i)+'_'+str(j)+'gt.jpg'))\n pic.save(osp.join(save_path,str(i)+'_'+str(j)+'fake.jpg'))\n\n\n# In[ ]:\n\n\n# pic = test(Unet,args,gt,sketch,df)\n# cv2.imwrite(osp.join(save_path,str(i)+'_'+str(j)+'fake.jpg'),pic)\n\n\n# In[ ]:\n\n\n# plt.imshow(pic)\n# cv2.imwrite(osp.join(save_path,str(i)+'_'+str(j)+'fake.jpg'),(pic*255).astype('uint8'))\n\n" } ]
10
jackleland/ouueg_vote_counter
https://github.com/jackleland/ouueg_vote_counter
a628eaaf09e107743733daf98813782e1b7aa519
7bc7dd9ce9173547c4c7b0d727f7d5ee87e6d402
2ec205db610f8e658c138d366142c77187d61010
refs/heads/master
2020-08-05T01:44:34.888581
2019-10-02T13:49:18
2019-10-02T13:49:18
212,351,770
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5796595215797424, "alphanum_fraction": 0.5892623066902161, "avg_line_length": 23.559139251708984, "blob_id": "42542512596626d99526a883aaa49907622ed179", "content_id": "b6ee5baa9019d251e954fb8b61aeded92a0e592b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2291, "license_type": "no_license", "max_line_length": 120, "num_lines": 93, "path": "/ouueg_vote_counter.py", "repo_name": "jackleland/ouueg_vote_counter", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[50]:\n\n\nimport math\nimport csv\n\n\n# In[51]:\n\n\nclass Vote():\n def __init__(self, row):\n self.ranks = []\n self.voted_set = set()\n for rank in row:\n rank = rank.split('(')[0].strip()\n if rank not in self.voted_set:\n self.voted_set.add(rank)\n self.ranks.append(rank)\n \n def get_choice(self, disallowed):\n for rank in self.ranks:\n if rank not in disallowed:\n return rank\n return 'not counted'\n\n\n# In[52]:\n\n\nvotes = []\nwith open('ouueg_results_anonymised.csv', newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n vote = Vote(row)\n votes.append(vote)\n\n\n# In[60]:\n\n\nno_of_votes_needed = math.ceil((len(votes) + 1) / 2)\nprint(f'There are {len(votes)} total votes, so a candidate motto requires {no_of_votes_needed} votes to win a majority')\n\n\n# In[63]:\n\n\nvotes_for_leader = 0\nround_no = 1\ndisallowed = set()\noptions = {\n 'frigore turbidum mare itinerantur', \n 'submergo ergo sum',\n 'rimam me habere puto',\n 'omnia erit bene',\n 'ave, oceane, morituri te salutant!',\n 'duc in profundum',\n 'resolve sis me',\n 'ut plurimum siccus manemus',\n 'ut sis nocte sollicitudin pede vulnus sentire quod a Moravia',\n 'in aquae laetitia',\n}\n\nwhile votes_for_leader < no_of_votes_needed and round_no <= 10:\n print(f'\\n\\n Round {round_no}')\n results = {option: 0 for option in options - disallowed}\n for vote in votes:\n round_pref = vote.get_choice(disallowed)\n if round_pref == 'not counted':\n continue\n else:\n results[round_pref] += 1\n leaders, losers = [], []\n leader_vote_count = max(*results.values())\n loser_vote_count = min(*results.values())\n for candidate, count in results.items():\n if count == leader_vote_count:\n leaders.append(candidate)\n if count == loser_vote_count:\n losers.append(candidate)\n print(f'Results of round {round_no}: {results}')\n if len(leaders) == 1:\n votes_for_leader = leader_vote_count\n \n print(f'Removing {losers}')\n disallowed = disallowed | set(losers)\n round_no += 1\n\nprint('\\n\\n\\nWinner found!')\nprint(leaders, votes_for_leader)\n \n\n" } ]
1
rai-gaurav/python-testing
https://github.com/rai-gaurav/python-testing
272dc8dd5125ae94da94d9c4d07a3199e082e3bb
bfd26c8aa160ca89781aec0c0ac6995d82744608
64180d5baba04b24c7ab623fb6dc96223bb7e3de
refs/heads/main
2023-03-14T11:09:14.793452
2021-03-08T18:58:03
2021-03-08T18:58:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.683955729007721, "alphanum_fraction": 0.683955729007721, "avg_line_length": 38.08108139038086, "blob_id": "64300ba9821a684b70639323b844304231049538", "content_id": "6cd6f1271d7d88a260b33d951f6e99805fd752d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1446, "license_type": "no_license", "max_line_length": 117, "num_lines": 37, "path": "/tests/ui/test_login.py", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "import allure\nimport pytest\nfrom playwright.sync_api import Page\n\nfrom pages.HomePage import HomePage\nfrom pages.SignInPage import SignInPage\n\n\[email protected]('Log in negative tests')\[email protected]('Authentication error')\[email protected](\"login,password\",\n [(\"[email protected]\", \"123qew\"), (\"[email protected]\", \"zxcv545\"), (\"[email protected]\", \"dfdsf7876\")])\ndef test_sign_in_error(page: Page, login, password):\n home_page = HomePage(page)\n home_page.open_main_page()\n page.click(home_page.SIGN_IN_LOCATOR)\n sign_in_page = SignInPage(page)\n sign_in_page.populate_credentials(login, password)\n sign_in_page.click_sign_in_button()\n\n assert \"Authentication failed.\" in sign_in_page.get_authentication_error_text(), \\\n \"Wrong authentication validation message\"\n\n\[email protected]('Log in negative tests')\[email protected]('Incorrect login error')\[email protected](\"login,password\",\n [(\"aaaa@aa,aa\", \"123qew\"), (\"bbb@bb,bb\", \"zxcv545\"), (\"cccc@ccc,cc\", \"dfdsf7876\")])\ndef test_email_error(page: Page, login, password):\n home_page = HomePage(page)\n home_page.open_main_page()\n page.click(home_page.SIGN_IN_LOCATOR)\n sign_in_page = SignInPage(page)\n sign_in_page.populate_credentials(login, password)\n sign_in_page.click_sign_in_button()\n\n assert \"Invalid email address.\" in sign_in_page.get_authentication_error_text(), \"Wrong email validation message\"\n" }, { "alpha_fraction": 0.7061503529548645, "alphanum_fraction": 0.7107061743736267, "avg_line_length": 30.35714340209961, "blob_id": "0bc71927c6e42625a3d84c87df63a35bc2adc64d", "content_id": "91c333a6bab9aad1149c87df2afe789d5ae5b5ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/tests/ui/test_cart.py", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "import allure\nfrom playwright.sync_api import Page\n\nfrom pages.HomePage import HomePage\n\n\[email protected]('Cart functionality tests')\[email protected]('Add item to cart')\ndef test_confirmation(page: Page):\n home_page = HomePage(page)\n home_page.open_main_page()\n home_page.add_thing_to_cart_by_name(\"Blouse\")\n text = home_page.get_confirmation_text()\n\n assert \"Product successfully added to your shopping cart\" in text, \"Wrong confirmation message\"\n\n\[email protected]('Cart functionality tests')\[email protected]('Added item info')\ndef test_cart(page: Page):\n home_page = HomePage(page)\n home_page.open_main_page()\n home_page.add_thing_to_cart_by_name(\"Printed Dress\")\n home_page.close_confirmation_modal()\n home_page.expand_cart()\n data = home_page.return_purchase_data()\n\n assert (\"Printed Dressd\", \"$26.00\") == data, f\"Wrong item info: '{data}'\"\n" }, { "alpha_fraction": 0.673815906047821, "alphanum_fraction": 0.673815906047821, "avg_line_length": 30.97142791748047, "blob_id": "75f41362b8660e79c7c4f145e77d63e5803f84dc", 
"content_id": "8d7c665735ce5c19371f8819bdc425e3d2b78e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1119, "license_type": "no_license", "max_line_length": 64, "num_lines": 35, "path": "/pages/SignInPage.py", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "import logging\n\nfrom playwright.sync_api import Page\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\nclass SignInPage:\n EMAIL_ADDRESS_LOCATOR = \"#email\"\n PASSWORD_LOCATOR = \"#passwd\"\n SIGN_IN_LOCATOR = \"#SubmitLogin\"\n AUTHENTICATION_ERROR = \".alert.alert-danger li\"\n\n def __init__(self, page: Page):\n self.page = page\n\n def populate_credentials(self, email, password):\n self.page.wait_for_selector(self.EMAIL_ADDRESS_LOCATOR)\n logger.info(f\"Type login: '{email}'\")\n self.page.type(self.EMAIL_ADDRESS_LOCATOR, email)\n logger.info(f\"Type password: '{email}'\")\n self.page.type(self.PASSWORD_LOCATOR, password)\n\n def get_authentication_error_text(self):\n logger.info(\"Get authentication error\")\n self.page.wait_for_selector(self.AUTHENTICATION_ERROR)\n text = self.page.text_content(self.AUTHENTICATION_ERROR)\n\n return text\n\n def click_sign_in_button(self):\n self.page.wait_for_selector(self.SIGN_IN_LOCATOR)\n logger.info(\"Click on the 'Sign In' button\")\n self.page.click(self.SIGN_IN_LOCATOR)\n" }, { "alpha_fraction": 0.6590517163276672, "alphanum_fraction": 0.6599137783050537, "avg_line_length": 37.66666793823242, "blob_id": "22997c6cb63e16996a25390a034ce2fe6cd2e025", "content_id": "c53960ed44cf932df352fa463d62d1c800d96eea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2320, "license_type": "no_license", "max_line_length": 111, "num_lines": 60, "path": "/pages/HomePage.py", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "import logging\n\nfrom playwright.sync_api import Page\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\nclass HomePage:\n ADDRESS = \"http://automationpractice.com/index.php\"\n\n CONFIRMATION_LOCATOR = \"#layer_cart h2\"\n SIGN_IN_LOCATOR = \".header_user_info a\"\n CLOSE_MODAL_WINDOW_LOCATOR = \"span[title='Close window']\"\n VIEW_MY_SHOPPING_CART_LOCATOR = \".shopping_cart a[title='View my shopping cart']\"\n SINGLE_ITEM_IN_CART_LOCATOR = \".cart_block_list .product-name a\"\n SINGLE_ITEM_PRICE_IN_CART_LOCATOR = \".cart_block_list .cart-info .price\"\n\n def __init__(self, page: Page):\n self.page = page\n\n def open_main_page(self):\n logger.info(\"Open Home page\")\n home_page = HomePage(self.page)\n self.page.goto(home_page.ADDRESS)\n self.page.wait_for_load_state()\n\n def add_thing_to_cart_by_name(self, name):\n logger.info(f\"Waiting for the '{name}' item\")\n self.page.wait_for_selector(f\"a[title='{name}']\")\n logger.info(f\"Hover on the '{name}' item\")\n self.page.hover(f\"a[title='{name}']\")\n logger.info(\"Click on the 'Add to cart' button\")\n self.page.click(\n f\"//ul[@id='homefeatured']//div[h5//a[normalize-space()='{name}']]//a[span[text()='Add to cart']]\")\n\n def get_confirmation_text(self):\n logger.info(\"Return confirmation message\")\n self.page.wait_for_selector(self.CONFIRMATION_LOCATOR)\n\n return self.page.text_content(self.CONFIRMATION_LOCATOR)\n\n def close_confirmation_modal(self):\n logger.info(\"Close confirmation window\")\n self.page.wait_for_selector(self.CLOSE_MODAL_WINDOW_LOCATOR)\n self.page.click(self.CLOSE_MODAL_WINDOW_LOCATOR)\n\n 
def expand_cart(self):\n logger.info(\"Expand cart\")\n self.page.wait_for_selector(self.VIEW_MY_SHOPPING_CART_LOCATOR)\n self.page.hover(self.VIEW_MY_SHOPPING_CART_LOCATOR)\n\n def return_purchase_data(self):\n self.page.wait_for_selector(self.SINGLE_ITEM_IN_CART_LOCATOR)\n logger.info(\"Get title of the added item to cart\")\n title = self.page.get_attribute(self.SINGLE_ITEM_IN_CART_LOCATOR, \"title\")\n logger.info(\"Get price of the added item to cart\")\n price = self.page.text_content(self.SINGLE_ITEM_PRICE_IN_CART_LOCATOR)\n\n return title, price\n" }, { "alpha_fraction": 0.5028463006019592, "alphanum_fraction": 0.7001897692680359, "avg_line_length": 16.566667556762695, "blob_id": "c5380bbb6decce6a97a44e88fa594e87d00507af", "content_id": "4677ba570abe295069ff8f766c08b58e2d26e780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 527, "license_type": "no_license", "max_line_length": 29, "num_lines": 30, "path": "/requirements.txt", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "allure-pytest==2.8.36\nallure-python-commons==2.8.36\napipkg==1.5\natomicwrites==1.4.0\nattrs==20.3.0\ncertifi==2020.12.5\nchardet==4.0.0\ncolorama==0.4.4\nexecnet==1.8.0\ngreenlet==1.0.0\nidna==2.10\niniconfig==1.1.1\npackaging==20.9\nplaywright==1.9.1\npluggy==0.13.1\npy==1.10.0\npyee==8.1.0\npyparsing==2.4.7\npytest==6.2.2\npytest-base-url==1.4.2\npytest-forked==1.3.0\npytest-playwright==0.0.12\npytest-xdist==2.2.1\npython-slugify==4.0.1\nrequests==2.25.1\nsix==1.15.0\ntext-unidecode==1.3\ntoml==0.10.2\ntyping-extensions==3.7.4.3\nurllib3==1.26.3\n" }, { "alpha_fraction": 0.7540000081062317, "alphanum_fraction": 0.7559999823570251, "avg_line_length": 34.64285659790039, "blob_id": "ebb92563488386cefc438811deedee227a0bd6e0", "content_id": "94f759be63ad06551de83c1b52cb3986612d7760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 500, "license_type": "no_license", "max_line_length": 136, "num_lines": 14, "path": "/README.md", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "# python-testing\nSome tests for testing UI using Playwright, and API using requests. 
Tests run on every push and Allure results are deployed to GitHub Pages.\n\n## Local running\n### Console\n`pytest` - run tests without any report in 1 thread\n\n`pytest --headful` - Playwright will run in headful mode\n\n`pytest -n <thread count>` - run tests in \"thread count\" threads\n\n`pytest --alluredir=/tmp/my_allure_results` - run tests with collection of results\n\n`allure serve /tmp/my_allure_results` - generate a report\n\n" }, { "alpha_fraction": 0.6164215803146362, "alphanum_fraction": 0.626225471496582, "avg_line_length": 31.384614944458008, "blob_id": "b2e05fa855236f51e432966a387a3ec2a3e67ba9", "content_id": "3677fd4716197008996901bf5400383ec29c3a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 87, "num_lines": 26, "path": "/tests/ui/conftest.py", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nimport allure\nimport pytest\nfrom slugify import slugify\n\n\[email protected](scope=\"session\")\ndef browser_context_args(browser_context_args):\n    return {\n        **browser_context_args,\n        \"viewport\": {\n            \"width\": 1920,\n            \"height\": 1080,\n        },\n    }\n\n\ndef pytest_runtest_makereport(item, call) -> None:\n    if call.when == \"call\" and call.excinfo is not None:\n        page = item.funcargs[\"page\"]\n        screenshot_dir = Path(Path(__file__).parent.parent / \".playwright-screenshots\")\n        screenshot_dir.mkdir(exist_ok=True)\n        page.screenshot(path=str(screenshot_dir / f\"{slugify(item.nodeid)}.png\"))\n        allure.attach.file(str(screenshot_dir / f\"{slugify(item.nodeid)}.png\"),\n                           attachment_type=allure.attachment_type.PNG)\n" }, { "alpha_fraction": 0.5757692456245422, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 31.098766326904297, "blob_id": "6e80170ce12cff4378e235014e09b31ac44e33de", "content_id": "2725c3131c40443e640dd582a1b6feaf765d0df2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5200, "license_type": "no_license", "max_line_length": 108, "num_lines": 162, "path": "/tests/api/test_booking.py", "repo_name": "rai-gaurav/python-testing", "src_encoding": "UTF-8", "text": "import allure\nimport pytest\nimport requests\n\n\ndef create_booking(json_body):\n    with allure.step('Create booking'):\n        response = requests.post(\"https://restful-booker.herokuapp.com/booking\", json=json_body)\n        return response\n\n\[email protected]\ndef authorize():\n    login = \"admin\"\n    password = \"password123\"\n    with allure.step('Get token'):\n        response = requests.post(\"https://restful-booker.herokuapp.com/auth\",\n                                 json={\"username\": login, \"password\": password})\n    return response.json()[\"token\"]\n\n\[email protected]('Check booking API')\[email protected]('Receive all booking test')\ndef test_all_bookings():\n    with allure.step('Get all bookings'):\n        response = requests.get(\"https://restful-booker.herokuapp.com/booking\")\n\n    assert response.status_code == requests.codes.ok, f\"Wrong status code: {response.status_code}\"\n    assert len(response.content) > 5, \"Wrong count of bookings\"\n\n\[email protected]('Check booking API')\[email protected]('Create booking tests')\ndef test_create_booking():\n    booking = {\n        \"firstname\": \"James\",\n        \"lastname\": \"Bond\",\n        \"totalprice\": 1200,\n        \"depositpaid\": True,\n        \"bookingdates\": {\n            \"checkin\": \"2018-01-01\",\n            \"checkout\": \"2019-01-01\"\n        },\n        \"additionalneeds\": \"Breakfast\"\n    }\n    response = create_booking(booking)\n    
id = response.json()[\"bookingid\"]\n\n assert response.status_code == requests.codes.ok, f\"Wrong status code: {response.status_code}\"\n assert id > 0, \"Booking isn't created\"\n\n\[email protected]('Check booking API')\[email protected]('Update created booking test')\ndef test_update_booking(authorize):\n token = authorize\n booking = {\n \"firstname\": \"James\",\n \"lastname\": \"Bond\",\n \"totalprice\": 3000,\n \"depositpaid\": True,\n \"bookingdates\": {\n \"checkin\": \"2018-01-01\",\n \"checkout\": \"2019-01-01\"\n },\n \"additionalneeds\": \"Breakfast\"\n }\n id = create_booking(booking).json()[\"bookingid\"]\n booking_edited = {\n \"firstname\": \"James\",\n \"lastname\": \"Bond\",\n \"totalprice\": 2100,\n \"depositpaid\": True,\n \"bookingdates\": {\n \"checkin\": \"2018-01-01\",\n \"checkout\": \"2019-01-01\"\n },\n \"additionalneeds\": \"Breakfast\"\n }\n\n cookies = {'token': token}\n with allure.step('Update booking'):\n response = requests.put(f\"https://restful-booker.herokuapp.com/booking/{id}\", json=booking_edited,\n cookies=cookies)\n\n assert response.status_code == requests.codes.ok, f\"Wrong status code: {response.status_code}\"\n\n\[email protected]('Check booking API')\[email protected]('Patch created booking test')\ndef test_patch_booking(authorize):\n token = authorize\n booking = {\n \"firstname\": \"James\",\n \"lastname\": \"Bond\",\n \"totalprice\": 1250,\n \"depositpaid\": True,\n \"bookingdates\": {\n \"checkin\": \"2018-01-01\",\n \"checkout\": \"2019-01-01\"\n },\n \"additionalneeds\": \"Breakfast\"\n }\n id = create_booking(booking).json()[\"bookingid\"]\n booking_edited = {\n \"depositpaid\": False,\n \"additionalneeds\": \"Dinner\"\n }\n\n cookies = {'token': token}\n with allure.step('Patch booking'):\n response = requests.patch(f\"https://restful-booker.herokuapp.com/booking/{id}\", json=booking_edited,\n cookies=cookies)\n\n assert response.status_code == requests.codes.ok, f\"Wrong status code: {response.status_code}\"\n\n\[email protected]('Check booking API')\[email protected]('Get created booking by Id tests')\ndef test_get_by_id():\n booking = {\n \"firstname\": \"James\",\n \"lastname\": \"Bond\",\n \"totalprice\": 2100,\n \"depositpaid\": False,\n \"bookingdates\": {\n \"checkin\": \"2018-01-01\",\n \"checkout\": \"2019-01-01\"\n },\n \"additionalneeds\": \"Launch\"\n }\n id = create_booking(booking).json()[\"bookingid\"]\n with allure.step('Get booking by Id'):\n response = requests.get(f\"https://restful-booker.herokuapp.com/booking/{id}\")\n\n assert response.json()[\"totalprice\"] == 2100, f'Wrong price: {response.json()[\"totalprice\"]}'\n assert response.json()[\"additionalneeds\"] == \"Launch\", \\\n f'Wrong additional needs: {response.json()[\"additionalneeds\"]}'\n assert response.json()[\"depositpaid\"] == False, f'Wrong depositpaid: {response.json()[\"depositpaid\"]}'\n\n\[email protected]('Check booking API')\[email protected]('Delete created booking by Id test')\ndef test_delete_booking(authorize):\n token = authorize\n booking = {\n \"firstname\": \"James\",\n \"lastname\": \"Bond\",\n \"totalprice\": 6000,\n \"depositpaid\": False,\n \"bookingdates\": {\n \"checkin\": \"2018-01-01\",\n \"checkout\": \"2019-01-01\"\n },\n \"additionalneeds\": \"Launch\"\n }\n id = create_booking(booking).json()[\"bookingid\"]\n cookies = {'token': token}\n with allure.step('Delete booking'):\n response = requests.delete(f\"https://restful-booker.herokuapp.com/booking/{id}\", cookies=cookies)\n\n assert response.status_code == 
requests.codes.created\n" } ]
8
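The console commands listed in the README above map directly onto pytest's programmatic entry point. A minimal sketch, assuming pytest, pytest-xdist, and allure-pytest are installed; the results directory is a placeholder:

    import pytest

    # Equivalent of `pytest -n 4 --alluredir=/tmp/my_allure_results` from the README.
    # pytest.main returns the usual pytest exit code (0 when all tests pass).
    exit_code = pytest.main(["-n", "4", "--alluredir", "/tmp/my_allure_results"])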
castironclay/url_shortener
https://github.com/castironclay/url_shortener
92e06f5c8015a8dccb208b5008d503b476fb5583
2d5906b9f2f8b776b36722336ef100d30c84e494
f43a34a51209ee049fd11466c1ef6a255150a5be
refs/heads/master
2021-04-12T11:26:31.097331
2018-04-03T00:39:51
2018-04-03T00:39:51
126,612,932
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5796812772750854, "alphanum_fraction": 0.6474103331565857, "avg_line_length": 25.421052932739258, "blob_id": "bd43236b4bb179c54dfd6ddd8f23f684c2acd0b7", "content_id": "c853e5253f5b7371d8f8b70b0d15487e216dbff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 138, "num_lines": 19, "path": "/src/shortener/migrations/0003_auto_20180324_1307.py", "repo_name": "castironclay/url_shortener", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-03-24 17:07\n\nfrom django.db import migrations, models\nimport shortener.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shortener', '0002_auto_20180323_2303'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='clayurl',\n name='url',\n field=models.CharField(max_length=220, validators=[shortener.validators.validate_url, shortener.validators.validate_dot_com]),\n ),\n ]\n" }, { "alpha_fraction": 0.6024096608161926, "alphanum_fraction": 0.6405622363090515, "avg_line_length": 25.210525512695312, "blob_id": "6524397be38949139304a403c3c8a8d7a37604a5", "content_id": "2c38df73cf27fb58c953447cba6d6820e53f988f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 120, "num_lines": 19, "path": "/src/analytics/migrations/0003_auto_20180324_1558.py", "repo_name": "castironclay/url_shortener", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-03-24 19:58\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('analytics', '0002_clickevent_clay_url'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='clickevent',\n name='clay_url',\n field=models.OneToOneField(blank=True, on_delete=django.db.models.deletion.CASCADE, to='shortener.ClayURL'),\n ),\n ]\n" }, { "alpha_fraction": 0.7016806602478027, "alphanum_fraction": 0.7016806602478027, "avg_line_length": 30.733333587646484, "blob_id": "5718bcb0373dbe1cef37f79b3b0df566997c5aa7", "content_id": "dbd48b0b81fc0c8a9286a2c6486fd2568bf0c785", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 73, "num_lines": 15, "path": "/src/shortener/management/commands/refreshcodes.py", "repo_name": "castironclay/url_shortener", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand, CommandError\n\nfrom shortener.models import ClayURL\n\nclass Command(BaseCommand):\n help = 'Refreshes all ClayURL shortcodes'\n\n #arguments\n def add_arguments(self, parser):\n # no longer a required argument\n # must be an integer\n argument = parser.add_argument('--items', type=int)\n\n def handle(self, *args, **options):\n return ClayURL.objects.refresh_shortcodes(items=options['items'])\n" }, { "alpha_fraction": 0.5609756112098694, "alphanum_fraction": 0.6285178065299988, "avg_line_length": 25.649999618530273, "blob_id": "b8bb59ebc8904f065a511cdd4781b77f612f44db", "content_id": "33b3c970c478b019588d0a6536b970da872eee73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "no_license", "max_line_length": 119, "num_lines": 20, "path": "/src/analytics/migrations/0002_clickevent_clay_url.py", "repo_name": 
"castironclay/url_shortener", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-03-24 19:55\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shortener', '0003_auto_20180324_1307'),\n ('analytics', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='clickevent',\n name='clay_url',\n field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='shortener.ClayURL'),\n ),\n ]\n" } ]
4
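The refreshcodes management command in this repo can also be driven from Python through Django's call_command; a short sketch, assuming Django is configured for this project (the item count is arbitrary):

    from django.core.management import call_command

    # Regenerate short codes for up to 10 ClayURL rows; the --items option is
    # declared as optional in the command, so it may also be omitted entirely.
    call_command("refreshcodes", items=10)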
ShaneOfAllTrades/datanitro-mysql
https://github.com/ShaneOfAllTrades/datanitro-mysql
4784e60892d277f0d40b881801bd6f67b47c604c
4af9604f188213d2b23a72ea25ee853bef972b8b
1f8066afd9641d1a3ae00e8d5571b21da2322f7b
refs/heads/master
2021-01-23T13:17:16.212371
2013-10-11T06:53:24
2013-10-11T06:53:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7740585803985596, "alphanum_fraction": 0.7740585803985596, "avg_line_length": 52.11111068725586, "blob_id": "801f3cf4e643d91c7040ea3c375888bf3cfa71f1", "content_id": "fca51042a7c0ca55165fc352f1b0bc3ab93a2017", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 478, "license_type": "no_license", "max_line_length": 116, "num_lines": 9, "path": "/README.md", "repo_name": "ShaneOfAllTrades/datanitro-mysql", "src_encoding": "UTF-8", "text": "datanitro-mysql\n===============\n\nPython code to access MySQL from within Excel, using DataNitro\n\nThere are a few examples accessing a few databases already, but this one shows you how to connect to MySQL,\nallowing for ways to CRUD from an Excel spreadsheet. So for example if you had a standard LAMP stack using PHP, you \ncan now make use of all Excel features along with Python. Or you can improve updating your database if all your \nsource data comes from local spreadsheets.\n" }, { "alpha_fraction": 0.5521835684776306, "alphanum_fraction": 0.5854922533035278, "avg_line_length": 31.190475463867188, "blob_id": "2a6b6bad90a01d8d8cb38e1336f3f3a3e8b20ccb", "content_id": "c02b490aff843f33a8c45702bb5a99dc22081f50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1351, "license_type": "no_license", "max_line_length": 101, "num_lines": 42, "path": "/py-mysql-datanitro.py", "repo_name": "ShaneOfAllTrades/datanitro-mysql", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport MySQLdb\nimport sys\nimport csv\n\nactive_sheet(\"GUI\")\ndb = MySQLdb.connect(host=\"localhost\", user=\"mysql_user\",\n passwd=\"noobnoob\",db=\"mysql_db\")\n# the if elif can be a switch in Excel\nif Cell(\"G1\").value == \"insert/update\":\n active_sheet(\"active_1\")\n row_up = 10\n\tc_up = db.cursor()\n\t\n\tsqy = \"INSERT INTO table_name (col_1, col_2) \\\n\tVALUES (%s,) \\\n\tON DUPLICATE KEY UPDATE \\\n\tstructure_for=VALUES(col_1, col_2);\"\n\twhile Cell(row_up,1).value is not None:\n c_up.execute(sqy,\n (CellRange((row_up,1),(row_up,2)).value)) #this range will extend to how many columns\n row_up = row_up + 1\n active_sheet(\"GUI\")\n# this one will import from database to Excel spreadsheet\nelif Cell(\"G1\").value == \"select\":\n active_sheet(\"active_1\")\n CellRange(\"A10:W1090\").value = None # cleanup existing data \n\tc = db.cursor()\n\texstring = \"select * from table_name where some_id = 1\"\n\t# whatever SELECT you want to use\n\tc.execute(exstring)\n\tsh = c.fetchall()\n\tfor i, pos in enumerate(sh):\n Cell(10+i, 1).horizontal = pos\n if Cell(10+i, 12).value is None:\n Cell(10+i, 12).value = \"01/01/2000 12:00\"\n\n active_sheet(\"GUI\")\n\nif Cell(\"G1\").value == \"select\":\n db.commit()\ndb.close()" } ]
2
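For reference outside Excel, here is a self-contained sketch of the parameterized upsert pattern the script above relies on, assuming MySQLdb and a table whose first column carries a unique key; the credentials, table, and column names are placeholders:

    import MySQLdb

    db = MySQLdb.connect(host="localhost", user="mysql_user",
                         passwd="secret", db="mysql_db")
    cur = db.cursor()
    # Two %s placeholders for two columns; the ON DUPLICATE KEY UPDATE clause
    # names the columns to refresh when the unique key already exists.
    sql = ("INSERT INTO table_name (col_1, col_2) VALUES (%s, %s) "
           "ON DUPLICATE KEY UPDATE col_2 = VALUES(col_2)")
    cur.execute(sql, ("key-1", "value-1"))
    db.commit()  # inserts/updates only become permanent after commit
    db.close()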
vishalvaibhav31244/Naya
https://github.com/vishalvaibhav31244/Naya
5d903f92ab66113a20f1c104f2f845fce8f18d5a
2351aaeee870feff213b93987899e8922c021ab3
5959ba0b7e2e1733731936d992c9a319cc0a615f
refs/heads/master
2023-02-18T18:08:43.963777
2021-01-20T09:30:13
2021-01-20T09:30:13
331,251,355
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 15, "blob_id": "bf8d4b6f3712e3cccea15a73ec926091c01ce324", "content_id": "04f664595be6d3df1940c5e34778786c5d9920d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/asd2.py", "repo_name": "vishalvaibhav31244/Naya", "src_encoding": "UTF-8", "text": "print(\"Gorbit Sahu\")" } ]
1
mbaddar1/sklearn-svm-classification
https://github.com/mbaddar1/sklearn-svm-classification
19712b86dbb192cdfa1853bc97f9d905d2abd36e
6af774d6e5ab3d9ff2363b53213e69ccd7b28134
547538b53c97957fb31de503648bc1e764482f66
refs/heads/master
2021-01-10T07:32:59.514394
2016-03-06T13:54:27
2016-03-06T13:54:27
52,729,838
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5946704149246216, "alphanum_fraction": 0.5993455052375793, "avg_line_length": 32.82539749145508, "blob_id": "2889cd7b525e99f7f3b2b099efb47b5b90583927", "content_id": "166097ec860160636f095a3b7fa757680fefce7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 95, "num_lines": 63, "path": "/src/Meals.py", "repo_name": "mbaddar1/sklearn-svm-classification", "src_encoding": "UTF-8", "text": "'''\nCreated on Feb 22, 2016\n\n@author: baddar\n'''\nfrom twisted.python.util import println\n\nclass Meals(object):\n '''\n classdocs\n '''\n\n\n def __init__(self, params=None):\n '''\n Constructor\n '''\n self.id = list()\n self.name = list()\n self.name_and_desc = list()\n self.description = list()\n self.meal_category = list()\n self.meal_parent_category = list()\n self.meal_category_value_index = dict()\n self.meal_category_index_value = dict()\n self.meal_category_mapped = list()\n self.meal_parent_category_value_index = dict()\n self.meal_parent_category_index_value = dict()\n self.meal_parent_category_mapped = list()\n \n def disp(self):\n println(self.id)\n println(self.name)\n println(self.description)\n println(self.meal_category)\n println(self.meal_parent_category)\n \n def calcMealCategoryIndex(self):\n distict_vals = set(self.meal_parent_category)\n idx = 1\n for val in distict_vals:\n self.meal_parent_category_value_index[val] = idx\n self.meal_parent_category_index_value[idx] = val\n idx = idx+1\n for val in self.meal_parent_category:\n self.meal_parent_category_mapped.append(self.meal_parent_category_value_index[val])\n \n distict_vals = set(self.meal_category)\n idx = 1\n for val in distict_vals:\n self.meal_category_value_index[val] = idx\n self.meal_category_index_value[idx] = val\n idx = idx+1\n for val in self.meal_category:\n self.meal_category_mapped.append(self.meal_category_value_index[val])\n# print len(distict_vals)\n# print len(self.meal_category_value_index)\n# print len(self.meal_category_mapped)\n# print self.meal_category_value_index\n# print self.meal_category_mapped\n def printStat(self):\n print \"Number of categories = \"+str(len(self.meal_category_value_index))\n print \"Number of parent categoris = \"+str(len(self.meal_parent_category_value_index))\n " }, { "alpha_fraction": 0.5728291273117065, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 28.163265228271484, "blob_id": "3b5768790ee7a1bffe25374465b9a50f9f60cf92", "content_id": "36b96c4e6c2a75e226c9c7c82f003e8bd36375a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1428, "license_type": "no_license", "max_line_length": 103, "num_lines": 49, "path": "/src/svm_decision_function.py", "repo_name": "mbaddar1/sklearn-svm-classification", "src_encoding": "UTF-8", "text": "'''\nCreated on Feb 25, 2016\n\n@author: baddar\n'''\n\n\n \nfrom sklearn import svm\nimport numpy as np\nfrom __builtin__ import str\n#ref\n#http://stackoverflow.com/questions/20113206/scikit-learn-svc-decision-function-and-predict\ndef svm_predict_from_decition_function(svm_decision_function_result,nclasses):\n tmp = svm_decision_function_result\n if not(isinstance(tmp,(tuple,np.ndarray,list))):\n l = list()\n l.append(tmp)\n svm_decision_function_result = l\n \n votes = np.zeros(nclasses)\n p = 0\n for i in range(1,nclasses+1,1):\n for j in range(i+1,nclasses+1,1): #compare each class i to class j where j > i 1<=i,j<=nclasses\n if 
svm_decision_function_result[p] > 0:\n votes[i-1] += 1\n else: \n votes[j-1] += 1\n p += 1\n \n max_index = np.argmax(votes)\n sorted_votes = np.sort(votes)\n n = len(sorted_votes)\n diff = sorted_votes[n-1] - sorted_votes[n-2]\n if(diff <=0):\n return None\n else:\n return (max_index+1)\n \n# y = [1,1,2,2,3,3,4,4]\n# X = np.random.randn(8, 10)\n# svm = svm.SVC().fit(X,y)\n# for sample_index in range(0,8,1):\n# result = svm.decision_function(X)[sample_index]\n# decision_class = svm_predict_from_decition_function(result, 4)\n# decision_class_2 = svm.predict(X)[sample_index]\n# print (decision_class,decision_class_2)\n# # print result.shape\n# # print result" }, { "alpha_fraction": 0.6438269019126892, "alphanum_fraction": 0.6495544910430908, "avg_line_length": 38.915252685546875, "blob_id": "7fac7b8bbfb88e1b05fd1920aea056e1f50d3fa0", "content_id": "c840a128f9ea1223c80763d96cb0c0446d51b655", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4714, "license_type": "no_license", "max_line_length": 117, "num_lines": 118, "path": "/src/exercise.py", "repo_name": "mbaddar1/sklearn-svm-classification", "src_encoding": "UTF-8", "text": "import csv\nfrom Meals import Meals\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import svm\nfrom sklearn import cross_validation\nfrom sklearn.externals import joblib\n\nimport numpy as np\nfrom svm_decision_function import svm_predict_from_decition_function\nfrom __builtin__ import str\n\n \ndef predict_meal_category(name, description):\n '''\n Predicts a category based on name and description of a meal.\n\n Parameters:\n name: name of meal as a unicode string\n description: description of meal as a unicode string\n\n Returns:\n * sub category, if classifier is sure about it\n * parent category, if classifier is sure about the parent category but not about the sub category\n * None, if classifier thinks it is neither a pizza nor a pasta (or is unsure about both)\n\n Example returns:\n\n return 'Pizza Salmone' # if classifier is sure about the sub category\n\n return 'Pizza' # if classifier is only sure that it is a pizza\n\n return None # totally unsure if it is a pasta or pizza\n '''\n #make sure name and desc are string\n name = str(name)\n description = str(description)\n \n if not('model_created' in globals()):\n global global_svm_cat\n global global_svm_parent_cat\n global model_created\n global global_decision_fraction\n global count_vect\n global tfidf_transformer\n global meals\n \n global_svm_cat = svm.SVC(probability=True,kernel='linear', C=1)\n global_svm_parent_cat = svm.SVC(probability=True,kernel='linear', C=1)\n model_created = False\n if not(model_created):\n model_created = True\n print \"Model is not created yet , creating it\"\n meals = Meals()\n with open('../data/meal_descriptions_train.csv', 'rb') as csvfile:\n r = csv.reader(csvfile, delimiter=',', quotechar='\"')\n rowIndex=0\n for row in r:\n if rowIndex >0:\n meals.id.append(row[0])\n meals.name.append(row[1])\n meals.description.append(row[2])\n meals.name_and_desc.append(row[1]+\" \"+row[2])\n meals.meal_category.append(row[3])\n meals.meal_parent_category.append(row[4])\n rowIndex = rowIndex +1\n meals.calcMealCategoryIndex()\n count_vect = CountVectorizer()\n X_train_counts = count_vect.fit_transform(meals.name_and_desc)\n\n tfidf_transformer = 
TfidfTransformer(use_idf=True).fit(X_train_counts)\n X_train_tfidf = tfidf_transformer.transform(X_train_counts)\n\n Y_cat = meals.meal_category_mapped\n Y_parent_cat = meals.meal_parent_category_mapped\n \n global_svm_parent_cat.fit(X_train_tfidf,Y_parent_cat)\n global_svm_cat.fit(X_train_tfidf,Y_cat)\n \n scores_cat = cross_validation.cross_val_score(global_svm_cat,X_train_tfidf, Y_cat, cv=5)\n scores_parent_cat = cross_validation.cross_val_score(global_svm_parent_cat,X_train_tfidf, Y_parent_cat, cv=5)\n print \"scores_cat => \"+ str(scores_cat) +str(np.mean(scores_cat))\n print \"scores_parent_cat =>\"+ str(scores_parent_cat) + str(np.mean(scores_parent_cat))\n \n else:\n print \"Model already created!\"\n #Apply the model\n new_text= name+\" \"+description\n new_text_list = [new_text]\n X_new_counts = count_vect.transform(new_text_list)\n X_new_tfidf = tfidf_transformer.transform(X_new_counts)\n \n \n num_cats = len(meals.meal_category_value_index)\n num_parent_cats = len(meals.meal_parent_category_value_index)\n \n pred_parent_cat = global_svm_parent_cat.predict_proba(X_new_tfidf)\n pred_parent_cat_index = global_svm_parent_cat.predict(X_new_tfidf)\n pred_parent_cat_max_prob = np.max(pred_parent_cat[0])\n \n pred_cat = global_svm_cat.predict_proba(X_new_tfidf)\n pred_cat_index = global_svm_cat.predict(X_new_tfidf)\n pred_cat_max_prob = np.max(pred_cat[0])\n \n cat_thr = 1.0/num_cats *2 #twice as the random classifier\n parent_cat_thr = 1.0/num_parent_cats*1.5 #1.5 times as random classifier\n \n if(pred_cat_max_prob >=cat_thr):\n pstr = meals.meal_category_index_value.get(pred_cat_index[0])\n return pstr\n if(pred_parent_cat_max_prob >= parent_cat_thr):\n pstr = meals.meal_parent_category_index_value.get(pred_parent_cat_index[0])\n return pstr\n return None\n# \n # TODO: insert call of your classifier code here\n raise NotImplementedError('Please implement the meal category classifier.')\n " }, { "alpha_fraction": 0.6877934336662292, "alphanum_fraction": 0.7300469279289246, "avg_line_length": 24.117647171020508, "blob_id": "c6045a98bace22cd8b13d81e6a0f024b510e8e2f", "content_id": "c92aff8fe9fc3bfab78db61ba6f0fe0029ff0533", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/src/test_exercise.py", "repo_name": "mbaddar1/sklearn-svm-classification", "src_encoding": "UTF-8", "text": "'''\nCreated on Feb 25, 2016\n\n@author: baddar\n'''\nimport exercise\nfrom exercise import predict_meal_category\n\nnew_name_1 = \"Pizza Capricciosa\"\nnew_desc_1 = \"mit Schinken, Paprika, frischen Champignons, Zwiebeln und Oliven\"\n\nnew_name_2 = \"Gnocchi Gorgonzola\"\nnew_desc_2 = \"mit Gorgonzola\"\nprint \"Hello\"\np1 = predict_meal_category(name=new_name_1, description=new_desc_1)\np2 = predict_meal_category(name=new_name_2, description=new_desc_2)\nprint(p1,p2)" }, { "alpha_fraction": 0.6685314774513245, "alphanum_fraction": 0.6856643557548523, "avg_line_length": 31.5, "blob_id": "f396bd5544023dd54e8f6df18d287d0df3048c88", "content_id": "eb3a513b8532605f394ddf7a58c0545668eb3b24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2860, "license_type": "no_license", "max_line_length": 105, "num_lines": 88, "path": "/src/ModelExperimentation.py", "repo_name": "mbaddar1/sklearn-svm-classification", "src_encoding": "UTF-8", "text": "'''\nCreated on Feb 23, 2016\n\n@author: 
baddar\n'''\n#http://stackoverflow.com/questions/20113206/scikit-learn-svc-decision-function-and-predict\n\nimport csv\nfrom Meals import Meals\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import svm\nfrom sklearn import cross_validation\nimport numpy as np\nmeals = Meals()\nwith open('../../data/meal_descriptions_train.csv', 'rb') as csvfile:\n    r = csv.reader(csvfile, delimiter=',', quotechar='\"')\n    rowIndex=0\n    for row in r:\n        if rowIndex >0:\n            meals.id.append(row[0])\n            meals.name.append(row[1])\n            meals.description.append(row[2])\n            meals.name_and_desc.append(row[1]+\" \"+row[2])\n            meals.meal_category.append(row[3])\n            meals.meal_parent_category.append(row[4])\n        rowIndex = rowIndex +1\nmeals.calcMealCategoryIndex()\nmeals.printStat()\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(meals.description)\n\ntfidf_transformer = TfidfTransformer(use_idf=True).fit(X_train_counts)\nX_train_tfidf = tfidf_transformer.transform(X_train_counts)\n\nY_cat = meals.meal_category_mapped\nY_parent_cat = meals.meal_parent_category_mapped\n\n\n\n'''\nSVM experiment\n'''\n#, 'poly','rbf', 'sigmoid'\n#np.arange(0.1,1.1,0.1)\nkernels = ['linear','poly', 'rbf', 'sigmoid']\nC_vals = [0.1,1,10]\n# for i in C_vals:\n#     C_vals.append(i)\nsvm_cat_results = list()\nsvm_parent_cat_results = list()\nfor C in C_vals:\n    for kernel in kernels:\n        svm_model_desc = \"SVM Model : kernel = \"+kernel+\" C = \"+str(C)\n        print \"Building \"+svm_model_desc\n        svm_model = svm.SVC(kernel=kernel, C=C)\n        \n        scores_cat = cross_validation.cross_val_score(svm_model,X_train_tfidf, Y_cat, cv=5)\n        scores_parent_cat = cross_validation.cross_val_score(svm_model,X_train_tfidf, Y_parent_cat, cv=5)\n        svm_cat_results.append((svm_model_desc,scores_cat,np.mean(scores_cat)))\n        svm_parent_cat_results.append((svm_model_desc,scores_parent_cat,np.mean(scores_parent_cat)))\nprint \"SVM Cat results\"\nprint svm_cat_results\nprint \"SVM Parent Cat Results\"\nprint svm_parent_cat_results\n\nmax_accuracy = 0\nidx=0\nmax_accuracy_index = -1\nfor res in svm_cat_results:\n    if res[2] > max_accuracy:\n        max_accuracy_index = idx\n        max_accuracy = res[2]\n    idx = idx + 1  # advance the index alongside the loop variable\nprint \"For cat classification\"\nprint \"max avg accuracy = \"+str(max_accuracy)\nprint \"Best model is \"+svm_cat_results[max_accuracy_index][0]\n\nmax_accuracy = 0\nidx=0\nmax_accuracy_index = -1\nfor res in svm_parent_cat_results:\n    if res[2] > max_accuracy:\n        max_accuracy_index = idx\n        max_accuracy = res[2]\n    idx = idx + 1  # advance the index alongside the loop variable\nprint \"For parent cat classification\"\nprint \"max avg accuracy = \"+str(max_accuracy)\nprint \"Best model is \"+svm_parent_cat_results[max_accuracy_index][0]\n" }, { "alpha_fraction": 0.6994670033454895, "alphanum_fraction": 0.7125871181488037, "avg_line_length": 33.36619567871094, "blob_id": "960e07d66fec9f50cc4b823efa4e4a8b907a6af0", "content_id": "2b87085cac77d5d10a36efcaa38e00887dcd034f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2439, "license_type": "no_license", "max_line_length": 98, "num_lines": 71, "path": "/src/FinalModelBuilder.py", "repo_name": "mbaddar1/sklearn-svm-classification", "src_encoding": "UTF-8", "text": "'''\nCreated on Feb 24, 2016\n\n@author: baddar\n'''\n\n#http://stackoverflow.com/questions/20113206/scikit-learn-svc-decision-function-and-predict\nimport csv\nfrom Meals import Meals\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom 
sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import svm\nfrom sklearn import cross_validation\nfrom sklearn.externals import joblib\nimport numpy as np\nfrom __builtin__ import str\n\nmeals = Meals()\nwith open('../data/meal_descriptions_train_reduced.csv', 'rb') as csvfile:\n r = csv.reader(csvfile, delimiter=',', quotechar='\"')\n rowIndex=0\n for row in r:\n if rowIndex >0:\n meals.id.append(row[0])\n meals.name.append(row[1])\n meals.description.append(row[2])\n meals.name_and_desc.append(row[1]+\" \"+row[2])\n meals.meal_category.append(row[3])\n meals.meal_parent_category.append(row[4])\n rowIndex = rowIndex +1\nmeals.calcMealCategoryIndex()\n\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(meals.name_and_desc)\n\ntfidf_transformer = TfidfTransformer(use_idf=True).fit(X_train_counts)\nX_train_tfidf = tfidf_transformer.transform(X_train_counts)\n\nY_cat = meals.meal_category_mapped\nY_parent_cat = meals.meal_parent_category_mapped\n\n\n''''\nFinal SVM model builder\n'''\nsvm_model = svm.SVC(probability=True,kernel='linear', C=1)\n\nsvm_model.fit(X_train_tfidf,Y_parent_cat)\n# joblib.dump(svm_model, './saved_models/svm_parent_cat.pkl')\n\n# svm_model.fit(X_train_tfidf,Y_cat)\n# joblib.dump(svm_model, './saved_models/svm_cat.pkl')\nscores_cat = cross_validation.cross_val_score(svm_model,X_train_tfidf, Y_cat, cv=10)\nscores_parent_cat = cross_validation.cross_val_score(svm_model,X_train_tfidf, Y_parent_cat, cv=10)\nprint \"scores_cat => \"+ str(scores_cat) +str(np.mean(scores_cat))\nprint \"scores_parent_cat =>\"+ str(scores_parent_cat) + str(np.mean(scores_parent_cat))\nsvm_model_cat_loaded = joblib.load('./saved_models/svm_cat.pkl')\n\ntest_desc = [\"mit Schinken, Paprika, frischen Champignons, Zwiebeln und Oliven\"]\n\nX_test_counts = count_vect.transform(test_desc)\nX_test_tfidf = tfidf_transformer.transform(X_test_counts)\np = svm_model.predict_proba(X_test_tfidf)\nprint \"p=>\"+str(p)\nprint \"p shape=>\"+str(p.shape)\nprint \"max p =>\"+str(np.max(p[0]))\n# print p[0]\n# print str(type(p))\n# print meals.meal_category_index_value.get(p[0])\nprint \"finished\"" } ]
6
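The pipeline shared by the scripts in this repo (count vectorizer, TF-IDF, probability-calibrated linear SVC, confidence threshold) fits in a few lines. A minimal sketch with toy data standing in for the meal CSV; the labels and the 0.5 threshold are illustrative assumptions, not values from the repo:

    import numpy as np
    from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
    from sklearn.svm import SVC

    texts = ["pizza salami cheese", "pizza tonno onions", "pizza hawaii ham",
             "pasta gorgonzola", "pasta pesto basil", "pasta carbonara bacon"]
    labels = [1, 1, 1, 2, 2, 2]  # 1 = Pizza, 2 = Pasta (placeholder categories)

    count_vect = CountVectorizer().fit(texts)
    tfidf = TfidfTransformer(use_idf=True).fit(count_vect.transform(texts))
    X = tfidf.transform(count_vect.transform(texts))

    clf = SVC(probability=True, kernel="linear", C=1).fit(X, labels)

    x_new = tfidf.transform(count_vect.transform(["pizza with extra cheese"]))
    probs = clf.predict_proba(x_new)[0]
    # Accept the prediction only when the classifier clears a confidence bar,
    # mirroring the threshold logic in exercise.py above.
    label = clf.predict(x_new)[0] if np.max(probs) >= 0.5 else None
    print(label)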
nurzat87/mycity
https://github.com/nurzat87/mycity
1e2449b5a0e3e3da1bc752922efeec7cb17db8ae
a199ded051e46ac85c152670c5362ab5653aaa3e
d5575ed638ee3530d1ef93d26fa1089dc8e36f27
refs/heads/main
2023-07-05T22:11:49.640063
2021-08-18T15:34:23
2021-08-18T15:34:23
393,715,708
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6745707392692566, "alphanum_fraction": 0.6794766783714294, "avg_line_length": 31.83783721923828, "blob_id": "5534acfe6cf5140be22a1b3d891e8b361ae43639", "content_id": "22e726d8e09d2a434df611eeec986496b3c300d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 93, "num_lines": 37, "path": "/core/views.py", "repo_name": "nurzat87/mycity", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import RetrieveAPIView\nfrom rest_framework.response import Response\n\nfrom .serializers import ProposalListSerializer, ProposalSerializer, ProposalCreateSerializer\n \nfrom .models import Proposal\n\n\n\nclass ProposalListAPIView(APIView):\n def get(self, request, *args, **kwargs):\n proposals = Proposal.objects.all()\n proposals_json = ProposalListSerializer(proposals, many=True)\n return Response(data=proposals_json.data)\n\n\nclass ProposalCreateAPIView(APIView):\n def post(self,request, *args, **kwargs):\n data = request.POST\n serializer = ProposalCreateSerializer(data=data)\n if serializer.is_valid():\n proposal = serializer.save()\n json_data = ProposalSerializer(instance=proposal)\n return Response(json_data.data, 201) \n return Response(\n data={\n \"message\":\"Data not valid\",\n \"errors\": serializer.errors\n },\n status=400\n )\n\nclass ProposalRetreveAPIView(RetrieveAPIView):\n queryset = Proposal.objects.all()\n serializer_class = ProposalSerializer\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7983871102333069, "alphanum_fraction": 0.7983871102333069, "avg_line_length": 16.714284896850586, "blob_id": "1d44bd7bef845b88710b040acd368bed7a43bdec", "content_id": "12cbd0caf6033b079b5a92320e5c8aa5aa970ac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/core/admin.py", "repo_name": "nurzat87/mycity", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Proposal\n\n\nadmin.site.register(Proposal)\n\n# Register your models here.\n" }, { "alpha_fraction": 0.6819012761116028, "alphanum_fraction": 0.6819012761116028, "avg_line_length": 25.850000381469727, "blob_id": "b6b26207888d62b970caeb7588b6323c94c43b36", "content_id": "4464bbe453b056fdf471a9c42b07a2df231aae27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 60, "num_lines": 20, "path": "/core/serializers.py", "repo_name": "nurzat87/mycity", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom rest_framework import serializers\nfrom .models import Proposal\n\n\nclass ProposalListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Proposal\n fields = ['id', 'title']\n\nclass ProposalCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Proposal\n fields = ['title', 'description']\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n class Meta:\n model = Proposal\n fields = ['id', 'title', 'description', 'photo']\n \n\n" } ]
3
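A short client-side sketch of exercising the create and retrieve views above with requests; the URL paths are assumptions, since the project's urls.py is not included in this record:

    import requests

    base = "http://localhost:8000"
    # ProposalCreateSerializer expects title and description.
    created = requests.post(f"{base}/proposals/",
                            json={"title": "Fix road",
                                  "description": "Potholes on Main St."}).json()

    # ProposalRetreveAPIView serves a single proposal by primary key.
    proposal = requests.get(f"{base}/proposals/{created['id']}/").json()
    print(proposal["title"])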
eratner/ucb-jaco-control
https://github.com/eratner/ucb-jaco-control
f18c05266e3367fc1eb2f8e09a16441ec85f0b43
acc9598bbce5c954f95d9377f167dbf2d94d6364
b1c6f0e4afffda8ce856d35c559cbd5ced9d1a8a
refs/heads/master
2020-05-04T19:32:38.968564
2019-05-09T05:42:05
2019-05-09T05:42:05
179,398,503
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6245090365409851, "alphanum_fraction": 0.6520031690597534, "avg_line_length": 32.06493377685547, "blob_id": "d813ac2ce1ed95afded98410ffc59d7baa2f05cb", "content_id": "de2258d98b014f1b3f1bb4e500704f57fbbffcb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2546, "license_type": "no_license", "max_line_length": 93, "num_lines": 77, "path": "/ucb_jaco_control/test/test_pid_regulation_controller.cpp", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#include <ucb_jaco_control/pid_regulation_controller.h>\n#include <extern/matplotlibcpp/matplotlibcpp.h>\n#include <gtest/gtest.h>\n#include <iostream>\n#include <vector>\n\nnamespace plt = matplotlibcpp;\n\nTEST(PIDRegulationController, testConstruction)\n{\n const unsigned int N = 3;\n ucb_jaco_control::PIDRegulationController<N> controller({1, 2, 3},\n {10, 20, 30},\n {100, 200, 300});\n\n ASSERT_FLOAT_EQ(1, controller.getProportionalGainMatrix().diagonal()(0));\n ASSERT_FLOAT_EQ(2, controller.getProportionalGainMatrix().diagonal()(1));\n ASSERT_FLOAT_EQ(3, controller.getProportionalGainMatrix().diagonal()(2));\n\n ASSERT_FLOAT_EQ(10, controller.getDerivativeGainMatrix().diagonal()(0));\n ASSERT_FLOAT_EQ(20, controller.getDerivativeGainMatrix().diagonal()(1));\n ASSERT_FLOAT_EQ(30, controller.getDerivativeGainMatrix().diagonal()(2));\n\n ASSERT_FLOAT_EQ(100, controller.getIntegralGainMatrix().diagonal()(0));\n ASSERT_FLOAT_EQ(200, controller.getIntegralGainMatrix().diagonal()(1));\n ASSERT_FLOAT_EQ(300, controller.getIntegralGainMatrix().diagonal()(2));\n}\n\nTEST(PIDRegulationController, testSingleIntegrator)\n{\n // Simulation of a single integrator system (unit point mass in 1D).\n const unsigned int N = 1;\n const double dt = 0.1;\n const unsigned int T = 25;\n\n ucb_jaco_control::PIDRegulationController<N>::StateVector setpoint;\n setpoint << 0;\n\n ucb_jaco_control::PIDRegulationController<N>::AugmentedStateVector state;\n state << 1, 0;\n\n ucb_jaco_control::PIDRegulationController<N> controller({1.75}, {0.0}, {0.25}, setpoint);\n\n std::vector<double> pos;\n std::vector<double> times;\n std::vector<double> setpoints;\n\n for (int t = 0; t < T; ++t)\n {\n pos.push_back(state(0));\n times.push_back(t * dt);\n setpoints.push_back(setpoint(0));\n\n std::cout << \"t = \" << static_cast<double>(t) * dt << \", state = \" << state << std::endl;\n\n ucb_jaco_control::PIDRegulationController<N>::ControlVector control =\n controller.getControl(state, dt);\n\n // std::cout << \" error = \" << controller.getError() << std::endl;\n // std::cout << \" control = \" << control << std::endl;\n\n // Update the state.\n state(0) += dt * control(0);\n state(1) = control(0);\n }\n\n plt::named_plot(\"State\", times, pos);\n plt::named_plot(\"Setpoint\", times, setpoints);\n plt::legend();\n plt::show();\n}\n\nint main(int argc, char *argv[])\n{\n testing::InitGoogleTest(&argc, argv);\n return RUN_ALL_TESTS();\n}\n" }, { "alpha_fraction": 0.6959761381149292, "alphanum_fraction": 0.7034277319908142, "avg_line_length": 22.13793182373047, "blob_id": "2fccc95e0aaf17d6dde07ad93edb694cc6c5abc7", "content_id": "3897492af4a105e75a0cc4a3c53320e806cc00c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 671, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/ucb_jaco_control/include/ucb_jaco_control/robot_dynamics.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", 
"text": "#ifndef UCB_JACO_CONTROL_ROBOT_DYNAMICS_H\n#define UCB_JACO_CONTROL_ROBOT_DYNAMICS_H\n\n#include <Eigen/Eigen>\n\nnamespace ucb_jaco_control\n{\n\ntemplate <unsigned int DOF>\nclass RobotDynamics\n{\npublic:\n typedef Eigen::Matrix<double, DOF, DOF> Matrix;\n typedef Eigen::Matrix<double, DOF, 1> Vector;\n\n virtual Matrix getInertiaMatrix(const Vector& pos) = 0;\n\n virtual Vector getCoriolisVector(const Vector& pos,\n const Vector& vel) = 0;\n\n virtual Vector getGravityVector(const Vector &pos) = 0;\n\n virtual Vector getFrictionVector(const Vector &vel) = 0;\n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_ROBOT_DYNAMICS_H\n" }, { "alpha_fraction": 0.6037464141845703, "alphanum_fraction": 0.6098703145980835, "avg_line_length": 24.943925857543945, "blob_id": "4070a362b3aeab917523c1ff544a1b198dc7a722", "content_id": "8230f1d967e5475cf93212a16b565a9ccf8ae852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2776, "license_type": "no_license", "max_line_length": 80, "num_lines": 107, "path": "/ucb_jaco_control/include/ucb_jaco_control/urdf_robot_dynamics.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#ifndef UCB_JACO_CONTROL_URDF_ROBOT_DYNAMICS_H\n#define UCB_JACO_CONTROL_URDF_ROBOT_DYNAMICS_H\n\n#include <ucb_jaco_control/robot_dynamics.h>\n#include <kdl_parser/kdl_parser.hpp>\n#include <kdl/chaindynparam.hpp>\n#include <kdl/jntspaceinertiamatrix.hpp>\n#include <exception>\n#include <iostream>\n\nnamespace ucb_jaco_control\n{\n\ntemplate <unsigned int DOF>\nclass URDFRobotDynamics : public RobotDynamics<DOF>\n{\npublic:\n typedef Eigen::Matrix<double, DOF, DOF> Matrix;\n typedef Eigen::Matrix<double, DOF, 1> Vector;\n\n URDFRobotDynamics(const std::string& urdf_file,\n const std::string& root_name,\n const std::string& tip_name)\n {\n KDL::Tree tree;\n if (!kdl_parser::treeFromFile(urdf_file, tree))\n throw std::invalid_argument(\"Could not read URDF file \" + urdf_file);\n\n // for (auto s : tree.getSegments())\n // std::cout << s.first << std::endl;\n\n KDL::Chain chain;\n if (!tree.getChain(\"j2s7s300_link_base\",\n \"j2s7s300_end_effector\",\n chain))\n {\n throw std::invalid_argument(\"Could not get chain with root \" + root_name +\n \" and tip \" + tip_name);\n }\n\n // TODO: Is this correct? 
Make into a parameter.\n KDL::Vector gravity(0.0, 0.0, -9.8);\n dynamic_params_ = new KDL::ChainDynParam(chain, gravity);\n }\n\n ~URDFRobotDynamics()\n {\n delete dynamic_params_;\n dynamic_params_ = nullptr;\n }\n\n Matrix getInertiaMatrix(const Vector& pos)\n {\n KDL::JntArray pos_array(DOF);\n pos_array.data = pos;\n\n KDL::JntSpaceInertiaMatrix M(DOF);\n int result = dynamic_params_->JntToMass(pos_array, M);\n // TODO: Check for errors.\n\n return Eigen::Ref<Matrix>(M.data);\n }\n\n Vector getCoriolisVector(const Vector& pos,\n const Vector& vel)\n {\n KDL::JntArray pos_array(DOF);\n pos_array.data = pos;\n\n KDL::JntArray vel_array(DOF);\n vel_array.data = pos;\n\n KDL::JntArray C(DOF);\n int result = dynamic_params_->JntToCoriolis(pos_array,\n vel_array,\n C);\n // TODO: Check for errors.\n\n return Eigen::Ref<Vector>(C.data);\n }\n\n Vector getGravityVector(const Vector& pos)\n {\n KDL::JntArray pos_array(DOF);\n pos_array.data = pos;\n\n KDL::JntArray G(DOF);\n int result = dynamic_params_->JntToGravity(pos_array, G);\n // TODO: Check for errors.\n\n return Eigen::Ref<Vector>(G.data);\n }\n\n virtual Vector getFrictionVector(const Vector& vel)\n {\n // Subclass to add custom frictional forces. \n return Vector::Zero();\n }\n\nprivate:\n KDL::ChainDynParam *dynamic_params_;\n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_URDF_ROBOT_DYNAMICS_H\n" }, { "alpha_fraction": 0.5641143918037415, "alphanum_fraction": 0.5668818950653076, "avg_line_length": 16.3439998626709, "blob_id": "43063ee4ec4241bf3856816d3a07e956ed6ad5b2", "content_id": "9b3849f00a5fd6ff54f387ee908a4fc2f14ec2d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2168, "license_type": "no_license", "max_line_length": 77, "num_lines": 125, "path": "/ucb_jaco_control/include/ucb_jaco_control/buffer.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#ifndef UCB_JACO_CONTROL_BUFFER_H\n#define UCB_JACO_CONTROL_BUFFER_H\n\n#include <vector>\n#include <iostream>\n#include <stdexcept>\n\nnamespace ucb_jaco_control\n{\n\ntemplate <typename DataType>\nclass Buffer\n{\npublic:\n Buffer(unsigned int size)\n : size_(size), count_(0), front_(nullptr), back_(nullptr)\n {\n }\n\n ~Buffer()\n {\n clear();\n }\n\n void add(const DataType& data)\n {\n if (count_ == size_)\n {\n // Buffer is full, so remove the oldest item.\n DataNode *to_delete = front_;\n front_->next->prev = nullptr;\n front_ = front_->next;\n\n delete to_delete;\n to_delete = nullptr;\n --count_;\n }\n\n // Add the new data to the back.\n DataNode *node = new DataNode(data, nullptr, back_);\n if (back_)\n back_->next = node;\n else\n {\n // First node, need to set front pointer.\n front_ = node;\n }\n\n back_ = node;\n ++count_;\n }\n\n std::vector<DataType> getData() const\n {\n // Insert the data into a vector ordered with front at data[0].\n std::vector<DataType> data(count_);\n\n DataNode *node = front_;\n for (unsigned int i = 0; i < count_; ++i)\n {\n data[i] = node->data;\n node = node->next;\n }\n\n return data;\n }\n\n const DataType& back() const\n {\n if (count_ == 0)\n throw std::out_of_range(\"Buffer is empty!\");\n\n return back_->data;\n }\n\n const DataType& front() const\n {\n if (count_ == 0)\n throw std::out_of_range(\"Buffer is empty!\");\n\n return front_->data;\n }\n\n int count() const\n {\n return count_;\n }\n\n void clear()\n {\n count_ = 0;\n DataNode* node = front_;\n while (node)\n {\n DataNode* next = node->next;\n delete node;\n node = 
next;\n }\n }\n\nprivate:\n struct DataNode\n {\n DataNode(const DataType& d, DataNode* n = nullptr, DataNode* p = nullptr)\n : data(d), next(n), prev(p)\n {\n }\n\n DataType data;\n\n DataNode* next;\n DataNode* prev;\n };\n\n DataNode* front_;\n DataNode* back_;\n\n unsigned int size_; // Maximum size of the buffer.\n unsigned int count_; // Current size of the buffer. \n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_BUFFER_H\n" }, { "alpha_fraction": 0.7699275612831116, "alphanum_fraction": 0.7753623127937317, "avg_line_length": 24.090909957885742, "blob_id": "76f31b29e27ca5b1478bf9ff4086322ab4dab6d6", "content_id": "0b1c2def8f0272c5b643a1a78178bd8b695e0fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 552, "license_type": "no_license", "max_line_length": 76, "num_lines": 22, "path": "/ucb_jaco_control/include/ucb_jaco_control/state_feedback_controller.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#ifndef UCB_JACO_CONTROL_STATE_FEEDBACK_CONTROLLER_H\n#define UCB_JACO_CONTROL_STATE_FEEDBACK_CONTROLLER_H\n\n#include <Eigen/Eigen>\n\nnamespace ucb_jaco_control\n{\n\ntemplate <unsigned int StateDim, unsigned int ControlDim>\nclass StateFeedbackController\n{\npublic:\n typedef Eigen::Matrix<double, StateDim, 1> StateVector;\n typedef Eigen::Matrix<double, ControlDim, 1> ControlVector;\n\n virtual ControlVector getControl(const StateVector& state, double dt) = 0;\n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_STATE_FEEDBACK_CONTROLLER_H\n" }, { "alpha_fraction": 0.6322706341743469, "alphanum_fraction": 0.6368946433067322, "avg_line_length": 26.952381134033203, "blob_id": "25aa8e5807ecaf2f35c0505ba11b115c5a822268", "content_id": "470553b37f24a41f707bf4b3bf0a02e635bd6ce1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4109, "license_type": "no_license", "max_line_length": 99, "num_lines": 147, "path": "/ucb_jaco_control/include/ucb_jaco_control/pid_regulation_controller.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#ifndef UCB_JACO_CONTROL_PID_REGULATION_CONTROLLER_H\n#define UCB_JACO_CONTROL_PID_REGULATION_CONTROLLER_H\n\n#include <ucb_jaco_control/state_feedback_controller.h>\n#include <array>\n#include <angles/angles.h> // TODO: Would be nice not to depend on ROS packages here.\n\nnamespace ucb_jaco_control\n{\n\ntemplate <unsigned int StateDim>\nclass PIDRegulationController : public StateFeedbackController<2*StateDim, StateDim>\n{\npublic:\n typedef Eigen::Matrix<double, StateDim, 1> StateVector;\n typedef Eigen::Matrix<double, 2*StateDim, 1> AugmentedStateVector;\n typedef Eigen::Matrix<double, StateDim, 1> ControlVector;\n\n PIDRegulationController(const std::array<double, StateDim>& proportional_gain,\n const std::array<double, StateDim>& integral_gain,\n const std::array<double, StateDim>& derivative_gain,\n const StateVector& setpoint = StateVector::Zero(),\n bool wrap_angles = false)\n : setpoint_(setpoint)\n {\n setProportionalGain(proportional_gain);\n setIntegralGain(integral_gain);\n setDerivativeGain(derivative_gain);\n reset();\n }\n\n ~PIDRegulationController()\n {\n }\n\n void setProportionalGain(const std::array<double, StateDim>& gain)\n {\n Eigen::Matrix<double, StateDim, 1>& diag = proportional_gain_.diagonal();\n for (int i = 0; i < StateDim; ++i)\n diag(i) = gain[i];\n }\n\n void setIntegralGain(const std::array<double, StateDim>& gain)\n {\n 
Eigen::Matrix<double, StateDim, 1>& diag = integral_gain_.diagonal();\n for (int i = 0; i < StateDim; ++i)\n diag(i) = gain[i];\n }\n\n void setDerivativeGain(const std::array<double, StateDim>& gain)\n {\n Eigen::Matrix<double, StateDim, 1>& diag = derivative_gain_.diagonal();\n for (int i = 0; i < StateDim; ++i)\n diag(i) = gain[i];\n }\n\n const Eigen::DiagonalMatrix<double, StateDim>& getProportionalGainMatrix() const\n {\n return proportional_gain_;\n }\n\n const Eigen::DiagonalMatrix<double, StateDim>& getIntegralGainMatrix() const\n {\n return integral_gain_;\n }\n\n const Eigen::DiagonalMatrix<double, StateDim>& getDerivativeGainMatrix() const\n {\n return derivative_gain_;\n }\n\n const StateVector& getSetpoint() const\n {\n return setpoint_;\n }\n\n void setSetpoint(const StateVector& setpoint)\n {\n setpoint_ = setpoint;\n }\n\n void reset()\n {\n // Set integral to zero.\n integral_ = StateVector::Zero();\n\n // Clear last error.\n last_error_ = StateVector::Zero();\n }\n\n const StateVector& getError() const\n {\n return last_error_;\n }\n\n ControlVector getControl(const AugmentedStateVector& state, double dt)\n {\n StateVector pos = state.block(0, 0, StateDim, 1);\n StateVector vel = state.block(StateDim, 0, StateDim, 1);\n\n // Compute the error and derivative.\n StateVector err = diff(setpoint_, pos);\n StateVector err_deriv = -vel;\n\n // Update the integral.\n integral_ += dt * err;\n\n ControlVector control = -1.0 * proportional_gain_ * err\n - derivative_gain_ * err_deriv\n - integral_gain_ * integral_;\n\n // Update the last error.\n last_error_ = err;\n\n return control;\n }\n\nprotected:\n StateVector diff(const StateVector &a, const StateVector &b) const\n {\n StateVector d;\n if (wrap_angles_)\n {\n for (int i = 0; i < StateDim; ++i)\n d(i) = angles::shortest_angular_distance(a(i), b(i));\n }\n else\n d = b - a;\n\n return d;\n }\n\n Eigen::DiagonalMatrix<double, StateDim> proportional_gain_; // Proportional gain matrix.\n Eigen::DiagonalMatrix<double, StateDim> derivative_gain_; // Derivative gain matrix.\n Eigen::DiagonalMatrix<double, StateDim> integral_gain_; // Integral gain matrix.\n\n StateVector setpoint_; // Setpoint to regulate to.\n bool wrap_angles_; // If set, wraps values to [-pi, pi).\n\n StateVector integral_;\n StateVector last_error_;\n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_PID_REGULATION_CONTROLLER_H\n" }, { "alpha_fraction": 0.7163832783699036, "alphanum_fraction": 0.7256568670272827, "avg_line_length": 24.372549057006836, "blob_id": "826c105ef3affaefea5b2e9ee9dabad7c9801c88", "content_id": "71e42fbd4c3a558ffc891c11a05777d637fec9c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 85, "num_lines": 51, "path": "/ucb_jaco_control/include/ucb_jaco_control/test_controller_ros.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#ifndef UCB_JACO_CONTROL_TEST_CONTROLLER_ROS_H\n#define UCB_JACO_CONTROL_TEST_CONTROLLER_ROS_H\n\n#include <controller_interface/controller.h>\n#include <hardware_interface/joint_command_interface.h>\n#include <ucb_jaco_control/pid_regulation_controller.h>\n#include <std_msgs/Float64MultiArray.h>\n#include <dynamic_reconfigure/server.h>\n#include <ucb_jaco_control/PIDGainsConfig.h>\n#include <cmath>\n\n#define P_GAIN 50.0\n#define D_GAIN 0.0\n#define I_GAIN 0.0\n\nnamespace ucb_jaco_control\n{\n\nclass TestControllerROS :\n public 
controller_interface::Controller<hardware_interface::EffortJointInterface>\n{\npublic:\n TestControllerROS();\n\n ~TestControllerROS();\n\n bool init(hardware_interface::EffortJointInterface* hw,\n ros::NodeHandle& nh);\n\n void starting(ros::Time& time);\n\n void update(const ros::Time& time, const ros::Duration& period);\n\n void stopping(const ros::Time& time);\n\nprivate:\n void pidGainsCallback(PIDGainsConfig& config, uint32_t level);\n\n std::vector<hardware_interface::JointHandle> joint_handle_;\n\n PIDRegulationController<7> controller_;\n\n ros::Publisher error_pub_;\n\n dynamic_reconfigure::Server<PIDGainsConfig>* server_;\n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_TEST_CONTROLLER_ROS_H\n" }, { "alpha_fraction": 0.6081441640853882, "alphanum_fraction": 0.6421895623207092, "avg_line_length": 28.372549057006836, "blob_id": "163d295a2d784b05860f25e737acba24f5d7835d", "content_id": "217134c31e68ba965e1bacecddebcc0d8ccd8e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1498, "license_type": "no_license", "max_line_length": 90, "num_lines": 51, "path": "/ucb_jaco_control/test/test_robot_dynamics.cpp", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#include <ucb_jaco_control/robot_dynamics.h>\n#include <ucb_jaco_control/urdf_robot_dynamics.h>\n#include <gtest/gtest.h>\n#include <iostream>\n#include <exception>\n\nTEST(RobotDynamics, testLoadURDF)\n{\n // Generated URDF using command\n // rosrun xacro xacro.py `rospack find\n // kinova_description`/urdf/j2s7s300_standalone.xacro > j2s7s300.urdf\n std::string urdf_file = \"/home/eratner/j2s7s300.urdf\";\n std::string root_name = \"j2s7s300_link_base\";\n std::string tip_name = \"j2s7s300_end_effector\";\n\n ucb_jaco_control::RobotDynamics<7> *dynamics = nullptr;\n\n try\n {\n dynamics = new ucb_jaco_control::URDFRobotDynamics<7>(urdf_file, root_name, tip_name);\n }\n catch (const std::exception &e)\n {\n std::cout << \"Error: \" << e.what() << std::endl;\n }\n\n Eigen::Matrix<double, 7, 1> pos;\n pos << 0, 0, 0, 0, 0, 0, 0;\n Eigen::Matrix<double, 7, 7> M = dynamics->getInertiaMatrix(pos);\n std::cout << \"%% Inertia Matrix: \" << std::endl;\n std::cout << M << std::endl;\n\n Eigen::Matrix<double, 7, 1> vel;\n vel << 0, 0, 0, 0, 0, 0, 0;\n Eigen::Matrix<double, 7, 1> C = dynamics->getCoriolisVector(pos, vel);\n std::cout << \"%% Coriolis Vector: \" << std::endl;\n std::cout << C << std::endl;\n\n Eigen::Matrix<double, 7, 1> G = dynamics->getGravityVector(pos);\n std::cout << \"%% Gravity Vector: \" << std::endl;\n std::cout << G << std::endl;\n\n delete dynamics;\n dynamics = nullptr;\n}\n\nint main(int argc, char *argv[])\n{\n testing::InitGoogleTest(&argc, argv);\n return RUN_ALL_TESTS();\n}\n" }, { "alpha_fraction": 0.61654132604599, "alphanum_fraction": 0.6741854548454285, "avg_line_length": 32.33333206176758, "blob_id": "f33b6147143b59d86e9eadf087687f3ffd53783d", "content_id": "300318dccdea4ea5e0e853167a150cc87b1a1bdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/ucb_jaco_control/config/PIDGains.cfg", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nPACKAGE = \"ucb_jaco_control\"\n\nfrom dynamic_reconfigure.parameter_generator_catkin import *\n\ngen = ParameterGenerator()\n\ngen.add(\"p_gain\", double_t, 0, \"Proportional gain.\", 
100, 0, 1000)\ngen.add(\"i_gain\", double_t, 0, \"Integral gain.\", 0, 0, 1000)\ngen.add(\"d_gain\", double_t, 0, \"Derivative gain.\", 0, 0, 1000)\n\nexit(gen.generate(PACKAGE, \"ucb_jaco_control\", \"PIDGains\"))" }, { "alpha_fraction": 0.6649168729782104, "alphanum_fraction": 0.6797900199890137, "avg_line_length": 20.148147583007812, "blob_id": "c2115eaa34012921dcb8ee426a6247fde2471a22", "content_id": "414d1b0266bd28ab50bd5ce2d8cc081c17bd4b48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1143, "license_type": "no_license", "max_line_length": 67, "num_lines": 54, "path": "/ucb_jaco_control/test/test_constant_trajectory.cpp", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#include <ucb_jaco_control/trajectory.h>\n#include <gtest/gtest.h>\n#include <iostream>\n\nnamespace ucb_jaco_control\n{\n\ntemplate <unsigned int Dim>\nclass ConstantTrajectory : public Trajectory<Dim>\n{\npublic:\n ConstantTrajectory(const Eigen::Matrix<double, Dim, 1> &setpoint)\n : setpoint_(setpoint)\n {\n }\n\n Eigen::Matrix<double, Dim, 1> getDesiredPosition(double t)\n {\n return setpoint_;\n }\n\n Eigen::Matrix<double, Dim, 1> getDesiredVelocity(double t)\n {\n return Eigen::Matrix<double, Dim, 1>::Zero();\n }\n\n Eigen::Matrix<double, Dim, 1> getDesiredAcceleration(double t)\n {\n return Eigen::Matrix<double, Dim, 1>::Zero();\n }\n\nprivate:\n Eigen::Matrix<double, Dim, 1> setpoint_;\n\n};\n\n}\n\nTEST(ConstantTrajectory, testTraj)\n{\n Eigen::Matrix<double, 2, 1> setpoint;\n setpoint << 1.5, 2.3;\n\n ucb_jaco_control::ConstantTrajectory<2> traj(setpoint);\n std::cout << traj.getDesiredPosition(0) << std::endl;\n std::cout << traj.getDesiredVelocity(0) << std::endl;\n std::cout << traj.getDesiredAcceleration(0) << std::endl;\n}\n\nint main(int argc, char *argv[])\n{\n testing::InitGoogleTest(&argc, argv);\n return RUN_ALL_TESTS();\n}\n\n" }, { "alpha_fraction": 0.646073579788208, "alphanum_fraction": 0.6565074324607849, "avg_line_length": 28.852458953857422, "blob_id": "902038f6aadc11a57a753a5c2d67c4a6ae2bb7fe", "content_id": "f54cf6db092ea03c3b9313658b8fa313dd9818e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3642, "license_type": "no_license", "max_line_length": 88, "num_lines": 122, "path": "/ucb_jaco_control/src/test_controller_ros.cpp", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#include <ucb_jaco_control/test_controller_ros.h>\n#include <pluginlib/class_list_macros.hpp>\n\nnamespace ucb_jaco_control\n{\n\nTestControllerROS::TestControllerROS()\n : controller_({P_GAIN, P_GAIN, P_GAIN, P_GAIN, P_GAIN, P_GAIN, P_GAIN},\n {I_GAIN, I_GAIN, I_GAIN, I_GAIN, I_GAIN, I_GAIN, I_GAIN},\n {D_GAIN, D_GAIN, D_GAIN, D_GAIN, D_GAIN, D_GAIN, D_GAIN},\n PIDRegulationController<7>::StateVector::Zero(),\n true)\n{\n}\n\nTestControllerROS::~TestControllerROS()\n{\n delete server_;\n server_ = nullptr;\n}\n\nbool TestControllerROS::init(hardware_interface::EffortJointInterface* hw,\n ros::NodeHandle& nh)\n{\n ROS_INFO_STREAM(\"Initializing test controller\");\n\n std::vector<std::string> joint_names;\n if (!nh.getParam(\"joints\", joint_names))\n {\n ROS_ERROR(\"Failed to get joint names from parameter server!\");\n return false;\n }\n\n for (int i = 0; i < joint_names.size(); ++i)\n {\n ROS_INFO_STREAM(\"Getting handle for joint \" << joint_names[i]);\n try\n {\n joint_handle_.push_back(hw->getHandle(joint_names[i]));\n }\n catch (const 
hardware_interface::HardwareInterfaceException& e)\n {\n ROS_ERROR_STREAM(\"Error: \" << e.what());\n return false;\n }\n }\n\n // TODO: Implement a topic/service for the commanded setpoint.\n PIDRegulationController<7>::StateVector setpoint;\n setpoint << 0.0, M_PI_2, 0.0, M_PI_2, 0.0, M_PI_2, 0.0;\n controller_.setSetpoint(setpoint);\n\n // Publisher for the errors.\n error_pub_ = nh.advertise<std_msgs::Float64MultiArray>(\"errors\", 1);\n\n // Dynamic reconfigure for the PID gains.\n server_ = new dynamic_reconfigure::Server<PIDGainsConfig>(nh);\n server_->setCallback(boost::bind(&TestControllerROS::pidGainsCallback, this, _1, _2));\n\n ROS_INFO_STREAM(\"Setpoint is \" << controller_.getSetpoint());\n\n return true;\n}\n\nvoid TestControllerROS::starting(ros::Time& time)\n{\n ROS_INFO_STREAM(\"Starting test controller at time \" << time);\n\n controller_.reset();\n}\n\nvoid TestControllerROS::update(const ros::Time& time, const ros::Duration& period)\n{\n PIDRegulationController<7>::AugmentedStateVector state;\n for (int i = 0; i < 7; ++i)\n state(i) = joint_handle_[i].getPosition();\n for (int i = 0; i < 7; ++i)\n state(i + 7) = joint_handle_[i].getVelocity();\n\n const double dt = period.toSec();\n PIDRegulationController<7>::ControlVector control = controller_.getControl(state, dt);\n\n for (int i = 0; i < 7; ++i)\n joint_handle_[i].setCommand(control(i));\n\n // Publish the errors.\n std_msgs::Float64MultiArray error_msg;\n const PIDRegulationController<7>::StateVector& error = controller_.getError();\n for (int i = 0; i < 7; ++i)\n error_msg.data.push_back(error(i));\n\n error_pub_.publish(error_msg);\n}\n\nvoid TestControllerROS::stopping(const ros::Time& time)\n{\n ROS_INFO_STREAM(\"Stopping test controller at time \" << time);\n}\n\nvoid TestControllerROS::pidGainsCallback(PIDGainsConfig& config, uint32_t level)\n{\n ROS_INFO_STREAM(\"Reconfiguring with proportional gain: \" << config.p_gain <<\n \", integral gain: \" << config.i_gain << \", derivative gain: \" <<\n config.d_gain);\n\n std::array<double, 7> p_gain;\n p_gain.fill(config.p_gain);\n controller_.setProportionalGain(p_gain);\n\n std::array<double, 7> i_gain;\n i_gain.fill(config.i_gain);\n controller_.setIntegralGain(i_gain);\n\n std::array<double, 7> d_gain;\n d_gain.fill(config.d_gain);\n controller_.setDerivativeGain(d_gain);\n}\n\n} // namespace ucb_jaco_control\n\nPLUGINLIB_EXPORT_CLASS(ucb_jaco_control::TestControllerROS,\n controller_interface::ControllerBase)\n" }, { "alpha_fraction": 0.727642297744751, "alphanum_fraction": 0.7398374080657959, "avg_line_length": 20.39130401611328, "blob_id": "451946ccbf76ef4ba1e768637f81aef39ce8a5da", "content_id": "ea944388e25f7b393fdb6006957c5c9dca50a05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 492, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/ucb_jaco_control/include/ucb_jaco_control/trajectory.h", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#ifndef UCB_JACO_CONTROL_TRAJECTORY_H\n#define UCB_JACO_CONTROL_TRAJECTORY_H\n\n#include <Eigen/Eigen>\n\nnamespace ucb_jaco_control\n{\n\ntemplate <unsigned int Dim>\nclass Trajectory\n{\npublic:\n virtual Eigen::Matrix<double, Dim, 1> getDesiredPosition(double t) = 0;\n\n virtual Eigen::Matrix<double, Dim, 1> getDesiredVelocity(double t) = 0;\n\n virtual Eigen::Matrix<double, Dim, 1> getDesiredAcceleration(double t) = 0;\n\n};\n\n} // namespace ucb_jaco_control\n\n#endif // UCB_JACO_CONTROL_TRAJECTORY_H\n" 
}, { "alpha_fraction": 0.5832752585411072, "alphanum_fraction": 0.602787435054779, "avg_line_length": 21.40625, "blob_id": "37e6f641ca39e335943fe77d27b3b27cc98643c1", "content_id": "0127d88c6ecafa7b46373755f53f19074a1a4d70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1435, "license_type": "no_license", "max_line_length": 48, "num_lines": 64, "path": "/ucb_jaco_control/test/test_buffer.cpp", "repo_name": "eratner/ucb-jaco-control", "src_encoding": "UTF-8", "text": "#include <ucb_jaco_control/buffer.h>\n#include <gtest/gtest.h>\n#include <iostream>\n#include <stdexcept>\n\nusing ucb_jaco_control::Buffer;\n\ntemplate <typename T>\nvoid print(const std::vector<T> &vec)\n{\n for (int i = 0; i < vec.size(); ++i)\n {\n std::cout << vec[i];\n if (i < vec.size() - 1)\n std::cout << \", \";\n }\n std::cout << std::endl;\n}\n\nTEST(Buffer, testBuffer)\n{\n Buffer<double> buff(3);\n\n ASSERT_EQ(buff.getData().size(), 0);\n ASSERT_THROW(buff.back(), std::out_of_range);\n ASSERT_THROW(buff.front(), std::out_of_range);\n\n buff.add(1);\n ASSERT_EQ(buff.count(), 1);\n ASSERT_EQ(buff.getData().size(), 1);\n ASSERT_EQ(buff.back(), 1);\n ASSERT_EQ(buff.front(), 1);\n\n buff.add(2);\n ASSERT_EQ(buff.count(), 2);\n ASSERT_EQ(buff.getData().size(), 2);\n ASSERT_EQ(buff.front(), 1);\n ASSERT_EQ(buff.back(), 2);\n\n buff.add(3);\n ASSERT_EQ(buff.count(), 3);\n ASSERT_EQ(buff.getData().size(), 3);\n ASSERT_EQ(buff.front(), 1);\n ASSERT_EQ(buff.back(), 3);\n\n buff.add(4);\n ASSERT_EQ(buff.count(), 3);\n ASSERT_EQ(buff.getData().size(), 3);\n ASSERT_EQ(buff.front(), 2);\n ASSERT_EQ(buff.back(), 4);\n\n std::vector<double> data = buff.getData();\n std::vector<double> expected_data = {2, 3, 4};\n for (int i = 0; i < data.size(); ++i)\n ASSERT_FLOAT_EQ(expected_data[i], data[i]);\n\n print(buff.getData());\n}\n\nint main(int argc, char *argv[])\n{\n testing::InitGoogleTest(&argc, argv);\n return RUN_ALL_TESTS();\n}\n\n" } ]
13
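The `TestControllerROS::update` loop in the row above reads joint positions and velocities, asks a `PIDRegulationController<7>` for a control vector, and writes the result back as joint commands. A minimal Python sketch of that PID regulation step follows; it is an illustration only, not the repository's C++ implementation, and the gains, the 7-joint setpoint, and the use of `-velocity` as the error derivative for a fixed setpoint are assumptions.

```python
import numpy as np

# Hedged sketch of a PID regulation step like the one update() performs
# each control cycle. Gains and setpoint values are illustrative.
class PIDRegulator:
    def __init__(self, kp, ki, kd, setpoint):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.setpoint = setpoint
        self.integral = np.zeros_like(setpoint)

    def get_control(self, position, velocity, dt):
        error = self.setpoint - position      # regulation error
        self.integral += error * dt           # accumulated error
        # For a fixed setpoint, d(error)/dt = -velocity.
        return self.kp * error + self.ki * self.integral - self.kd * velocity

setpoint = np.array([0.0, np.pi / 2, 0.0, np.pi / 2, 0.0, np.pi / 2, 0.0])
pid = PIDRegulator(kp=10.0, ki=0.1, kd=1.0, setpoint=setpoint)
command = pid.get_control(np.zeros(7), np.zeros(7), dt=0.01)
print(command)
```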
safiyamufti/String-Translator
https://github.com/safiyamufti/String-Translator
18db3f15e3f8666bc0d28917ffcef725f1cc938b
f69bf8e5390a9d85b9bdb7aae9dda3821430b426
da21ec5b9d7e914dc5bf772830607e1ae1898cd7
refs/heads/master
2023-05-17T08:40:33.226187
2021-06-15T21:22:06
2021-06-15T21:22:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5305067300796509, "alphanum_fraction": 0.5387797355651855, "avg_line_length": 21.511627197265625, "blob_id": "80a04356cac673630e62e040521c76572d2e79b1", "content_id": "490856df5e155eeee9bd772f71799c94f206df47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1008, "license_type": "no_license", "max_line_length": 63, "num_lines": 43, "path": "/translate.py", "repo_name": "safiyamufti/String-Translator", "src_encoding": "UTF-8", "text": "import uvicorn\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nclass Translate:\n def __init__(self):\n pass\n\n def translate(self, lang, text, output):\n if output == \"fr\":\n return \"Bonjour\"\n elif output == \"sp\":\n return \"Hola\"\n elif output == \"zh\":\n return \"你好\"\n elif output == \"jp\":\n return \"こんにちは\"\n elif output == \"ar\":\n return \"مرحبا\"\n elif output == \"ko\":\n return \"안녕하세요\"\n elif output == \"hd\":\n return \"नमस्ते\"\n\nclass Item(BaseModel):\n lang : str\n text : str\n output : str\n\[email protected](\"/\")\ndef read_root():\n return {\"Hello\": \"World\"}\n\[email protected](\"/translate\")\ndef translate(item : Item):\n trans = Translate()\n result = trans.translate(item.lang, item.text, item.output)\n return {\"translation\" : result}\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)" } ]
1
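The `translate.py` service in the row above exposes a `POST /translate` route that accepts an `Item` with `lang`, `text`, and `output` fields and returns a `translation`. A hedged usage sketch, assuming the server is running locally on port 8000 as in its `uvicorn.run` call; the payload values are illustrative.

```python
import requests

# Illustrative call against the FastAPI service above, assumed to be
# running locally (uvicorn on port 8000).
payload = {"lang": "en", "text": "Hello", "output": "fr"}
resp = requests.post("http://localhost:8000/translate", json=payload)
print(resp.json())  # expected: {"translation": "Bonjour"}
```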
sinclairliang/Ciphers
https://github.com/sinclairliang/Ciphers
3c3b2fd37068bca4dd75346434d3a1fcda3a9c28
eab1ada7b8a6d3430fcc82fe0781b43d478c4074
3c85b1c25d6fadd506e26c9f5df06fe725379b6d
refs/heads/master
2020-04-13T00:21:29.883484
2020-01-29T08:59:49
2020-01-29T08:59:49
162,843,600
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4858958423137665, "alphanum_fraction": 0.5240235328674316, "avg_line_length": 34.256832122802734, "blob_id": "1d6b53f014a76f76b71a38ad5e833e964768c35b", "content_id": "364c27bc8ce8ac306f9aeb631416aada10188ecf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6452, "license_type": "permissive", "max_line_length": 113, "num_lines": 183, "path": "/Books_Cipher.py", "repo_name": "sinclairliang/Ciphers", "src_encoding": "UTF-8", "text": "# Sinclair Liang\nimport time\nimport copy\nimport re\nimport sys\nimport pyprind\nimport os\n\n\ndef banner():\n banner = '''\n \n oooooooooo. oooo \n `888' `Y8b `888 \n 888 888 .ooooo. .ooooo. 888 oooo .oooo.o \n 888oooo888' d88' `88b d88' `88b 888 .8P' d88( \"8 \n 888 `88b 888 888 888 888 888888. `\"Y88b. \n 888 .88P 888 888 888 888 888 `88b. o. )88b \n o888bood8P' `Y8bod8P' `Y8bod8P' o888o o888o 8\"\"888P' \n \n \n \n .oooooo. o8o oooo \n d8P' `Y8b `\"' `888 \n 888 oooo oo.ooooo. 888 .oo. .ooooo. oooo d8b \n 888 `888 888' `88b 888P\"Y88b d88' `88b `888\"\"8P \n 888 888 888 888 888 888 888ooo888 888 \n `88b ooo 888 888 888 888 888 888 .o 888 \n `Y8bood8P' o888o 888bod8P' o888o o888o `Y8bod8P' d888b \n 888 \n o888o \n \n '''\n return banner\n\n\ndef cleaning_text(book_address):\n \"\"\"\n :param book_address: the address of the book used as the key\n :return: words without any special characters\n \"\"\"\n original_text = open(book_address).read()\n new_text = re.sub('[^a-zA-Z0-9]', r' ', original_text)\n re.sub(' +', ' ', new_text).strip()\n return new_text\n\n\ndef cleaning_cipher(cipher_file_address):\n \"\"\"\n :param cipher_file_address: the address of the encrypted file\n :return: a list of integers\n \"\"\"\n cipher_text = open(cipher_file_address).read()\n cipher_text_list = cipher_text[1:-1].split(\",\")\n for i in range(0, len(cipher_text_list)):\n cipher_text_list[i] = int(cipher_text_list[i])\n return cipher_text_list\n\n\ndef get_words(text):\n \"\"\"\n :param text: words without any special characters\n :return: a list of words\n \"\"\"\n list_of_words = list()\n words = re.split(r'\\s+', text)\n for word in words:\n list_of_words.append(word.upper())\n return list_of_words\n\n\ndef get_numbers(words):\n \"\"\"\n :param words: a list of words\n :return: a list containing numbers indicating the position of words\n \"\"\"\n numbers = dict()\n for i in range(0, len(words) - 1):\n if len(words[i]) > 0:\n current_letter = words[i][0]\n sys.stdout.write('\\r')\n sys.stdout.write(\"Now encrypting word: \" + current_letter)\n sys.stdout.flush()\n if current_letter in numbers:\n numbers[current_letter].append(i + 1)\n else:\n numbers[current_letter] = list()\n numbers[current_letter].append(i + 1)\n return numbers\n\n\ndef encode(plaintext, numbers):\n \"\"\"\n :param plaintext: plaintext\n :param numbers: a list containing numbers indicating the position of words\n :return: A list of numbers after encryption\n \"\"\"\n length = len(plaintext)\n bar = pyprind.ProgBar(length, width=40)\n\n numbers_to_use = copy.deepcopy(numbers)\n output_list = list()\n for i in range(0, length):\n current_letter = plaintext[i].upper()\n if current_letter in numbers_to_use:\n output_list.append(numbers_to_use[current_letter].pop(0))\n bar.update()\n return output_list\n\n\ndef decode(ciphertext, numbers):\n \"\"\"\n :param ciphertext: ciphertext\n :param numbers: a list containing numbers indicating the position of words\n :return: plaintext without space nor special characters\n 
\"\"\"\n length = len(ciphertext)\n bar = pyprind.ProgBar(length, width=40)\n # test 1\n numbers_to_use = copy.deepcopy(numbers)\n output_list = list()\n for i in range(0, length):\n current_number = int(ciphertext[i])\n for letter, number_list in numbers_to_use.items():\n if current_number in number_list:\n output_list.append(letter)\n bar.update()\n return ''.join(output_list)\n\n\ndef main():\n ENCODE = False\n DECODE = False\n while ENCODE is False and DECODE is False:\n mode = input(\"Would you like to (E)ncode ot (D)ecode a file?\\n\")\n if mode == 'E':\n ENCODE = True\n DECODE = False\n\n if mode == 'D':\n ENCODE = False\n DECODE = True\n\n if ENCODE:\n plaintext_file_address = input(\n \"Please enter the address of the file you would like to encrypt\\n\")\n key_file = input(\n \"Please enter the address of the key file you would like to use\\n\")\n start_time = time.time()\n file = open(\"encrypted_file\", \"w\")\n plaintext = cleaning_text(plaintext_file_address)\n words = get_words(cleaning_text(key_file))\n numbers = get_numbers(words)\n l = encode(plaintext, numbers)\n file.write(str(l))\n file.close()\n end_time = time.time()\n print(\"Your encrypted file has been generated! \" + \"%s %d m %.2f s \" % (\n \"Finish encrypting! Time elapsed:\", int((end_time - start_time) / 60), (end_time - start_time) % 60))\n os.system(\"open encrypted_file\")\n\n if DECODE:\n cipher_file_address = input(\n \"Please enter the address of the file you would like to decrypt\\n\")\n key_file = input(\n \"Please enter the address of the key file you would like to use\\n\")\n start_time = time.time()\n file = open(\"decrypted_file\", \"w\")\n cipher_list = cleaning_cipher(cipher_file_address)\n words = get_words(cleaning_text(key_file))\n numbers = get_numbers(words)\n d = decode(cipher_list, numbers)\n file.write(d)\n file.close()\n end_time = time.time()\n print(\"Your decrypted file has been generated! \" + \"%s %d m %.2f s \" % (\n \"Finish decrypting! Time elapsed:\", int((end_time - start_time) / 60), (end_time - start_time) % 60))\n os.system(\"open decrypted_file\")\n\n\nif __name__ == '__main__':\n sys.stdout.write(banner())\n main()\n" }, { "alpha_fraction": 0.6783867478370667, "alphanum_fraction": 0.7300930619239807, "avg_line_length": 32.379310607910156, "blob_id": "10c5ed58d424e93e26a54ea0e5d1ce0c2c208b18", "content_id": "648fb62587a4c16bc98474ff77e7ef73144b92b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 967, "license_type": "permissive", "max_line_length": 148, "num_lines": 29, "path": "/README.md", "repo_name": "sinclairliang/Ciphers", "src_encoding": "UTF-8", "text": "# Books Cipher ##\n#### A simple command line programme to perform books cipher to encrypt and decrypt any secret messages\n\n## ***Background Knowledge of Book Cipher***\n\n[![080901dr01-f2.gif](https://i.postimg.cc/Kj50rBzh/080901dr01-f2.gif)](https://postimg.cc/QKHgjB1f)\n(Source: drdobbs.com)\n\nIn other words, this cipher is close to impossible to break without the knowing the key book. \n\n## ***Features***\n- show real time progress of encryption and decryption\n- supports both encryption and decryption\n\n## ***Module Installation***\n` pip install -r requirements.txt `\n\n\n## ***Download Books Cipher***\n\nYou can download the latest version of `Ciphers` by cloning the GitHub repository.\n\n` git clone https://github.com/sinclairliang/Ciphers.git`\n\n## ***Examples***\n\nA demonstration using `Henry V` as the key book. 
\n\n[![Screen-Shot-2018-12-23-at-11-53-25-PM.jpg](https://i.postimg.cc/qvNmyMfB/Screen-Shot-2018-12-23-at-11-53-25-PM.jpg)](https://postimg.cc/HrC4CmsF)" }, { "alpha_fraction": 0.7051724195480347, "alphanum_fraction": 0.7310344576835632, "avg_line_length": 24.2608699798584, "blob_id": "45a26950c2c70b6b5b68c932dce664648457424c", "content_id": "a3acbd1b501ed43030412387ed4765a1a818ef05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "permissive", "max_line_length": 143, "num_lines": 23, "path": "/gui.py", "repo_name": "sinclairliang/Ciphers", "src_encoding": "UTF-8", "text": "import tkinter\nfrom tkinter import *\n\n\n\n\ndef testFunction():\n print(\"Hello World\")\n\nwindow = tkinter.Tk()\npathStr = StringVar()\nwindow.title(\"Book Cipher\")\ntkinter.Label(window, text=\"Path of the message\").grid(row=0)\npath = Entry(window).grid(row=0, column=1)\n\ntkinter.Label(window, text=\"Path of the key\").grid(row=1)\ntkinter.Entry(window).grid(row=1, column=1)\n\ntkinter.Button(window, text=\"Enter\").grid(row=2)\n\ntextArea0 = tkinter.Text(window, height=10, width=20, highlightcolor=\"green\", highlightthickness=1, borderwidth=1, relief=\"groove\").grid(row=3)\n\nwindow.mainloop()" } ]
3
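The `Books_Cipher.py` program above encodes each plaintext letter as the position of a not-yet-used key-book word starting with that letter, and decodes by mapping positions back to word initials. A self-contained toy sketch of that idea; the nine-word key text is hypothetical and stands in for a whole book.

```python
# Toy book-cipher sketch: letter -> 1-based position of an unused key word
# that starts with that letter; decoding maps positions back to initials.
key_words = "the quick brown fox jumps over the lazy dog".upper().split()

positions = {}
for i, word in enumerate(key_words, start=1):
    positions.setdefault(word[0], []).append(i)

def encode(plaintext):
    pools = {k: list(v) for k, v in positions.items()}
    # Letters with no (remaining) matching key word are silently skipped.
    return [pools[ch].pop(0) for ch in plaintext.upper() if pools.get(ch)]

def decode(numbers):
    return ''.join(key_words[n - 1][0] for n in numbers)

cipher = encode("told")
print(cipher)          # [1, 6, 8, 9]
print(decode(cipher))  # TOLD
```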
boogiedev/UFO-SIGHTINGS
https://github.com/boogiedev/UFO-SIGHTINGS
2e90b919ed2ec159c90ee2af904e7628680f441b
8ace9acf6ee43def4ccb90a3f47f81db03396394
fc0878d1bc8a142c2621aa7b45606e99aefd67b5
refs/heads/master
2022-07-03T23:14:37.062327
2020-05-09T00:06:44
2020-05-09T00:06:44
262,439,657
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6208423972129822, "alphanum_fraction": 0.6395136713981628, "avg_line_length": 46.77593231201172, "blob_id": "e1d8ab26c0085ab0181325710341c44dbe87940d", "content_id": "e73b504be6ddc24d8e5e83fef73040af575f407b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11557, "license_type": "permissive", "max_line_length": 1400, "num_lines": 241, "path": "/README.md", "repo_name": "boogiedev/UFO-SIGHTINGS", "src_encoding": "UTF-8", "text": "![NLP Header](https://github.com/boogiedev/UFO-SIGHTINGS/blob/master/media/ufo-header.png)\n\n<p align=\"center\">\n <img src=\"https://img.shields.io/badge/Maintained%3F-IN PROG-blue?style=flat-square\"></img>\n <img src=\"https://img.shields.io/github/commit-activity/m/boogiedev/UFO-SIGHTINGS?style=flat-square\"></img>\n</p>\n\n\n## Team X Æ A-12 (*X Ash A Twelve*)\n\n[Tyler Woods](https://github.com/tylerjwoods) | [Joseph Shanks](https://github.com/josephshanks) | [Wesley Nguyen](https://github.com/boogiedev)\n---|---|---|\n\n \n## Table of Contents\n\n- [Basic Overview](#basic-overview)\n - [Context](#context)\n - [Goal](#goal)\n- [Exploring Data](#exploring-data)\n - [Initial Intake](#initial-intake)\n - [Feature Engineering](#feature-engineering)\n- [Language Processing](#language-processing)\n - [Tokenizing](#tokenizing)\n - [Visualizations](#visualizations)\n- [Future Considerations](#future-considerations)\n- [License](#license)\n\n\n## Basic Overview\n\n### Context\n\n<img align=\"right\" src=\"https://i.pinimg.com/236x/32/47/16/324716a77ab7183025a1ad46786de375--x-files-funny-love-puns.jpg\">\n\nIt's a bird... it's a plane...it's... a U.F.O. sighting? Over the course of human history U.F.O. sightings seem to be commonplace; commonly described as \"flying saucers\", strange lights and objects or straight up \"Aliens\". There are a lot of unknowns that surround the idea of unidentified flying objects, but one thing that is known, are that people are continually fascinated by them. Today, we are looking at reported U.F.O. sightings from ![THE NATIONAL UFO REPORTING CENTER](http://www.nuforc.org/). These are anonymous reports from various people all over the U.S. and sometimes even internationally.\n\n### Goal\n\nUsing Natural Language Processing, we are hoping to parse through these sighting reports and explore possible commonalities, insights, and sentiment about these suspicious objects. By doing this, we are hoping to gain a more concrete truth of whether these sightings are figments of people's imaginations, or that there might be actually be an alien overlord visiting us from time to time.\n\n> If reports from Alabama mention that this mysterious \"object\" has the shape of a rectangle, while reports from other states express the same thing, is it possible to make a connection here?\n\n\n## Exploring Data\n\nSOURCE | TIMEFRAME | N_RECORDS\n:-------------------------:|:-------------------------:|:-------------------------:|\n![NUFORC](http://www.nuforc.org/) | MAY 10th 2017 | 100\n\n<img align=\"right\" src=\"https://raw.githubusercontent.com/boogiedev/UFO-SIGHTINGS/master/media/nuforc.PNG\"></img>\n\nThe specific data that we are focusing on today are U.F.O. reports from May 10th, 2017, with a total of 99 sightings. 
Below is a preview of the format that the data comes in from the website.\n\n<br/>\n\n<p align=\"center\">\n <img src=\"https://raw.githubusercontent.com/boogiedev/UFO-SIGHTINGS/master/media/dataexcerpt.PNG\"></img>\n</p>\n\n\n\n### Initial Intake\n\nHere is a detailed description of the intake data:\n- `ID`: Report ID\n- `url`: Report URL\n- `html`: Raw HTML from report\n- `time`: Date Time Object of the report\n\n<p align=\"center\">\n <img src=\"https://raw.githubusercontent.com/boogiedev/UFO-SIGHTINGS/master/media/dfbefore.PNG\"></img>\n</p>\n\n\n### Feature Engineering / Data Cleaning\n\nLooking at this data, we noticed that there were a couple things that could be cleaned and changed. \n\n```python\ndef clean_data(data:pd.DataFrame) -> pd.DataFrame:\n \"\"\"Cleaner for UFO DataFrame\"\"\"\n # Copy data to avoid collision\n df_copy = data.copy()\n \n # Rename ID Column for Clarity\n df_copy.columns.values[0] = 'ID'\n \n # Convert Time to DateTime Object\n df_copy['time'] = pd.to_datetime(df_copy['time'])\n \n # Parse data from HTML Column\n df_copy['state'] = None\n df_copy['content'] = None\n df_copy['shape'] = None\n for i in range(len(df_copy)):\n soup = BeautifulSoup(df_copy['html'][i], 'html.parser')\n meta_data = soup.find_all('tbody')[0].find_all('tr')[0]\n s = meta_data.get_text('|', strip=True).split(\"|\")\n # store data into a dictionary\n s_dict = {x.partition(\":\")[0]:x.partition(\":\")[-1] for x in s}\n state = s_dict['Location'][-2:]\n df_copy.loc[i, 'state'] = state\n entry = soup.find_all('tbody')[0].find_all('tr')[1]\n df_copy.loc[i, 'content'] = entry.get_text(strip=True)\n duration = s_dict['Duration']\n df_copy.loc[i, 'duration'] = duration\n shape = s_dict['Shape']\n df_copy.loc[i, 'shape'] = shape\n \n return df_copy\n```\n\nWe ended up parsing the data in the 'HTML' rows of our dataframe in order to extract useful information, using the cleaner function above we succesfully created new columns. The resule of this are below:\n\n- `ID`: Report ID\n- `url`: Report URL\n- `html`: Raw HTML from report\n- `time`: Date Time Object of the report\n- `state`: Reported Location\n- `content`: Content of the report\n- `shape`: Alleged Shape of the UFO\n- `duration`: Duration of the report sighting\n\n<p align=\"center\">\n <img src=\"https://raw.githubusercontent.com/boogiedev/UFO-SIGHTINGS/master/media/dfafter.PNG\"></img>\n</p>\n\n\n---\n## Language Processing\n\n### Tokenizing\n\nAfter creating our stop words list and removing punctuations we tokenized our documents. “Tokenize” means creating “tokens” which are atomic units of the text. These tokens are words extracted by splitting the document.We then used the “SnowballStemmer” to stem our tokenized words. We decided to use the snowball stemmer over the WordNetLemmatizer or the PorterStemmer. The reason for this is show below. 
\n\n```python\n\nporter = PorterStemmer()\nsnowball = SnowballStemmer('english')\nwordnet = WordNetLemmatizer()\n\ndocs_porter = [[porter.stem(word) for word in words]\n for words in doc_filter]\ndocs_snowball = [[snowball.stem(word) for word in words]\n for words in doc_filter]\ndocs_wordnet = [[wordnet.lemmatize(word) for word in words]\n for words in doc_filter]\n\n\n## Print the stemmed and lemmatized words from the first document\nprint(“%16s | %16s | %16s | %16s |” % (“WORD”, “PORTER”, “SNOWBALL”, “LEMMATIZER”))\nfor i in range(min(len(docs_porter[0]), len(docs_snowball[0]), len(docs_wordnet[0]))):\n p, s, w = docs_porter[0][i], docs_snowball[0][i], docs_wordnet[0][i]\n if len(set((p, s, w))) != 1:\n print(“%16s | %16s | %16s | %16s |” % (doc_filter[0][i], p, s, w))\n```\n```\n WORD | PORTER | SNOWBALL | LEMMATIZER |\n hovered | hover | hover | hovered |\n looked | look | look | looked |\n helicopter | helicopt | helicopt | helicopter |\n stayed | stay | stay | stayed |\n disappeared | disappear | disappear | disappeared |\n appears | appear | appear | appears |\n us | us | us | u |\n consistent | consist | consist | consistent |\n sighting | sight | sight | sighting |\n venus | venu | venus | venus |\n\n```\n\nWe chose to stem the words with the Snowball Stemmer due to its preservation of important words for this usecase such as ‘venus’. The Snowball Stemmmer normalizes these words from its appeared form into their root form. We now have our list of clean tokens for each document! We turned this into a pandas Series to compute the TF-IDF\n\n\n### Visualizations\n\nChoropleth Map: \n\n> The choropleth map shown in the notebook 'choropleth_map.ipynb' shows the number of reports from each state in the time period, as well as the three most common words from those reports of each state.\nWith this information, we see a detailed image of where the reports are coming from and what the reports are talking about.\n15:51\n\nObserved UFO Shapes:\n\nThe bar chart shows what the most common shapes are in the reports. 
We can see that 'Circles' and 'Teardrops' are common shapes, as well as individuals reporting just seeing 'Light'.\n\n\n<p align=\"center\">\n <img src=\"https://github.com/boogiedev/UFO-SIGHTINGS/blob/master/media/observed_ufo_shapes.png?raw=true\"></img>\n</p>\n\nChanges in Vocabulary Size Per Minimum Document Frequency:\n\n<p align=\"center\">\n <img src=\"https://raw.githubusercontent.com/boogiedev/UFO-SIGHTINGS/master/media/mindf.PNG\"></img>\n</p>\n\n```\n0.1 -- vocabulary (len=157): ['light', 'look', 'nuforc', 'helicopt', 'first', 'bright', 'pd', 'disappear', 'way', 'went', 'sight', 'report', 'seen', 'us', 'one', 'like', 'east', 'appear', 'note', 'hover', 'could', 'sky', 'provid', 'elect', 'stationari', 'star', 'inform', 'anonym', 'contact', 'remain', 'sourc', 'travel', 'notic', 'fli', 'someth', 'approxim', 'clear', 'see', 'would', 'wit', 'sound', 'come', 'direct', 'near', 'craft', 'saw', 'west', 'air', 'north', 'feet', 'object', 'tree', 'mayb', 'shape', 'side', 'view', 'size', 'orang', 'circl', 'never', 'hous', 'gone', 'pass', 'time', 'seem', 'move', 'low', 'almost', 'straight', 'white', 'plane', 'still', 'anoth', 'know', 'quick', 'toward', 'made', 'outsid', 'normal', 'stop', 'make', 'flash', 'mile', 'distanc', 'high', 'insid', 'chang', 'thought', 'go', 'minut', 'back', 'second', 'watch', 'show', 'around', 'two', 'ball', 'even', 'away', 'night', 'south', 'thing', 'came', 'point', 'color', 'end', 'green', 'complet', 'take', 'drive', 'reflect', 'window', 'line', 'nois', 'noth', 'ufo', 'blue', 'left', 'speed', 'red', 'behind', 'live', 'area', 'aircraft', 'get', 'slowli', 'head', 'flew', 'glow', 'across', 'right', 'slow', 'phone', 'fast', 'also', 'larg', 'home', 'cloud', 'big', 'tri', 'photo', 'indic', 'turn', 'video', 'three', 'got', 'eye', 'float', 'moon', 'face', 'street', 'later', 'front', 'observ', 'start', 'visibl', 'think']\n0.2 -- vocabulary (len=54): ['light', 'look', 'nuforc', 'first', 'bright', 'pd', 'disappear', 'went', 'sight', 'report', 'seen', 'one', 'like', 'east', 'appear', 'note', 'could', 'sky', 'provid', 'elect', 'inform', 'anonym', 'contact', 'remain', 'sourc', 'notic', 'see', 'would', 'wit', 'sound', 'direct', 'craft', 'saw', 'west', 'object', 'shape', 'time', 'seem', 'move', 'white', 'plane', 'still', 'stop', 'thought', 'go', 'minut', 'back', 'second', 'watch', 'around', 'two', 'night', 'south', 'get']\n0.3 -- vocabulary (len=15): ['light', 'look', 'nuforc', 'bright', 'pd', 'report', 'one', 'like', 'note', 'could', 'sky', 'see', 'saw', 'object', 'move']\n0.4 -- vocabulary (len=10): ['light', 'look', 'nuforc', 'pd', 'like', 'note', 'sky', 'saw', 'object', 'move']\n0.5 -- vocabulary (len=5): ['light', 'look', 'sky', 'saw', 'move']\n0.6 -- vocabulary (len=1): ['light']\n0.7 -- vocabulary (len=0): []\n0.8 -- vocabulary (len=0): []\n0.9 -- vocabulary (len=0): []\n1.0 -- vocabulary (len=0): []\n\n```\n\nDocument Frequencies at Minimum of 0.5\n\n```python\n# See words with a high frequency threshhold 50%\nthresh = 0.5\nfor word, freq in doc_freq.items():\n if freq >= thresh:\n print(f\"{word}: {freq}\")\n\n```\n```\nlight: 0.6767676767676768\nlook: 0.5252525252525253\nsky: 0.5656565656565656\nsaw: 0.5151515151515151\nmove: 0.5858585858585859\n```\n\n\n## Future Considerations\n\nUsing NaieveBayes to test comminalities of words used to derive if these occurences are related.\n\n\nDo the U.F.O. 
sightings have a similar distribution of reports from states?\n\n\n## License\n[MIT ©](https://choosealicense.com/licenses/mit/)\n\n" }, { "alpha_fraction": 0.57472825050354, "alphanum_fraction": 0.5801630616188049, "avg_line_length": 31.04347801208496, "blob_id": "21a6f67912f15d3411de92a24adb866144597efa", "content_id": "97146881717a1d86f1ddc79ed81866ff0030d04d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "permissive", "max_line_length": 56, "num_lines": 23, "path": "/src/helpers.py", "repo_name": "boogiedev/UFO-SIGHTINGS", "src_encoding": "UTF-8", "text": "import unicodedata\n\n\ndef remove_accents(input_str:str) -> str:\n '''Removes accents from input string'''\n nfkd_form = unicodedata.normalize('NFKD', input_str)\n only_ascii = nfkd_form.encode('ASCII', 'ignore')\n return only_ascii.decode()\n\ndef filter_tokens(tokens:list, stops:object) -> list:\n \"\"\"Filters tokens base on membership in stop list\"\"\"\n# split_punc = lambda x: \n res = []\n check = [\".\", \"-\"]\n for token in tokens:\n if token not in stops and token.isalpha():\n if check[0] in token:\n res += token.partition(check[0])\n elif check[1] in token:\n res += token.partition(check[1])\n else:\n res.append(token)\n return res" }, { "alpha_fraction": 0.5703999996185303, "alphanum_fraction": 0.5776000022888184, "avg_line_length": 31.921052932739258, "blob_id": "5743c311e5494a90819a20e3e76a01c5399e548a", "content_id": "6c856efbd1fe4c8e786a45f8b47536aeefe5ffe3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "permissive", "max_line_length": 70, "num_lines": 38, "path": "/src/cleaner.py", "repo_name": "boogiedev/UFO-SIGHTINGS", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport re\nfrom bs4 import BeautifulSoup\n\ndef clean_data(data:pd.DataFrame) -> pd.DataFrame:\n \"\"\"Cleaner for UFO DataFrame\"\"\"\n # Copy data to avoid collision\n df_copy = data.copy()\n \n \n # Rename ID Column for Clarity\n df_copy.columns.values[0] = 'ID'\n \n # Convert Time to DateTime Object\n df_copy['time'] = pd.to_datetime(df_copy['time'])\n \n # Parse data from HTML Column\n df_copy['state'] = None\n df_copy['content'] = None\n df_copy['shape'] = None\n for i in range(len(df_copy)):\n soup = BeautifulSoup(df_copy['html'][i], 'html.parser')\n meta_data = soup.find_all('tbody')[0].find_all('tr')[0]\n s = meta_data.get_text('|', strip=True).split(\"|\")\n # store data into a dictionary\n s_dict = {x.partition(\":\")[0]:x.partition(\":\")[-1] for x in s}\n state = s_dict['Location'][-2:]\n df_copy.loc[i, 'state'] = state\n entry = soup.find_all('tbody')[0].find_all('tr')[1]\n df_copy.loc[i, 'content'] = entry.get_text(strip=True)\n duration = s_dict['Duration']\n df_copy.loc[i, 'duration'] = duration\n shape = s_dict['Shape']\n df_copy.loc[i, 'shape'] = shape\n \n \n return df_copy" }, { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6482251286506653, "avg_line_length": 28.78095245361328, "blob_id": "0141eaf88bcb4cfde8cd4ad6000965409bbc87eb", "content_id": "d208c1ca1e50849fe016d21acc9585f1f4214bd8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3127, "license_type": "permissive", "max_line_length": 83, "num_lines": 105, "path": "/src/state_common_words.py", "repo_name": "boogiedev/UFO-SIGHTINGS", "src_encoding": "UTF-8", "text": "# Import Modules\nfrom 
collections import Counter\nimport numpy as np\nimport pandas as pd\nimport re\nimport matplotlib.pyplot as plt\nfrom bs4 import BeautifulSoup\nimport json\nfrom pprint import pprint\nimport string\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# NLTK Modules\nimport nltk\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom src.cleaner import clean_data\nfrom src.helpers import *\n\ndef common_words(df):\n clean_df = df.copy()\n\n # Create Documents\n documents = clean_df['content']\n\n # Set Stop Words\n stop = set(stopwords.words('english'))\n # Set Stop Punctuations\n puncs = set(string.punctuation)\n # Merge Stops\n full_stop = stop.union(puncs)\n # full_stop\n\n # Tokenize Words from Documents\n tokens = [word_tokenize(doc.lower()) for doc in documents]\n\n # Filter each token for stop words\n doc_filter = [filter_tokens(token, full_stop) for token in tokens]\n\n snowball = SnowballStemmer('english')\n\n docs_snowball = [[snowball.stem(word) for word in words]\n for words in doc_filter]\n\n # Stem Words in Each Document\n clean_tokens = [list(map(snowball.stem, sent)) for sent in doc_filter]\n # clean_tokens\n\n # Check for stray tokens (ones with weird puncs, not alphabetical strings)\n strays = []\n for i in range(len(clean_tokens)):\n # print(\"--- sentence tokens (lemmatize): {}\".format(tokens_lemmatize[i]))\n for word in clean_tokens[i]:\n if not word.isalpha():\n strays.append(word)\n set(strays)\n\n # Documents to series\n document_series = pd.Series([\" \".join(x) for x in clean_tokens])\n\n # term occurence = counting distinct words in each bag\n term_occ = [Counter(doc) for doc in clean_tokens]\n # term_occ\n term_freq = list()\n for i in range(len(clean_tokens)):\n term_freq.append( {k: (v / float(len(clean_tokens[i])))\n for k, v in term_occ[i].items()} )\n\n # Get Document Frequencies\n doc_occ = Counter( [word for token in clean_tokens for word in set(token)] )\n\n doc_freq = {k: (v / float(len(clean_tokens)))\n for k, v in doc_occ.items()}\n\n # doc_freq\n\n # See words with a high frequency threshhold 50%\n thresh = 0.5\n for word, freq in doc_freq.items():\n if freq >= thresh:\n pass\n #print(f\"{word}: {freq}\")\n\n # Get Vocabulary\n\n # the minimum document frequency (in proportion of the length of the corpus)\n min_df = 0.5\n\n # filtering items to obtain the vocabulary\n vocabulary = [ k for k,v in doc_freq.items() if v >= min_df ]\n\n # print vocabulary\n #print (\"-- vocabulary (len={}): {}\".format(len(vocabulary),vocabulary))\n\n x = np.arange(0.1, 1.1, 0.1)\n\n all_vocabs = [[ k for k,v in doc_freq.items() if v >= thresh ] for thresh in x]\n for vocab in all_vocabs:\n pass\n #pprint(\"-- vocabulary (len={}): {}\".format(len(vocab),vocab))\n\n return all_vocabs\n" } ]
4
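The README and `state_common_words.py` above both build a vocabulary by keeping words whose document frequency clears a `min_df` threshold. A hedged, self-contained sketch of that filtering step on a toy three-document corpus; the real pipeline tokenizes, filters stop words, and stems first, and the sample words are illustrative.

```python
from collections import Counter

# Toy corpus: each document is already a list of cleaned tokens.
docs = [["light", "sky", "move"], ["light", "craft"], ["light", "sky"]]

# Document frequency: fraction of documents containing each word.
doc_occ = Counter(word for doc in docs for word in set(doc))
doc_freq = {w: n / len(docs) for w, n in doc_occ.items()}

# Keep only words whose document frequency clears min_df.
min_df = 0.5
vocabulary = [w for w, f in doc_freq.items() if f >= min_df]
print(sorted(vocabulary))  # ['light', 'sky']
```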
nityanandmathur/TheNewNormal
https://github.com/nityanandmathur/TheNewNormal
f1b6997269addc2f9641f0e842c6c063bffba7ce
bf8a4a5272224e484d44a97a1a0140c6bc893bb7
52ecb0b15ea82ad9ad5d943ca6ceeb6b3048d986
refs/heads/main
2023-08-23T10:23:37.828272
2021-10-31T04:24:16
2021-10-31T04:24:16
422,658,636
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.6370468735694885, "alphanum_fraction": 0.656940758228302, "avg_line_length": 29.513513565063477, "blob_id": "237247476a90ae88a4140572d6776c4acbc16aa8", "content_id": "e23faa2c4bb2f55867db506092f7680ebace46dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2262, "license_type": "no_license", "max_line_length": 123, "num_lines": 74, "path": "/HealthyorNot_DesktopApp/app.py", "repo_name": "nityanandmathur/TheNewNormal", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n\n# In[23]:\napp = Flask(__name__,static_url_path='/static')\n# In[24]:\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/predictDib',methods=['POST'])\ndef predictDib():\n model = pickle.load(open('model.pkl', 'rb'))\n\n dataset = pd.read_csv('diabetes.csv')\n dataset_X = dataset.iloc[:,[1, 2, 5, 6, 7]].values\n\n from sklearn.preprocessing import MinMaxScaler\n sc = MinMaxScaler(feature_range = (0,1))\n dataset_scaled = sc.fit_transform(dataset_X)\n\n arr = list(request.form.values())\n float_features = [float(x) for x in arr]\n final_features = [np.array(float_features)]\n prediction = model.predict(sc.transform(final_features))\n # print(prediction)\n pred=''\n if prediction[0] == 1:\n pred = \"You could have Diabetic problem,so please please, follow covid norms, get vaccinated and consult a Doctor.\"\n elif prediction[0] == 0:\n pred = \"You are probably in safe zone but, please follow covid norms as much as possible \"\n \n return render_template('index.html', prediction_text='{}'.format(pred))\n\[email protected]('/predictHeart',methods=['POST'])\ndef predictHeart():\n \n model2 = pickle.load(open('model2.pkl', 'rb'))\n dataset2 = pd.read_csv('heart.csv')\n dataset2_X = dataset2.iloc[:,[2,3,4,5,7,8,9,11,12]].values\n from sklearn.preprocessing import MinMaxScaler\n sc = MinMaxScaler(feature_range = (0,1))\n dataset2_scaled = sc.fit_transform(dataset2_X)\n\n arr = list(request.form.values())\n float_features = [float(x) for x in arr]\n final_features = [np.array(float_features)]\n prediction1 = model2.predict(sc.transform(final_features))\n predh=''\n if prediction1[0] == 1:\n predh = \"You could have Heart problems, so please please follow covid norms, get vaccinated and consult a Doctor.\"\n elif prediction1[0] == 0:\n predh = \"You are probably in safe zone but, please follow covid norms as much as possible \"\n \n return render_template('index.html', prediction_text='{}'.format(predh))\n \n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.6265486478805542, "alphanum_fraction": 0.6539822816848755, "avg_line_length": 10.978723526000977, "blob_id": "009fdcd004492765863e3b10cf591c77a2132ab8", "content_id": "13c6bee3da7502229bf7b026595dbd28095a641a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 126, "num_lines": 94, "path": "/HealthyorNot_DesktopApp/Model_1.py", "repo_name": "nityanandmathur/TheNewNormal", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport pickle\n\n\n# 
In[2]:\n\n\ndataset = pd.read_csv('diabetes.csv')\n\n\n# In[3]:\n\n\ndataset_X = dataset.iloc[:,[1,2,5,6, 7]].values\ndataset_Y = dataset.iloc[:,8].values\n\n\n# In[4]:\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0,1))\ndataset_scaled = sc.fit_transform(dataset_X)\n\n\n# In[5]:\n\n\ndataset_scaled = pd.DataFrame(dataset_scaled)\n\n\n# In[6]:\n\n\n\nX = dataset_scaled\nY = dataset_Y\n\n\n# In[7]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 42, stratify = dataset['Outcome'] )\n\n\n# In[8]:\n\n\n\nfrom sklearn.svm import SVC\nsvc = SVC(kernel = 'linear', random_state = 42)\nsvc.fit(X_train, Y_train)\n\n\n# In[9]:\n\n\nsvc.score(X_test, Y_test)\n\n\n# In[10]:\n\n\nY_pred = svc.predict(X_test)\n\n\n# In[11]:\n\n\npickle.dump(svc, open('model.pkl','wb'))\nmodel = pickle.load(open('model.pkl','rb'))\n\n\n# In[14]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.6995490193367004, "alphanum_fraction": 0.7418263554573059, "avg_line_length": 64.62963104248047, "blob_id": "c7ac8bef4e4730603b2a7ad0e43558e84690974e", "content_id": "21fc78560496db7efa653df192d6924240434875", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3564, "license_type": "no_license", "max_line_length": 602, "num_lines": 54, "path": "/README.md", "repo_name": "nityanandmathur/TheNewNormal", "src_encoding": "UTF-8", "text": "# <p align=center> TheNewNormal ✈︎ <br />\n\n \n\n\n\n## Our Team 🧑‍💻<br /> \nName - Nityanand Mathur <br />\nEmail - [email protected]. Contact number: `7247412358` \n \nName - Ayush Pratap Singh <br />\nEmail - [email protected]. Contact number: `98391662530` <br />\n <img align=\"right\" width=\"345.5\" height=\"110\" src=\"https://github.com/nisheetkaran/B16_Wonders/blob/main/FilesForReadme/Version-B.png\">\n <img align=\"right\" width=\"110\" height=\"110\" src=\"https://github.com/nisheetkaran/B16_Wonders/blob/main/FilesForReadme/iste.png\"> <br />\nName - Nisheet Karan <br />\nEmail - [email protected]. Contact number: `8529468896` <br />\n \nName - Harshit Singh <br />\nEmail - [email protected]. Contact number: `9205021433` <br />\n\n\n## Problem Statement 1 - Post Covid Solution\n\nFinally the deadly second wave of covid 19 has ended and things are getting back to normal. But we cannot deny the fact that there is no going back to the lifestyle of the Pre-Covid Era. The only thing we can do is to accept this 'New-normal' and find solutions to ease the issues we are facing in this Post-Covid Era. Develop a platform that solves the problem of accumulation of crowd and prevent outbreak of this deadly disease in an innovative manner.\n\n## From Pandemic to Endemic\n \nGraph of covid seems to relieve as compared to what damage it has already offered to all of us. 
\nIn other regions, if aided by vaccines, aimed for a similar zero-COVID strategy, then could the world hope to rid itself of the virus?\nMany scientists and researchers believe that Covid-19 It’s a beautiful dream but most scientists think it’s improbable.\n \n<img align=\"centre\" width=\"616\" height=\"250\" src=\"https://github.com/nityanandmathur/B16_Wonders/blob/main/FilesForReadme/ActiveCases.png\">\n<img align=\"centre\" width=\"616\" height=\"250\" src=\"https://github.com/nityanandmathur/B16_Wonders/blob/main/FilesForReadme/Recovered.png\"> <br />\n \n \n\n \n \n ## Problems We are facing \n <img align=\"centre\" width=\"752.8\" height=\"196\" src=\"https://github.com/nityanandmathur/B16_Wonders/blob/main/FilesForReadme/News%201.png\"> <img align=\"right\" width=\"145\" height=\"201\" src=\"https://github.com/nityanandmathur/B16_Wonders/blob/main/FilesForReadme/clipart233452.png\">\n \n ![image](https://user-images.githubusercontent.com/77379835/139567512-eebe6068-51a8-4d33-89b9-3310416c3611.png)\n\n\n\n \n## List of solutions to the problem statement\n1) Overcrowding - As we have seen there's so many mutations happening over time. According to many scientists and researchers our body will take ample amount of time to totally get rid of all these, according to the mutations happening and as each and every persons have different genes we need to stay safe and shouldn't interact with many people. Overcrowding is a serious predicament that should be handeled properly with time. For the very purpose we have created a Tensorflow based Machine Learning model capable of detecting overcrowding and informing the local authorities at the very same time.\n\n2) Individual Focus/health tips - In the present era of covid, it is important to establish the proper mental and ohysical health. It is extremely important that everybody should be able to diagonse themselves without use of external aid. For the very purpose, we have created a Machine Learning model integrated with Flask to deploy as Web Application. This model would take some geneal parameters from individuals and returns their chances of getting Diabetes or Heart Attack.\n\n<div align=\"center\">\n<h1> Many thanks for this opportunity! \n </h1> \n" }, { "alpha_fraction": 0.5754475593566895, "alphanum_fraction": 0.5890877842903137, "avg_line_length": 35.46875, "blob_id": "fb324f930cb6d37be44908282c15633f72ea3844", "content_id": "fbc782707127fc4b1b0149fdd8ad463a7484982d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1173, "license_type": "no_license", "max_line_length": 205, "num_lines": 32, "path": "/HealthyorNot_DesktopApp/ReadMe.md", "repo_name": "nityanandmathur/TheNewNormal", "src_encoding": "UTF-8", "text": "# *Disease (Diabetes & Heart) Finder*\n************\nThis is machine learning model made to predict diseases in a person using basic informations of medical tests. \nUsing a desktop app the user inputs are taken and various algorithms are implemented on it. Later it is predicted wheteher the person has that particular disease or nor and output is shown on the website. 
\n\n************\n## <center>How to use the app ??</center>\nStep 1 : Clone the repository\n\nStep 2 : Make an environment in terminal Jupyter Notebook [using: python -m venv C:\\Users\\Admin\\Desktop\\HealthyorNot]\n\nStep 3 : Run app.py on terminal to make a local server.\n\nStep 4 : Click on the website link made through server [ex : * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)]\n\n\n\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n### **Screenshots** \n\n\n************\n\n![Screenshots of the App](screenshot1.JPG)\n\nFill in the form to get a prediction of wheteher you have the diasease or not.\n\n************\n\n![Screenshots of the App](screenshot2.JPG)\n\n************\n\n\n\n \n\n" } ]
4
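`Model_1.py` above fits a `MinMaxScaler` plus a linear `SVC` and pickles the classifier, and `app.py` reloads it and scales request features the same way before predicting. A hedged sketch of that round trip on tiny synthetic data; the file name `model.pkl` matches the row above, but the feature values and labels are made up.

```python
import pickle
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC

# Toy stand-in for diabetes.csv: two features, binary outcome.
X = np.array([[1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0]])
y = np.array([0, 0, 1, 1])

sc = MinMaxScaler(feature_range=(0, 1))
X_scaled = sc.fit_transform(X)

model = SVC(kernel="linear", random_state=42).fit(X_scaled, y)
with open("model.pkl", "wb") as f:
    pickle.dump(model, f)

# At request time, raw inputs must pass through the *same* scaler.
with open("model.pkl", "rb") as f:
    loaded = pickle.load(f)
print(loaded.predict(sc.transform([[3.5, 4.5]])))  # [1]
```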
dcgoss/ml-workers
https://github.com/dcgoss/ml-workers
1aadccbee635baa89183b807d9c7dd9ff4dccfa3
f9581f8d20154fec7bff730625e6e64d52284ac6
0958adcc2897d638fe503bd8c9f8e95e58e2fcf4
refs/heads/master
2020-12-03T03:52:15.138239
2017-08-01T20:11:58
2017-08-01T20:11:58
95,785,346
0
0
null
2017-06-29T14:19:11
2016-10-06T14:14:48
2016-11-14T01:13:08
null
[ { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.779411792755127, "avg_line_length": 21.66666603088379, "blob_id": "0ecd66df534fd5609e6767e24156ba84e15a6a9d", "content_id": "ecf37a5adaa59388429c0158b72ddc32ab3c472c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "permissive", "max_line_length": 39, "num_lines": 3, "path": "/test_ml_task_runner.py", "repo_name": "dcgoss/ml-workers", "src_encoding": "UTF-8", "text": "from ml_task_runner import run_notebook\n\nrun_notebook('1.download')\n" }, { "alpha_fraction": 0.6353577375411987, "alphanum_fraction": 0.6368330717086792, "avg_line_length": 37.74285888671875, "blob_id": "233586c22d1be9c82f99bb6ceb7aabfe58ed1016", "content_id": "5c9aaab7631cb5fc9d9545c63fbb3bb0c90d4012", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4067, "license_type": "permissive", "max_line_length": 84, "num_lines": 105, "path": "/notebooks/utils.py", "repo_name": "dcgoss/ml-workers", "src_encoding": "UTF-8", "text": "def sanitize_dataframe(df):\n \"\"\"Sanitize a DataFrame to prepare it for serialization.\n * Make a copy\n * Raise ValueError if it has a hierarchical index.\n * Convert categoricals to strings.\n * Convert np.int dtypes to Python int objects\n * Convert floats to objects and replace NaNs by None.\n * Convert DateTime dtypes into appropriate string representations\n \n Args:\n df (object): can be a variety of dataframe like objects\n \n Returns:\n pandas.DataFrame: a dataframe that is ready to be serialized\n \"\"\"\n import pandas as pd\n import numpy as np\n\n df = df.copy()\n\n for col_name, dtype in df.dtypes.iteritems():\n if str(dtype) == 'category':\n # XXXX: work around bug in to_json for categorical types\n # https://github.com/pydata/pandas/issues/10778\n df[col_name] = df[col_name].astype(str)\n elif np.issubdtype(dtype, np.integer):\n # convert integers to objects; np.int is not JSON serializable\n df[col_name] = df[col_name].astype(object)\n elif np.issubdtype(dtype, np.floating):\n # For floats, convert nan->None: np.float is not JSON serializable\n col = df[col_name].astype(object)\n df[col_name] = col.where(col.notnull(), None)\n elif str(dtype).startswith('datetime'):\n # Convert datetimes to strings\n # astype(str) will choose the appropriate resolution\n df[col_name] = df[col_name].astype(str).replace('NaT', '')\n return df\n\ndef fill_spec_with_data(spec, data):\n \"\"\"Take a Vega specification with missing data elements and complete it\n * Uses an incomplete Vega specification that has named data arguments\n which have empty values and replaces those missing values with\n data elements passed in\n * The Vega specification needs to have named data arguments that match\n the data that is passed in to this function\n \n Args:\n spec (dictionary): the Vega specification with incomplete data\n data (named dictionary of pandas.DataFrame): data elements\n \n Returns:\n dictionary: a completed Vega specification ready to be used\n \"\"\"\n data_names = [d['name'] for d in spec['data']]\n \n for name, df in data.items():\n if name not in data_names:\n raise ValueError('Name not in data spec')\n for ix in range(len(data_names)):\n if data_names[ix] == name:\n break\n \n df = sanitize_dataframe(df)\n spec['data'][ix]['values'] = df.to_dict(orient='records')\n\n return spec\n\ndef get_model_coefficients(classifier, feature_set, covariate_names):\n \"\"\"\n Extract 
the feature names and associate them with the coefficient values\n in the final classifier object.\n * Only works for expressions only model with PCA, covariates only model,\n and a combined model\n * Assumes the PCA features come before any covariates that are included\n * Sorts the final dataframe by the absolute value of the coefficients\n \n Args:\n classifier: the final sklearn classifier object \n feature_set: string of the model's name {expressions, covariates, full}\n covariate_names: list of the names of the covariate features matrix\n \n Returns:\n pandas.DataFrame: mapping of feature name to coefficient value\n \"\"\"\n import pandas as pd\n import numpy as np\n \n coefs = classifier.coef_[0] \n \n if feature_set=='expressions':\n features = ['PCA_%d' %cf for cf in range(len(coefs))]\n elif feature_set=='covariates': \n features = covariate_names\n else: \n features = ['PCA_%d' %cf for cf in range(len(coefs) - len(covariate_names))]\n features.extend(covariate_names)\n \n coef_df = pd.DataFrame({'feature': features, 'weight': coefs}) \n \n coef_df['abs'] = coef_df['weight'].abs()\n coef_df = coef_df.sort_values('abs', ascending=False)\n coef_df['feature_set'] = feature_set\n \n \n return coef_df" } ]
2
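`fill_spec_with_data` in the row above completes a Vega specification by sanitizing each DataFrame and writing `df.to_dict(orient='records')` into the matching named entry of `spec['data']`. A hedged sketch of the resulting structure; the spec, the data name `samples`, and the gene/score values are toy assumptions, not the repository's notebooks.

```python
import pandas as pd

# A Vega spec with a named-but-empty data entry, as fill_spec_with_data
# expects, completed with a DataFrame's records.
spec = {"data": [{"name": "samples", "values": []}]}
df = pd.DataFrame({"gene": ["TP53", "KRAS"], "score": [0.9, 0.4]})

spec["data"][0]["values"] = df.to_dict(orient="records")
print(spec)
# {'data': [{'name': 'samples', 'values': [{'gene': 'TP53', 'score': 0.9},
#                                          {'gene': 'KRAS', 'score': 0.4}]}]}
```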
mathurk1/Apache-Spark-Analysis
https://github.com/mathurk1/Apache-Spark-Analysis
4eaa50f3ae39a65a72263343a1ec2e31234b9190
c53a0fc6427d9e1b9a3772431b1d515604ea59a7
49e8b206a2928c0ac7a5dda4b143318edc8432ac
refs/heads/master
2021-06-12T18:26:03.656592
2017-05-03T00:06:43
2017-05-03T00:06:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6777070164680481, "alphanum_fraction": 0.6955413818359375, "avg_line_length": 30.399999618530273, "blob_id": "3cc448bff577e33129c409ef72173dc06c705680", "content_id": "96f4d407d8b11b4ec31453185a35d793262c6649", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1570, "license_type": "no_license", "max_line_length": 94, "num_lines": 50, "path": "/PurchaseAnalysis.py", "repo_name": "mathurk1/Apache-Spark-Analysis", "src_encoding": "UTF-8", "text": "from pyspark import SparkConf, SparkContext\nfrom datetime import datetime\n\n#setting up spark conf\nconf = SparkConf().setMaster(\"local[*]\").setAppName(\"FriendsByAge\")\nsc = SparkContext(conf= conf)\n\n#caching the RDD so that it can be used multiple times\nlines = sc.textFile(\"purchases_sample.txt\")\nsplit_rdd = lines.map(lambda x: x.split('\\t')).cache()\n\n#breaking down sales by product category\nsalesByCategory = split_rdd.map(lambda x: (x[3], float(x[4]))).reduceByKey(lambda x, y: x + y)\nsortedResult = salesByCategory.map(lambda x: (x[1], x[0])).sortByKey()\nresults = sortedResult.collect()\n\nfor result in results:\n print(result[1] + \" : \" + str(result[0]))\n\n\n#finding the average sales for each day of the week\n#parse line function definition\n\ndef parseLines(line):\n weekday = datetime.strptime(line[0], \"%Y-%m-%d\").weekday()\n return(int(weekday) , float(line[1]))\n\n#broadcasting dayDict so that the 0-6 mapping can be done on each worker node\ndayDict = {\n 0: 'Sunday',\n 1: 'Monday',\n 2: 'Tuesday',\n 3: 'Wednesday',\n 4: 'Thursday',\n 5: 'Friday',\n 6: 'Saturday'\n}\ndayLookUp = sc.broadcast(dayDict)\n\nsalesByDay = split_rdd.map(lambda x: (x[0], x[4]))\nweekDaySalesCount = salesByDay.map(parseLines).mapValues(lambda x: (x, 1))\n\nSaleForWeekDay = weekDaySalesCount.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\nAvgSaleForWeekDay = SaleForWeekDay.mapValues(lambda x: x[0] / x[1])\nmappedResults = AvgSaleForWeekDay.map(lambda x: (dayLookUp.value[x[0]], x[1]))\n\nresults = mappedResults.collect()\n\nfor result in results:\n print(result)\n" }, { "alpha_fraction": 0.5074626803398132, "alphanum_fraction": 0.7391044497489929, "avg_line_length": 45.52777862548828, "blob_id": "f45a701a3d2387a50d70631cd5a57b1d3bd3ac87", "content_id": "e762abcb21d2e050544a732626621be5c66486ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1675, "license_type": "no_license", "max_line_length": 163, "num_lines": 36, "path": "/README.md", "repo_name": "mathurk1/Apache-Spark-Analysis", "src_encoding": "UTF-8", "text": "# Apache-Spark-Analysis\n\nMade a Spark Project to analyse Sales Data and Web Server Logs and used concepts of SparkConf objects as well as DataFrames.\n\nA snapshot of the data :\n\nWebServer Data:\n```\n10.223.157.186 - - [15/Jul/2009:14:58:59 -0700] \"GET / HTTP/1.1\" 403 202\n10.223.157.186 - - [15/Jul/2009:14:58:59 -0700] \"GET /favicon.ico HTTP/1.1\" 404 209\n10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET / HTTP/1.1\" 200 9157\n10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET /assets/js/lowpro.js HTTP/1.1\" 200 10469\n10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET /assets/css/reset.css HTTP/1.1\" 200 1014\n10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET /assets/css/960.css HTTP/1.1\" 200 6206\n10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET /assets/css/the-associates.css HTTP/1.1\" 200 15779\n```\n\nAnaylsed the WebServer data using DataFrames and 
running SQL queries and other builtin functions on the created DataFrame.\n\n \nSales Data:\n```\n2012-01-01\t09:00\tSan Jose\tMen's Clothing\t214.05\tAmex\n2012-01-01\t09:00\tFort Worth\tWomen's Clothing\t153.57\tVisa\n2012-01-01\t09:00\tSan Diego\tMusic\t66.08\tCash\n2012-01-01\t09:00\tPittsburgh\tPet Supplies\t493.51\tDiscover\n2012-01-01\t09:00\tOmaha\tChildren's Clothing\t235.63\tMasterCard\n2012-01-01\t09:00\tStockton\tMen's Clothing\t247.18\tMasterCard\n2012-01-01\t09:00\tAustin\tCameras\t379.6\tVisa\n2012-01-01\t09:00\tNew York\tConsumer Electronics\t296.8\tCash\n\n```\n\nAnalysed the Sales data using the traditional SparkConf objects along with concepts of broadcasting and caching.\n\nThe data was obtained from Udacity's Introduction to Hadoop and MapReduce course. The programs were run over sample data on a pseudo cluster setup on my Windows PC\n" }, { "alpha_fraction": 0.645786702632904, "alphanum_fraction": 0.6532438397407532, "avg_line_length": 31.707317352294922, "blob_id": "f23fedf97818b0a63654f5d5fd3c6f6d5a2830e7", "content_id": "8d33307151ff519875f9c1c24e2cf50290a2160e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 115, "num_lines": 41, "path": "/logAnalysis.py", "repo_name": "mathurk1/Apache-Spark-Analysis", "src_encoding": "UTF-8", "text": "from pyspark.sql import SparkSession\nfrom pyspark.sql import Row\nimport re\n\n#### Create a SparkSession (Note, the config section is only for Windows!)\nspark = SparkSession.builder.config(\"spark.sql.warehouse.dir\", \"file:///C:/temp\").appName(\"SparkSQL\").getOrCreate()\n\np = re.compile(\n '([^ ]*) ([^ ]*) ([^ ]*) \\[([^]]*)\\] \"([^\"]*)\" ([^ ]*) ([^ ]*)'\n )\n\ndef mapper_parser(line):\n fields = p.match(line)\n field_name = list(fields.groups())\n\n return Row(host=field_name[0],\n ignore=field_name[1],\n user=field_name[2],\n date=field_name[3],\n request=field_name[4],\n status=field_name[5],\n size=field_name[6])\n\nlines = spark.sparkContext.textFile(\"access_log_sample.txt\")\nmatchedLines = lines.map(mapper_parser)\n\n#### Infer the schema, and register the DataFrame as a table.\nschemaLog = spark.createDataFrame(matchedLines).cache()\nschemaLog.createOrReplaceTempView(\"WebServerLogs\")\n\n#### SQL can be run over DataFrames that have been registered as a table.\n\n# number of page not found return status\npageNotFoundStatus = spark.sql(\"SELECT COUNT(*) AS COUNT FROM WebServerLogs WHERE status = 404\")\nfor results in pageNotFoundStatus.collect():\n print(results)\n\n# number of hits per IP address\nschemaLog.groupBy(\"host\").count().show()\n\nspark.stop()\n" } ]
3
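`PurchaseAnalysis.py` above computes an average per key by mapping each value to a `(sum, count)` pair, combining pairs with `reduceByKey`, and dividing at the end. A minimal sketch of that pattern on toy data; the app name and sample values are illustrative, and it assumes a local Spark installation.

```python
from pyspark import SparkConf, SparkContext

# Average-per-key pattern: (key, value) -> (key, (sum, count)) -> mean.
conf = SparkConf().setMaster("local[*]").setAppName("AvgByKeySketch")
sc = SparkContext(conf=conf)

sales = sc.parallelize([("Mon", 10.0), ("Mon", 30.0), ("Tue", 20.0)])
averages = (sales.mapValues(lambda v: (v, 1))
                 .reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1]))
                 .mapValues(lambda s: s[0] / s[1]))
print(averages.collect())  # [('Mon', 20.0), ('Tue', 20.0)] (order may vary)
sc.stop()
```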
Andriuslima/wa-message-worker-sam
https://github.com/Andriuslima/wa-message-worker-sam
ac6a0708826e3b2586c42c8e57a58a9f788dfc7f
2d06b4f0d82b9f8e0808b8c69b3d2bb9685fec27
806934ff0a3c502642ac33f327bc0d2109dfe373
refs/heads/main
2023-07-19T16:24:40.333327
2023-07-11T21:58:47
2023-07-11T21:58:47
323,898,884
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6587677597999573, "alphanum_fraction": 0.6587677597999573, "avg_line_length": 26.763158798217773, "blob_id": "3804e00e9f37627ad6f52b9e3e24b266fb4dabcc", "content_id": "7fb8dc3f18e8f512b8a4e944fd99dda2d4741cff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 87, "num_lines": 38, "path": "/lambdas/sender/src/queue.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import SQS, { MessageBodyAttributeMap, SendMessageRequest } from 'aws-sdk/clients/sqs';\nimport { IntegrationEvent } from './domain';\n\nexport class Queue {\n constructor(private client: SQS, private queue: string, private dlq: string) {}\n\n async enqueueEvent(event: IntegrationEvent, delay: number): Promise<void> {\n const sendMessageParams: SendMessageRequest = {\n MessageBody: JSON.stringify(event),\n QueueUrl: this.queue,\n DelaySeconds: delay,\n };\n\n await this.client.sendMessage(sendMessageParams).promise();\n }\n\n async sendToDLQ(message: string, error: string): Promise<void> {\n console.log(error);\n const attributes: MessageBodyAttributeMap = {\n error: {\n StringValue: error,\n DataType: 'String',\n },\n retryable: {\n StringValue: 'true',\n DataType: 'String',\n },\n };\n\n const params: SendMessageRequest = {\n MessageBody: message,\n QueueUrl: this.dlq,\n MessageAttributes: attributes,\n };\n\n await this.client.sendMessage(params).promise();\n }\n}\n" }, { "alpha_fraction": 0.6839186549186707, "alphanum_fraction": 0.6882316470146179, "avg_line_length": 29.62264060974121, "blob_id": "85f2f4048a01cd70477538d5edb5e0d193714e19", "content_id": "3df7224168d5bed5bae8ca91ea512a85c2d57206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1623, "license_type": "no_license", "max_line_length": 102, "num_lines": 53, "path": "/lambdas/sender-retry/src/index.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { Handler } from 'aws-lambda';\nimport { SQS } from 'aws-sdk';\n\nconst sqs = new SQS();\nconst dlq = process.env.DLQ || 'dlq-url';\nconst queue = process.env.QUEUE || 'dlq-url';\nconst batchSize = 10;\n\nexport const handler: Handler = async () => {\n const attributes = await sqs.getQueueAttributes({ QueueUrl: dlq }).promise();\n\n const numberOfMessage = (attributes.Attributes?.ApproximateNumberOfMessages || batchSize) as number;\n\n if (numberOfMessage < 1) {\n console.log('DLQ is empty, returning...');\n return;\n }\n\n let numberOfRetrieves = Math.floor(numberOfMessage / batchSize);\n if (numberOfMessage % batchSize > 0) {\n numberOfRetrieves += 1;\n }\n\n console.log(`Retrieving ${numberOfRetrieves}x (batches of ${batchSize})`);\n\n for (let i = 0; i < numberOfRetrieves; i++) {\n retrieveMessages(batchSize);\n }\n};\n\nasync function retrieveMessages(size: number): Promise<void> {\n const ops = { QueueUrl: dlq, MaxNumberOfMessages: size, MessageAttributeNames: ['retryable'] };\n const messages = await sqs.receiveMessage(ops).promise();\n\n if (!messages.Messages) {\n console.log('0 messages retrived, returning...');\n return;\n }\n\n for (const message of messages.Messages) {\n await processMessage(message);\n }\n}\n\nasync function processMessage(message: SQS.Message): Promise<void> {\n if (!message.Body) {\n console.log(`message ${message.MessageId} with empty body`);\n return;\n }\n const body = message.Body;\n await 
sqs.sendMessage({ QueueUrl: queue, MessageBody: body }).promise();\n await sqs.deleteMessage({ QueueUrl: dlq, ReceiptHandle: message.ReceiptHandle || '' }).promise();\n}\n" }, { "alpha_fraction": 0.6867815852165222, "alphanum_fraction": 0.6954023241996765, "avg_line_length": 18.33333396911621, "blob_id": "fa0ca230e2fcf80626ea6861231a6b41ccef2162", "content_id": "095c0dcc39c2da9b79340270de8ecbbf55d15331", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 348, "license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/lambdas/sender/src/domain.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "export interface IntegrationEvent {\n phone: string;\n instance: string;\n messages: MessageValue[];\n}\n\nexport interface MessageValue {\n value: string;\n index: number;\n delay: number;\n}\n\nexport function compareMsgs(a: MessageValue, b: MessageValue): number {\n if (a.index > b.index) return 1;\n if (a.index < b.index) return -1;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.7425825595855713, "alphanum_fraction": 0.7684914469718933, "avg_line_length": 60.35897445678711, "blob_id": "39d33d5d7d0e3ff805ee9abf422ce7c4cf4f43b9", "content_id": "a20b1fb158eab6095dcd05bf887dd60c390dead6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2393, "license_type": "no_license", "max_line_length": 430, "num_lines": 39, "path": "/README.md", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "[![CodeQL](https://github.com/Andriuslima/wa-message-worker-sam/actions/workflows/codeql-analysis.yml/badge.svg?branch=main)](https://github.com/Andriuslima/wa-message-worker-sam/actions/workflows/codeql-analysis.yml)\n[![Deploy to Development](https://github.com/Andriuslima/wa-message-worker-sam/actions/workflows/deploy-development.yml/badge.svg?branch=main)](https://github.com/Andriuslima/wa-message-worker-sam/actions/workflows/deploy-development.yml)\n[![Deploy to Production](https://github.com/Andriuslima/wa-message-worker-sam/actions/workflows/deploy-production.yml/badge.svg?branch=main)](https://github.com/Andriuslima/wa-message-worker-sam/actions/workflows/deploy-production.yml)\n\n### Dependencie\n\n- yarn = ^1.22.10\n- nodejs = 18\n- aws sam - ^1.24.1\n- docker = ^20.10.8\n\n# How to run locally\n\nBuild the application with: `yarn build`\n\n## Webhook\n\n`sam local start-api --profile xxxxx`\n\n`curl --location --request POST 'http://127.0.0.1:3000/message/fallback/' \\ --header 'Content-Type: application/x-www-form-urlencoded' \\ --data-urlencode 'contact[id]=12060' \\ --data-urlencode 'contact[email][email protected]' \\ --data-urlencode 'contact[first_name]=Tobey' \\ --data-urlencode 'contact[last_name]\"=Maguire' \\ --data-urlencode 'contact[phone]=55999999999' \\ --data-urlencode 'contact[fields][link_do_boleto]=www.google.com'`\n\n## Parser\n\n`docker-compose -f local/docker-compose.yml up -d`\n`aws dynamodb create-table --cli-input-json file://local/MessageValuesTable.json --endpoint-url http://localhost:8000`\n`aws dynamodb batch-write-item --request-items file://local/MessageValuesTableItems.json --endpoint-url http://localhost:8000`\n`sam local invoke WaParser --event local/parser-sqs-event.json -n local/environment.json --docker-network wa-message-worker-network`\n\n## Sender\n\n`sam local invoke WaSender --event local/sender-sqs-event.json --parameter-overrides 
'ParameterKey=ZAPITOKEN,ParameterValue=XXX' 'ParameterKey=ZAPIINSTANCE,ParameterValue=XXX'`\n\nReplace the `xxxxx` with the actual values\n\n### Useful DynamoDB Commands\n\nTo list all tables: `aws dynamodb list-tables --endpoint-url http://localhost:8000`\nTo retrieve information about a specific table: `aws dynamodb describe-table --table-name MessageValuesTable --endpoint-url http://localhost:8000`\nTo list items from a specific table: `aws dynamodb scan --table-name MessageValuesTable --endpoint-url http://localhost:8000`\n" }, { "alpha_fraction": 0.5860113501548767, "alphanum_fraction": 0.5879017114639282, "avg_line_length": 26.842105865478516, "blob_id": "3159caa07d099203bb93a3f6cbd21e16b0f99d95", "content_id": "a057758dfefe3d6116113d8ef14ab293701cfdc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 529, "license_type": "no_license", "max_line_length": 91, "num_lines": 19, "path": "/lambdas/parser/src/formatter.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "export class Formatter {\n phPrefix = '{';\n phSuffix = '}';\n\n hasPlaceholders(message: string): boolean {\n const matches = message.match(new RegExp(this.phPrefix + '\\\\w+' + this.phSuffix, 'g'));\n return matches !== null && matches.length > 0;\n }\n\n replace(data: string[], str = ''): string {\n Object.keys(data).forEach((key) => {\n const regexp = new RegExp(this.phPrefix + key + this.phSuffix, 'g');\n const value = data[key] || 'N/A';\n str = str.replace(regexp, value);\n });\n\n return str;\n }\n}\n" }, { "alpha_fraction": 0.7086330652236938, "alphanum_fraction": 0.7086330652236938, "avg_line_length": 15.352941513061523, "blob_id": "764950d79e0c245f19bd5ef1bc5791b9fae40cf6", "content_id": "a70897d05f8731adf5938e59ad83e1399925fc94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 278, "license_type": "no_license", "max_line_length": 35, "num_lines": 17, "path": "/lambdas/parser/src/domain.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "export interface IntegrationEvent {\n phone: string;\n instance: string;\n messages: MessageValue[];\n}\n\nexport interface Message {\n key: string;\n instance: string;\n msgs: MessageValue[];\n}\n\nexport interface MessageValue {\n value: string;\n index: number;\n delay: number;\n}\n" }, { "alpha_fraction": 0.6755319237709045, "alphanum_fraction": 0.6755319237709045, "avg_line_length": 29.763635635375977, "blob_id": "a9cb0c13bfb4d07bd7bfa57eeff228f2e5923422", "content_id": "f938885fdd755f8836bca9d922a18a82c3c2cfa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1692, "license_type": "no_license", "max_line_length": 72, "num_lines": 55, "path": "/lambdas/parser/src/index.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { Handler, SQSEvent } from 'aws-lambda';\nimport { SQS } from 'aws-sdk';\nimport { DB } from './db';\nimport { IntegrationEvent, Message } from './domain';\nimport { Formatter } from './formatter';\nimport { Queue } from './queue';\n\nconst dlq = process.env.DLQ || 'dlq-url';\nconst senderQueue = process.env.SENDER_QUEUE || 'sender-queue-url';\nconst table = process.env.TABLENAME || 'table-name';\nconst queue = new Queue(new SQS(), senderQueue, dlq);\nconst db = new DB(table);\nconst formatter = new Formatter();\n\nexport const handler: Handler = async (event: 
SQSEvent) => {\n for (const record of event.Records) {\n await parse(record.body);\n }\n};\n\nasync function parse(body: string): Promise<void> {\n const { id, phone, key, params } = JSON.parse(body);\n\n if (!key) {\n throw new Error('Key is not present');\n }\n\n console.log(`Message received for: ${id}:${phone}:${key}`);\n console.log(`Parameters received: ${params}`);\n\n const message = await db.get(key);\n\n console.log(`${message.msgs.length} message to parse`);\n\n const replacedMessage = replace(message, params);\n const integrationEvent: IntegrationEvent = {\n phone,\n instance: message.instance,\n messages: replacedMessage.msgs,\n };\n\n await queue.send(integrationEvent);\n}\n\nfunction replace(message: Message, params: string[]): Message {\n for (const entry of message.msgs) {\n console.log(`Original message: ${entry.value}`);\n entry.value = formatter.replace(params, entry.value);\n console.log(`Replaced message: ${entry.value}`);\n if (formatter.hasPlaceholders(entry.value) || !entry.value) {\n console.log(`params missing to complete message: ${entry.value}`);\n }\n }\n return message;\n}\n" }, { "alpha_fraction": 0.6745613813400269, "alphanum_fraction": 0.6780701875686646, "avg_line_length": 30.23287582397461, "blob_id": "216d7d525ea4d0f057893e3e7c7a96c14d692368", "content_id": "d879f2c309dd3021ace113e14ed5f724bdb245f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2280, "license_type": "no_license", "max_line_length": 92, "num_lines": 73, "path": "/lambdas/active-campaign-webhook/src/index.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { APIGatewayEvent, Handler } from 'aws-lambda';\nimport { SQS } from 'aws-sdk';\nimport { Queue } from './queue';\nimport qs from 'qs';\nimport { PhoneFormatter } from './phoneFormatter';\n\nconst queueUrl = process.env.QUEUE || 'localhost';\nconst queue = new Queue(new SQS(), queueUrl);\nconst phoneFormatter = new PhoneFormatter();\n\nexport const handler: Handler = async (event: APIGatewayEvent) => {\n if (!event.pathParameters || !event.pathParameters.key) {\n throw new Error('Message key not present on request path parameters');\n }\n\n if (!event.body) {\n throw new Error('Body not present request');\n }\n\n const body = qs.parse(event.body);\n const key = event.pathParameters.key;\n const { contact }: any = body;\n\n console.log(`Request body: ${JSON.stringify(body)}`);\n\n console.log(`Contact info received: ${JSON.stringify(contact)}`);\n\n const { id, phone, first_name: firstName, last_name: lastName, fields, email } = contact;\n const formattedPhones = [phoneFormatter.format(phone), fields?.telefone_checkout_hotmart];\n const phones = [...new Set(formattedPhones)];\n const name = (firstName || lastName || 'Abundante').split(' ')[0];\n const {\n link_do_boleto,\n data_de_nascimento,\n nome_completo_para_o_mapa,\n whatsapp,\n whatsapp_cod_ddi_pais,\n reprogramao_do_amorquizdata_de_nascimento,\n } = fields;\n const params = {\n name,\n email,\n linkBoleto: link_do_boleto,\n dataNascimento: data_de_nascimento,\n nomeCompletoMapa: nome_completo_para_o_mapa,\n whatsapp,\n whatsappCodDdiPais: whatsapp_cod_ddi_pais,\n reproDoAmordataNascimento: reprogramao_do_amorquizdata_de_nascimento,\n };\n\n if (phones.length == 0) {\n return {\n statusCode: 201,\n body: 'Could not extract any phone number from request body.',\n };\n }\n\n console.log(`Active campaign event received for contact: ${id}:${name}:${phones}`);\n 
console.log(`Message key received: ${key}`);\n console.log(`Routing with the following params: ${JSON.stringify(params)}`);\n\n const messages = phones.map((p) => JSON.stringify({ id, phone: p, key, params }));\n\n console.log('Routing requests to queue...');\n await queue.sendAll(messages);\n\n console.log('Routing done!');\n\n return {\n statusCode: 201,\n body: JSON.stringify(messages),\n };\n};\n" }, { "alpha_fraction": 0.6372239589691162, "alphanum_fraction": 0.6687697172164917, "avg_line_length": 20.133333206176758, "blob_id": "e7b2aa92b01f87161b9a3bed715db0cac946f432", "content_id": "28b5abcfab2057740c692e38ecfd1b133af76126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 317, "license_type": "no_license", "max_line_length": 57, "num_lines": 15, "path": "/local/docker-compose.yml", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "version: '3.5'\n\nservices:\n dynamo:\n container_name: dynamodb-local\n image: amazon/dynamodb-local\n networks:\n - wa-message-worker-network\n ports:\n - '8000:8000'\n command: '-jar DynamoDBLocal.jar -sharedDb -dbPath .'\n\nnetworks:\n wa-message-worker-network:\n name: wa-message-worker-network\n" }, { "alpha_fraction": 0.6730434894561768, "alphanum_fraction": 0.6730434894561768, "avg_line_length": 26.380952835083008, "blob_id": "61327e13cdf1e2b66a00c193191b7c80816ad38c", "content_id": "531bc2b72f81360ea95861df41f64b50a686deb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 575, "license_type": "no_license", "max_line_length": 63, "num_lines": 21, "path": "/lambdas/active-campaign-webhook/src/queue.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import SQS, { SendMessageRequest } from 'aws-sdk/clients/sqs';\n\nexport class Queue {\n constructor(private client: SQS, private queue: string) {}\n\n async sendAll(messages: string[]): Promise<void> {\n for (const message of messages) {\n console.log(`Sending message to queue: ${message}`);\n await this.send(message);\n }\n }\n\n private async send(body: string): Promise<void> {\n const sendMessageParams: SendMessageRequest = {\n MessageBody: body,\n QueueUrl: this.queue,\n };\n\n await this.client.sendMessage(sendMessageParams).promise();\n }\n}\n" }, { "alpha_fraction": 0.6736183762550354, "alphanum_fraction": 0.6736183762550354, "avg_line_length": 27.205883026123047, "blob_id": "04ecb5a62bc167d2e037371a238b18cca1b22507", "content_id": "1d668f492a6cd109028b8c8306e1efb3a9081a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 959, "license_type": "no_license", "max_line_length": 87, "num_lines": 34, "path": "/lambdas/parser/src/queue.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import SQS, { MessageBodyAttributeMap, SendMessageRequest } from 'aws-sdk/clients/sqs';\nimport { IntegrationEvent } from './domain';\n\nexport class Queue {\n constructor(private client: SQS, private queue: string, private dlq: string) {}\n\n async send(body: IntegrationEvent): Promise<void> {\n const sendMessageParams: SendMessageRequest = {\n MessageBody: JSON.stringify(body),\n QueueUrl: this.queue,\n };\n\n await this.client.sendMessage(sendMessageParams).promise();\n }\n async sendToDLQ(message: string, error: string): Promise<void> {\n console.log('Sending error do DLQ');\n console.log(error);\n\n const attributes: 
MessageBodyAttributeMap = {\n error: {\n StringValue: error,\n DataType: 'String',\n },\n };\n\n const params: SendMessageRequest = {\n MessageBody: message,\n MessageAttributes: attributes,\n QueueUrl: this.dlq,\n };\n\n await this.client.sendMessage(params).promise();\n }\n}\n" }, { "alpha_fraction": 0.6015037298202515, "alphanum_fraction": 0.61654132604599, "avg_line_length": 25.600000381469727, "blob_id": "117ce29225fa8ed3a1e1aa9c0bb29e3393b2f095", "content_id": "b4ff4af26046d244ae3b7b4dde0ed9f4aac76cac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 798, "license_type": "no_license", "max_line_length": 79, "num_lines": 30, "path": "/lambdas/parser/src/db.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { ClientConfiguration, DocumentClient } from 'aws-sdk/clients/dynamodb';\nimport { Message } from './domain';\n\nexport class DB {\n private client: DocumentClient;\n constructor(private table: string) {\n const local = process.env.LOCAL_DB;\n const options: ClientConfiguration = { apiVersion: '2012-08-10' };\n if (local === 'true') {\n console.log('Connecting to local dynamoDB');\n options.endpoint = 'http://dynamo:8000';\n }\n this.client = new DocumentClient(options);\n }\n\n async get(key: string): Promise<Message> {\n const { Item: item } = await this.client\n .get({\n TableName: this.table,\n Key: { key },\n })\n .promise();\n\n if (!item) {\n throw Error(`DynamoDB item ${key} not found`);\n }\n\n return item as Message;\n }\n}\n" }, { "alpha_fraction": 0.6147540807723999, "alphanum_fraction": 0.6325136423110962, "avg_line_length": 26.11111068725586, "blob_id": "4e022e87ad5ff1f24738de930d8308f88d5549cf", "content_id": "4f48765e8f80150e1f3156da3fbe1dd7307779f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 732, "license_type": "no_license", "max_line_length": 97, "num_lines": 27, "path": "/lambdas/active-campaign-webhook/src/phoneFormatter.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import parsePhoneNumber from 'libphonenumber-js';\n\nexport class PhoneFormatter {\n format(phone: string): string {\n let formattedPhone = phone;\n\n // Phone is BR and does not have an 9 in front.\n if (phone.length === 8) {\n formattedPhone = '9' + phone;\n }\n\n // Phone is BR and has a DDD without a 9.\n if (phone.length === 10) {\n formattedPhone = phone.substring(0, 2) + '9' + phone.substring(2);\n }\n\n const phoneNumber = parsePhoneNumber(formattedPhone, 'BR')?.format('E.164').replace('+', '');\n\n if (!phoneNumber) {\n throw Error('Parse phone number resulted in undefined string');\n }\n\n console.log(`Phone formatted from ${phone} to ${formattedPhone}`);\n\n return phoneNumber;\n }\n}\n" }, { "alpha_fraction": 0.6851297616958618, "alphanum_fraction": 0.6891217827796936, "avg_line_length": 31.852458953857422, "blob_id": "903e733acc287262937858f9bfcd64aac79b6aa2", "content_id": "e4657871db38e67b6fa63b28cf9b9e890c61f900", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2004, "license_type": "no_license", "max_line_length": 99, "num_lines": 61, "path": "/lambdas/sender/src/index.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { Handler, SQSEvent } from 'aws-lambda';\nimport { SQS, SSM } from 'aws-sdk';\nimport axios from 'axios';\nimport { compareMsgs, IntegrationEvent, 
MessageValue } from './domain';\nimport { Queue } from './queue';\nimport { ZApi } from './z-api';\nimport { Parameter } from './parameter';\n\nconst zApiHost = process.env.ZAPI_HOST;\nconst zApiUrl = `${zApiHost}/instances/`;\n\nconst http = axios.create({ baseURL: zApiUrl || 'localhost:1234' });\nconst parameter = new Parameter(new SSM());\nconst zApi = new ZApi(http, parameter);\nconst dlq = process.env.DLQ || 'dlq-url';\nconst senderQueue = process.env.QUEUE || 'queue-url';\nconst queue = new Queue(new SQS(), senderQueue, dlq);\n\nexport const handler: Handler = async (event: SQSEvent) => {\n for (const record of event.Records) {\n await handleMessage(JSON.parse(record.body));\n }\n};\n\nasync function handleMessage(event: IntegrationEvent): Promise<void> {\n console.log(`Handling event: ${JSON.stringify(event)}`);\n if (event.messages.length == 0) {\n console.log('No messages to send');\n return;\n }\n\n event.messages.sort(compareMsgs).reverse();\n const messageToSend = event.messages.pop();\n if (!messageToSend) {\n console.log('There is no message to be sent');\n return;\n }\n\n try {\n await sendMessage(messageToSend, event.phone, event.instance);\n } catch (err) {\n event.messages.push(messageToSend);\n await queue.sendToDLQ(JSON.stringify(event), JSON.stringify(err));\n return;\n }\n\n if (event.messages.length > 0) {\n const nextMessage = event.messages[event.messages.length - 1];\n await queue.enqueueEvent(event, nextMessage.delay | 0);\n } else {\n console.log('No more messages to handle');\n }\n}\n\nasync function sendMessage(message: MessageValue, phone: string, instance: string): Promise<void> {\n console.log(`Message received: ${JSON.stringify(message)}`);\n\n const { data } = await zApi.send(phone, message.value, instance);\n\n console.log(`Message sent to ${phone}, response: ${JSON.stringify(data)}`);\n}\n" }, { "alpha_fraction": 0.6835442781448364, "alphanum_fraction": 0.6835442781448364, "avg_line_length": 26.649999618530273, "blob_id": "50203a299c7b111f3f0833e9ecaa46d5f7185c2a", "content_id": "138eea108beead2a3299de518f0e54a9ef828c67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 553, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/lambdas/sender/src/parameter.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { GetParameterRequest } from 'aws-sdk/clients/ssm';\n\nexport class Parameter {\n constructor(private client: AWS.SSM) {}\n\n async get(parameter: string): Promise<string> {\n console.info(`Reading parameter from SSM: ${parameter}`);\n\n const request: GetParameterRequest = {\n Name: parameter,\n WithDecryption: true,\n };\n\n const response = await this.client.getParameter(request).promise();\n if (response.Parameter?.Value) {\n return response.Parameter.Value;\n }\n throw new Error('Parameter value undefined');\n }\n}\n" }, { "alpha_fraction": 0.6937716007232666, "alphanum_fraction": 0.7110726833343506, "avg_line_length": 26.571428298950195, "blob_id": "80ba7a82dda8617df57c58fe8137b25a227216f5", "content_id": "e640cf5a30d1841dc73e24cf7fc10c631e70ec9d", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 83, "num_lines": 21, "path": "/.github/utils/create-bucket.py", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "from botocore.client import ClientError\nimport boto3\nimport 
sys\n\nbucket = sys.argv[1].strip()\nACCESS_KEY = sys.argv[2].strip()\nSECRET_KEY = sys.argv[3].strip()\nREGION = sys.argv[4].strip()\n\ns3 = boto3.resource('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY,\n region_name=REGION)\n\ntry:\n s3.meta.client.head_bucket(Bucket=bucket)\nexcept ClientError:\n # This exception means that the bucket could not be found. So we must create it\n s3.create_bucket(\n Bucket=bucket,\n CreateBucketConfiguration={'LocationConstraint': REGION})" }, { "alpha_fraction": 0.6488991975784302, "alphanum_fraction": 0.6488991975784302, "avg_line_length": 30.962963104248047, "blob_id": "0ddfeb611c5e5e05e027e5bd9b4029166929c790", "content_id": "f3f2e8ef7a4b765adcd38370a1e9ce35dedbf08f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 863, "license_type": "no_license", "max_line_length": 91, "num_lines": 27, "path": "/lambdas/sender/src/z-api.ts", "repo_name": "Andriuslima/wa-message-worker-sam", "src_encoding": "UTF-8", "text": "import { AxiosInstance, AxiosRequestConfig, AxiosResponse } from 'axios';\nimport { Parameter } from './parameter';\n\nexport class ZApi {\n constructor(private client: AxiosInstance, private parameter: Parameter) {}\n\n async send(phone: string, message: string, instance: string): Promise<AxiosResponse> {\n const body = {\n message,\n phone,\n };\n\n const token = await this.parameter.get(`/zap/${instance}`);\n\n const config: AxiosRequestConfig = {\n headers: { 'Content-Type': 'application/json' },\n };\n\n console.log(`Sending Z-API request: ${JSON.stringify(body)}`);\n try {\n return await this.client.post(`/${instance}/token/${token}/send-text`, body, config);\n } catch (err) {\n console.error(`Error while sending Z-Api request: ${err}`);\n throw new Error(`Error while sending Z-Api request: ${err}`);\n }\n }\n}\n" } ]
17
kpashko/flask
https://github.com/kpashko/flask
c587dec43a2246f7a28d2a76e583abc0c8ef024a
38b6e6f01d6b9b32bfd5b37cb0e8c8aa04894729
e813b4356ad4caaea377d063db0bf6c337483072
refs/heads/master
2020-03-19T09:37:50.601191
2018-06-06T09:31:38
2018-06-06T09:31:38
136,305,277
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6776315569877625, "alphanum_fraction": 0.6833881735801697, "avg_line_length": 27.279069900512695, "blob_id": "024c59671cf7a044bd43c3d51e61c7c26a7102ed", "content_id": "2a0a8125bcd719ca3c0588260baa01051b499a16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "no_license", "max_line_length": 106, "num_lines": 43, "path": "/flask_app.py", "repo_name": "kpashko/flask", "src_encoding": "UTF-8", "text": "from flask import Flask, redirect, render_template, request, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom collections import Counter\n\n\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\nSQLALCHEMY_DATABASE_URI = \"mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}\".format(\n username=\"\",\n password=\"\",\n hostname=\"\",\n databasename=\"\",\n)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = SQLALCHEMY_DATABASE_URI\napp.config[\"SQLALCHEMY_POOL_RECYCLE\"] = 299\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\nclass Note(db.Model):\n\n __tablename__ = \"notes\"\n\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.String(4096))\n unique = db.Column(db.Integer)\n\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"GET\":\n return render_template(\"main_page.html\")\n\n article=request.form[\"contents\"]\n counter = len(Counter(article.split()))\n note = Note(content=article, unique = counter)\n db.session.add(note)\n db.session.commit()\n return redirect(url_for('index'))\n\[email protected](\"/notes\")\ndef notes():\n return render_template(\"notes.html\", notes=Note.query.order_by((Note.unique).desc()))\n" } ]
1
Sparky1313/P2P-Project
https://github.com/Sparky1313/P2P-Project
b22564f39524934595712453bc536adc33cb75eb
6b879ac5cba95ae1e9f8f4ae8224eb2dba135cad
b452aab9eb616fcf63656d60b5c831c448c8a992
refs/heads/master
2023-04-08T21:40:15.823430
2021-04-21T21:54:16
2021-04-21T21:54:16
351,282,622
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.590415894985199, "alphanum_fraction": 0.602299153804779, "avg_line_length": 35.033935546875, "blob_id": "8041b2cd6530e9d3d5dbbf2f4de455c5237fd024", "content_id": "52a7c6eecd0789126c9acb440e493dc261ee4a3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15484, "license_type": "no_license", "max_line_length": 165, "num_lines": 442, "path": "/p2p.py", "repo_name": "Sparky1313/P2P-Project", "src_encoding": "UTF-8", "text": "import socket, traceback, threading, time, re, sys, atexit\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QDialog, QInputDialog, QHBoxLayout, QWidget, QSplitter, QPushButton, QLabel, QMainWindow, QVBoxLayout, QTextEdit, QLineEdit\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# A QThread that reads incoming messages and lets the main thread\n# know that the messages need to be append to the message display area.\nclass ReadThread(QThread):\n    new_msg = pyqtSignal(str)\n    req_end = pyqtSignal()\n\n    def __init__(self, end_cmd, end_cmd_ack):\n        QThread.__init__(self)\n        self.end_cmd = end_cmd\n        self.end_cmd_ack = end_cmd_ack\n    \n\n    def run(self):\n        try:\n            while True:\n                recvd_msg = sock.recv(5000)\n                msg = recvd_msg.decode()\n\n                if msg == self.end_cmd:\n                    self.new_msg.emit(self.end_cmd_ack)\n                    self.req_end.emit()\n                    return\n                \n                if msg == self.end_cmd_ack:\n                    self.req_end.emit()\n                    return\n                \n                self.new_msg.emit(\"Friend:\\n\" + msg)\n        except Exception as e:\n            # print(\"create_read_thread error: \" + str(e))\n            self.req_end.emit()\n            return\n\n\n\n# A QThread that listens for sockets trying to make a connection\n# when the user is the host.\nclass ListenThread(QThread):\n    new_thread = pyqtSignal()\n    req_end = pyqtSignal()\n    conn_made = pyqtSignal(str)\n    sent_err_msg = pyqtSignal(str) # This component is not hooked up\n\n\n    def __init__(self):\n        QThread.__init__(self)\n    \n\n    def run(self):\n        global sock\n\n        try:\n            sock.listen(1)\n            connection, peer_address = sock.accept()\n            sock.close()\n            sock = connection\n            self.conn_made.emit(peer_address[0] + \":\" + str(peer_address[1]) + \" CONNECTED\")\n            self.new_thread.emit()\n        except Exception as e:\n            # print(\"listen_thread error: \" + str(e))\n            self.req_end.emit()\n            self.sent_err_msg.emit(\"Unexpected error while listening for connections. Hosting stopped...\")\n\n    \n# The main application gui and logic\nclass App:\n    def __init__(self):\n        self.END_CONN_CMD = \"!** END_CONNECTION **!\"\n        self.END_CONN_CMD_ACK = \"!** END_ACK **!\"\n\n        self.q_app = QApplication([])\n        self.my_ip = \"127.0.0.1\"\n        self.my_port = 0\n        self.friend_ip = \"\"\n        self.friend_port = 0\n\n\n        '''Start components and layout'''\n\n        ###### Hosting components and layout ######\n\n        # ---Components setup---\n        self.my_port_input = QLineEdit(\"Enter your port to run on...\")\n        self.my_port_input_lbl = QLabel(\"Enter your port number:\")\n        self.start_host_btn = QPushButton(\"Host\")\n        self.end_host_btn = QPushButton(\"End Hosting\")\n\n        # ---Initial function calls---\n        self.my_port_input_lbl.setBuddy(self.my_port_input)\n        self.start_host_btn.clicked.connect(self.start_host)\n        self.end_host_btn.clicked.connect(self.send_end)\n        self.end_host_btn.setDisabled(True)\n\n        # ---Layout setup---\n        my_sel_layout = QHBoxLayout()\n        my_sel_layout.addWidget(self.my_port_input_lbl)\n        my_sel_layout.addWidget(self.my_port_input)\n        my_sel_layout.addWidget(self.start_host_btn)\n        my_sel_layout.addWidget(self.end_host_btn)\n\n        ################################################\n\n\n        ###### Friend selection components and layout ######\n\n        # ---Components setup---\n        self.friend_ip_input = QLineEdit(\"Enter friend's IP address to connect to...\")\n        self.friend_port_input = QLineEdit(\"Enter friend's port number to connect to...\")\n        self.friend_ip_input_lbl = QLabel(\"Enter friend's IP Address:\")\n        self.friend_port_input_lbl = QLabel(\"Enter friend's port number:\")\n        self.connect_btn = QPushButton(\"Connect\")\n        self.disconnect_btn = QPushButton(\"End Connection\")\n\n        # ---Initial function calls---\n        self.friend_ip_input_lbl.setBuddy(self.friend_ip_input)\n        self.friend_port_input_lbl.setBuddy(self.friend_port_input)\n        self.connect_btn.clicked.connect(self.conn_to_friend)\n        self.disconnect_btn.clicked.connect(self.send_end)\n        self.enable_friend_sel_components()\n        self.disconnect_btn.setDisabled(True)\n\n        # ---Layout setup---\n        friend_sel_layout = QHBoxLayout()\n        friend_sel_layout.addWidget(self.friend_ip_input_lbl)\n        friend_sel_layout.addWidget(self.friend_ip_input)\n        friend_sel_layout.addWidget(self.friend_port_input_lbl)\n        friend_sel_layout.addWidget(self.friend_port_input)\n        friend_sel_layout.addWidget(self.connect_btn)\n        friend_sel_layout.addWidget(self.disconnect_btn)\n\n        ################################################\n\n\n        ###### Message area components and layout ######\n\n        # ---Components setup---\n        self.msg_display_area = QTextEdit(\"Message Display\")\n        self.msg_input_box = QLineEdit(\"Message Input\")\n        self.msg_input_box_lbl = QLabel(\"Input message here:\")\n\n        # ---Initial function calls\n        self.msg_display_area.setReadOnly(True)\n        self.msg_input_box.returnPressed.connect(self.enter_msg)\n        self.msg_input_box_lbl.setBuddy(self.msg_input_box)\n        self.msg_input_box.setDisabled(True)\n\n        # ---Layout setup---\n        msg_input_layout = QHBoxLayout()\n        msg_input_layout.addWidget(self.msg_input_box_lbl)\n        msg_input_layout.addWidget(self.msg_input_box)\n        msg_area_layout = QVBoxLayout()\n        msg_area_layout.addWidget(self.msg_display_area)\n        msg_area_layout.addLayout(msg_input_layout)\n\n        ################################################\n\n\n        ###### Emoji components and layout ######\n\n        # ---Components setup---\n        self.thumbs_up_btn = QPushButton(\"\\U0001F44D\")\n        self.thumbs_down_btn = QPushButton(\"\\U0001F44E\")\n        self.smile_btn = QPushButton(\"\\U0001F600\")\n        self.laugh_btn = QPushButton(\"\\U0001F923\")\n        self.cry_btn = QPushButton(\"\\U0001F62D\")\n        self.angry_btn = QPushButton(\"\\U0001F620\")\n\n        # ---Initial function calls\n        self.thumbs_up_btn.clicked.connect(lambda: self.emoji_btn_clicked(\"\\U0001F44D\"))\n        self.thumbs_down_btn.clicked.connect(lambda: self.emoji_btn_clicked(\"\\U0001F44E\"))\n        self.smile_btn.clicked.connect(lambda: self.emoji_btn_clicked(\"\\U0001F600\"))\n        self.laugh_btn.clicked.connect(lambda: self.emoji_btn_clicked(\"\\U0001F923\"))\n        self.cry_btn.clicked.connect(lambda: self.emoji_btn_clicked(\"\\U0001F62D\"))\n        self.angry_btn.clicked.connect(lambda: self.emoji_btn_clicked(\"\\U0001F620\"))\n        self.disable_emoji_btns()\n        \n        # ---Layout setup---\n        emoji_layout = QHBoxLayout()\n        emoji_layout.addWidget(self.thumbs_up_btn)\n        emoji_layout.addWidget(self.thumbs_down_btn)\n        emoji_layout.addWidget(self.smile_btn)\n        emoji_layout.addWidget(self.laugh_btn)\n        emoji_layout.addWidget(self.cry_btn)\n        emoji_layout.addWidget(self.angry_btn)\n\n        ################################################\n\n\n        ###### Window components and layout ######\n\n        # ---Layout setup---\n        window_layout = QVBoxLayout()\n        window_layout.addLayout(my_sel_layout)\n        window_layout.addLayout(friend_sel_layout)\n        window_layout.addLayout(msg_area_layout)\n        window_layout.addLayout(emoji_layout)\n        \n        # ---Components setup---\n        self.window = QWidget()\n        self.window.setLayout(window_layout)\n        self.window.setWindowTitle(\"P2P Chat App\")\n        self.window.show()\n\n        ################################################\n\n        ''' End components and layout '''\n\n\n    '''Widget callbacks and application functions'''\n\n    # Ensures a valid entry is made for hosting and\n    # makes the user listen for connections on the specified port number\n    def start_host(self):\n        str_data = self.my_port_input.text()\n        data = 0\n\n        # Check if port number entered is valid\n        try:\n            data = int(str_data)\n        except Exception:\n            self.append_msg(\"ERROR: Your port number entered is not an integer. Port number must be an integer between 1024 to 49151.\")\n            return\n        \n        if data < 1024 or data > 49151:\n            self.append_msg(\"ERROR: Invalid input for your port number. Must be between 1024 to 49151.\")\n            return\n        \n        # Bind socket and start listening\n        self.my_port = data\n        sock.bind((self.my_ip, self.my_port))\n        self.listen_thread = ListenThread()\n        self.listen_thread.new_thread.connect(self.create_read_thread)\n        self.listen_thread.conn_made.connect(self.append_msg)\n        self.listen_thread.start()\n\n        # Update display\n        self.disable_hosting_components()\n        self.end_host_btn.setEnabled(True)\n        self.disable_friend_sel_components()\n        self.msg_input_box.setEnabled(True)\n        self.enable_emoji_btns()\n\n    # Sends out a message that lets the connected friend\n    # know that the user is ending the connection and then\n    # starts the process of ending the connection.\n    def send_end(self):\n        global sock\n\n        # End connection\n        try:\n            sock.sendall(self.END_CONN_CMD.encode())\n        except Exception:\n            pass\n\n        self.end_conn()\n\n    \n    # Helper method for changing display\n    def enable_hosting_components(self):\n        self.my_port_input.setEnabled(True)\n        self.start_host_btn.setEnabled(True)\n        self.end_host_btn.setEnabled(True)\n\n\n    # Helper method for changing display\n    def disable_hosting_components(self):\n        self.my_port_input.setDisabled(True)\n        self.start_host_btn.setDisabled(True)\n        self.end_host_btn.setDisabled(True)\n\n\n    def conn_to_friend(self):\n        ip = self.friend_ip_input.text()\n        port_input = self.friend_port_input.text()\n        port = 0\n\n        # Regex used to compare against for valid IP address\n        ip_regex_str = re.compile(\"^(([0-2][0-5][0-5]|[0-1]\\\\d{2}|\\\\d{1,2})\\\\.){3}([0-2][0-5][0-5]|[01]\\\\d{2}|\\\\d{1,2})$\")\n\n        # Check if IP address entered is valid\n        if not ip_regex_str.match(ip):\n            self.append_msg(\"ERROR: Invalid IP address. Formatting must be of type XXX.XXX.XXX.XXX and be within 0.0.0.0 to 255.255.255.255\")\n            return\n\n        # Check if port number entered is valid\n        try:\n            port = int(port_input)\n        except Exception:\n            self.append_msg(\"ERROR: Friend port number entered is not an integer. Port number must be an integer.\")\n            return\n        \n        if port < 1024 or port > 49151:\n            self.append_msg(\"ERROR: Invalid input for your port number. Must be between 1024 to 49151.\")\n            return\n        \n        # Connect to peer\n        self.friend_ip = ip\n        self.friend_port = port\n\n        try:\n            sock.connect((self.friend_ip, self.friend_port))\n            self.append_msg(\"CONNECTION SUCCESSFUL\")\n            self.create_read_thread()\n        except Exception as e:\n            # print(\"conn_to_friend error: \" + str(e))\n            self.end_conn()\n            self.append_msg(\"CONNECTION UNSUCCESSFUL:\" + str(e))\n            return\n\n        \n        # Update display\n        self.disable_friend_sel_components()\n        self.disconnect_btn.setEnabled(True)\n        self.disable_hosting_components()\n        self.msg_input_box.setEnabled(True)\n        self.enable_emoji_btns()\n    \n    \n    # Updates the display appropriately when the connection\n    # between user and friend is ended.\n    def disconn_update_display(self):\n        self.enable_friend_sel_components()\n        self.disconnect_btn.setDisabled(True)\n        self.enable_hosting_components()\n        self.end_host_btn.setDisabled(True)\n        self.msg_input_box.setDisabled(True)\n        self.disable_emoji_btns()\n        self.clear_all_inputs()\n    \n\n    # Appends the msg parameter to the message display area.\n    def append_msg(self, msg):\n        self.msg_display_area.append(msg + \"\\n\")\n\n\n    # Closes the socket being used for connections, \n    # resets corresponding variables involved in the connection,\n    # and updates the display to reflect the disconnection.\n    def end_conn(self):\n        global sock\n\n        try:\n            sock.close()\n            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            self.friend_ip = \"\"\n            self.friend_port = 0\n            self.my_port = 0\n            self.disconn_update_display()\n        except Exception as e:\n            print(\"end_conn error: \" + str(e))\n\n    \n    # Helper method for changing display\n    def enable_friend_sel_components(self):\n        self.friend_ip_input.setEnabled(True)\n        self.friend_port_input.setEnabled(True)\n        self.connect_btn.setEnabled(True)\n        self.disconnect_btn.setEnabled(True)\n\n\n    # Helper method for changing display\n    def disable_friend_sel_components(self):\n        self.friend_ip_input.setDisabled(True)\n        self.friend_port_input.setDisabled(True)\n        self.connect_btn.setDisabled(True)\n        self.disconnect_btn.setDisabled(True)\n    \n\n    # Helper method for changing display\n    def enable_emoji_btns(self):\n        self.thumbs_up_btn.setEnabled(True)\n        self.thumbs_down_btn.setEnabled(True)\n        self.smile_btn.setEnabled(True)\n        self.laugh_btn.setEnabled(True)\n        self.cry_btn.setEnabled(True)\n        self.angry_btn.setEnabled(True)\n\n\n    # Helper method for changing display\n    def disable_emoji_btns(self):\n        self.thumbs_up_btn.setDisabled(True)\n        self.thumbs_down_btn.setDisabled(True)\n        self.smile_btn.setDisabled(True)\n        self.laugh_btn.setDisabled(True)\n        self.cry_btn.setDisabled(True)\n        self.angry_btn.setDisabled(True)\n\n\n    # Clears all input fields\n    def clear_all_inputs(self):\n        self.my_port_input.clear()\n        self.friend_ip_input.clear()\n        self.friend_port_input.clear()\n        self.msg_display_area.clear()\n        self.msg_input_box.clear()\n    \n\n    # Handles validation of entry, sends message to friend, and\n    # ensures the message is displayed on screen.\n    def enter_msg(self):\n        msg = self.msg_input_box.text()\n\n        if not msg.isspace() and msg != '':\n            self.append_msg(\"Me:\\n\" + msg)\n            \n            sock.sendall(msg.encode())\n            self.msg_input_box.clear()\n\n\n    # Adds the emoji to the user's message\n    def emoji_btn_clicked(self, emoji_str):\n        msg = self.msg_input_box.text()\n        self.msg_input_box.setText(msg + emoji_str)\n\n\n    # Creates and starts a thread where the socket reads incoming messages.\n    def create_read_thread(self):\n        self.read_thread = ReadThread(self.END_CONN_CMD, self.END_CONN_CMD_ACK)\n        self.read_thread.new_msg.connect(self.append_msg)\n        self.read_thread.req_end.connect(self.end_conn)\n        self.read_thread.start()\n\n\n    \n# Makes sure threads and sockets close after the window closes\ndef on_exit_cleanup():\n    sock.close()\n    \n\nif __name__ == \"__main__\":\n    app = App()\n    atexit.register(on_exit_cleanup)\n    app.q_app.exec_()\n\n    # Makes sure threads and sockets close after the window closes\n    sock.close()" } ]
1
DeveloperDenis/BirdyFlap
https://github.com/DeveloperDenis/BirdyFlap
edcd5407feb3dee0cae550d4993f1b9eb45ea6cf
2c289589223cf8b6145c3e1a5a86bf1ad10aee9c
edcb02e71bff4f740c9ce9a3b687f83c49573c26
refs/heads/master
2021-01-13T04:53:58.874586
2017-05-09T20:33:18
2017-05-09T20:33:18
81,160,305
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6094464063644409, "alphanum_fraction": 0.6314880847930908, "avg_line_length": 29.479875564575195, "blob_id": "6e3e86f366e8b0202539de850f2c0d9d1389844c", "content_id": "63f579184e6b00e170d0bcc25ca8cf9e695a9f39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9845, "license_type": "no_license", "max_line_length": 129, "num_lines": 323, "path": "/game.py", "repo_name": "DeveloperDenis/BirdyFlap", "src_encoding": "UTF-8", "text": "import pygame\nimport random\n\npygame.mixer.pre_init(0,0,0,1024)\npygame.init()\n\n\n#constants\nTITLE = 0\nPLAYING = 1\nGAME_OVER = 2\n\nGRAV_ACCEL = 0.1\nSCALE_FACTOR = 4\nGROUND_WIDTH = 32\n#the width of the game before scaling\nINTERNAL_WIDTH = 320\nSPAWN_OBSTACLE = pygame.USEREVENT\nOBSTACLE_SPACING = 16*3\nOBSTACLE_RANGE = 144-OBSTACLE_SPACING-8\nOBSTACLE_FREQUENCY = 125\n\n#class initialization\nclass GameObject(object):\n\n    def __init__(self, startX, startY, fileName):\n        self.image = pygame.image.load(fileName)\n        self.image = self.image.convert_alpha()\n        self.rect = pygame.Rect(startX, startY, self.image.get_width(), self.image.get_height())\n\n    #simple setter methods\n    def setX(self, x):\n        self.rect.x = x\n    def setY(self, y):\n        self.rect.y = y\n    \n    #simple getter methods\n    def getTop(self):\n        return self.rect.top\n    def getBot(self):\n        return self.rect.bottom\n    def getLeft(self):\n        return self.rect.left\n    def getRight(self):\n        return self.rect.right\n\nclass Player(GameObject):\n    falling = True\n    flapping = False\n    animating = True\n    ySpeed = 0\n    currentFrame = 0\n    maxFrames = 3\n    delay = 0\n\n    def __init__(self, startX, startY, fileName):\n        self.image = pygame.image.load(fileName)\n        self.image = self.image.convert_alpha()\n        self.rect = self.image.get_rect(x = startX, y = startY)\n\n        self.animRect = pygame.Rect(0,0,16,16)\n\n    def update(self):\n        if self.falling or self.flapping:\n            \n            self.rect.y += self.ySpeed\n            self.ySpeed += GRAV_ACCEL\n\n            if self.ySpeed > 0 and self.flapping:\n                self.flapping = False\n                self.falling = True\n\n        if self.animating and self.delay >= 10:\n            if self.currentFrame >= 3:\n                self.currentFrame = 0\n            else:\n                self.currentFrame += 1\n\n            self.animRect.x = self.currentFrame * self.animRect.width\n            self.delay = 0\n\n        self.delay += 1\n\n    def flap(self):\n        self.ySpeed = -1.5\n\n#game wide important stuff\nscreen = pygame.display.set_mode((1280, 720))\npygame.display.set_caption(\"Birdy Flap\")\ncanvas = pygame.Surface((int(screen.get_width()/SCALE_FACTOR), int(screen.get_height()/SCALE_FACTOR)))\nrunning = True\ncurrentState = TITLE\nfpsTimer = pygame.time.Clock()\n#making the game actually random each time (more or less)\nrandom.seed()\npoints = 0\nhighscore = 0\nmouseFlag = False\n\n#game objects\nplayer = Player(50, 50, \"birdSheet.png\")\nobstacleArray = []\ngroundArray = []\npointFont = pygame.font.Font(\"PressStart2P-Regular.ttf\", 32)\npointDisplay = pointFont.render(str(points), False, (255,255,255))\npointCounter = INTERNAL_WIDTH\nobstacleCounter = INTERNAL_WIDTH\ngameOverBoard = GameObject(100, 50, \"gameOverBoard.png\")\nretryButton = GameObject(gameOverBoard.getLeft()+5, gameOverBoard.getBot()-20, \"retryButton.png\")\nquitButton = GameObject(retryButton.getRight() + 18, retryButton.getTop(), \"quitButton.png\")\ngameOverFont = pygame.font.Font(\"PressStart2P-Regular.ttf\", 12)\nbestText = gameOverFont.render(\"Best:\", False, (255,255,255))\ntitleText = GameObject(INTERNAL_WIDTH/2-20, 25, \"titleText.png\")\nplayButton = GameObject(titleText.getLeft()+15, 50, \"playButton.png\")\nflapSound = pygame.mixer.Sound(\"jump.wav\")\nhitSound = pygame.mixer.Sound(\"hit.wav\")\npointSound = pygame.mixer.Sound(\"point.wav\")\n\nfor i in range(0, 11):\n    groundArray.append(GameObject(i*GROUND_WIDTH, canvas.get_height()-36, \"ground.png\"))\n\nbackground = GameObject(0, 0, \"background.png\")\n\n#functions\ndef updateObstacles():\n    i = 0\n    while (i < len(obstacleArray)):\n        obstacleArray[i].rect.x -= 1\n\n        if obstacleArray[i].rect.right <= 0:\n            obstacleArray.pop(i)\n            i -= 1\n        i += 1\n    \n\ndef updateGround():\n    for groundObject in groundArray:\n        groundObject.rect.x -= 1\n\n        if groundObject.rect.right <= 0:\n            groundObject.rect.x = INTERNAL_WIDTH\n    \n\ndef resetPlayer():\n\n    player.setX(50)\n    player.setY(100)\n    player.ySpeed = 0\n    player.falling = True\n\ndef restartFlappy():\n    global currentState\n    global points\n    global mouseFlag\n    global player\n    global obstacleArray\n    global pointDisplay\n    global pointCounter\n    global obstacleCounter\n    \n    currentState = PLAYING\n    points = 0\n    mouseFlag = False\n\n    player.setX(50)\n    player.setY(50)\n    player.animating = True\n    player.falling = True\n    player.flapping = False\n    player.ySpeed = 0\n    player.currentFrame = 0\n    player.delay = 0\n\n    obstacleArray = []\n\n    pointDisplay = pointFont.render(str(points), False, (255,255,255))\n    pointCounter = obstacleCounter = INTERNAL_WIDTH\n\ndef gameOver():\n    global currentState\n    global player\n    global highscore\n    global points\n    global bestScoreText\n\n    currentState = GAME_OVER\n    player.animating = False\n    \n    if points > highscore:\n        highscore = points\n\n    bestScoreText = gameOverFont.render(str(highscore), False, (255,255,255))\n\nwhile running:\n    #refresh screen\n    canvas.fill((100,100,100))\n    \n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        if event.type == pygame.MOUSEBUTTONUP and currentState == GAME_OVER:\n            if event.button == 1 and quitButton.rect.collidepoint(event.pos[0]/4, event.pos[1]/4):\n                running = False\n            elif event.button == 1 and retryButton.rect.collidepoint(event.pos[0]/4, event.pos[1]/4):\n                restartFlappy()\n        elif event.type == pygame.MOUSEBUTTONUP and currentState == TITLE:\n            if event.button == 1 and playButton.rect.collidepoint(event.pos[0]/4, event.pos[1]/4):\n                player.falling = True\n                currentState = PLAYING\n    \n    if currentState == TITLE:\n\n        canvas.blit(background.image, background.rect)\n        canvas.blit(player.image, player.rect, player.animRect)\n\n        for groundObject in groundArray:\n            canvas.blit(groundObject.image, groundObject.rect)\n\n        canvas.blit(titleText.image, titleText.rect)\n        canvas.blit(playButton.image, playButton.rect)\n\n        player.falling = False\n        player.flapping = False\n\n        player.update()\n        updateGround()\n    \n    elif currentState == PLAYING:\n\n        obstacleCounter += 1\n        if obstacleCounter >= OBSTACLE_FREQUENCY:\n            randomY = random.random()*OBSTACLE_RANGE+52\n            \n            obstacleArray.append(GameObject(INTERNAL_WIDTH, randomY, \"obstacle.png\"))\n            flippedObstacle = GameObject(INTERNAL_WIDTH, randomY-obstacleArray[0].rect.height - OBSTACLE_SPACING, \"obstacle.png\")\n            flippedObstacle.image = pygame.transform.flip(flippedObstacle.image, False, True)\n            obstacleArray.append(flippedObstacle)\n            obstacleCounter = 0\n        \n        if len(obstacleArray) > 0:\n            pointCounter -= 1\n\n            if pointCounter <= player.getLeft():\n                pointSound.play()\n                pointCounter = 175 #hard-coded position of next obstacle after you pass one\n                points += 1\n                pointDisplay = pointFont.render(str(points), False, (255,255,255))\n        \n        player.update()\n        updateGround()\n        updateObstacles()\n\n        for groundObject in groundArray:\n            if player.getBot() >= groundObject.getTop():\n                hitSound.play()\n                player.falling = False\n                player.setY(groundObject.getTop()-player.rect.height)\n                gameOver()\n\n        for obstacle in obstacleArray:\n            tempRect = player.rect.copy()\n            tempRect.width = tempRect.width/4\n            if tempRect.colliderect(obstacle.rect):\n                hitSound.play()\n                gameOver()\n\n        if pygame.mouse.get_pressed()[0]:\n            mouseFlag = True\n        elif mouseFlag and player.getBot() > 2:\n            player.flap()\n            player.flapping = True\n            flapSound.play()\n            mouseFlag = False\n        elif player.getTop() <= 0 and player.flapping:\n            player.falling = True\n            player.ySpeed = 0\n        \n        #game object drawing\n        canvas.blit(background.image, background.rect)\n\n        #obstacle drawing\n        for i in range(0, len(obstacleArray)):\n            canvas.blit(obstacleArray[i].image, obstacleArray[i].rect)\n            #print(obstacleArray[i].rect.x, obstacleArray[i+1].rect.x)\n        \n        canvas.blit(player.image, player.rect, player.animRect)\n\n        #drawing the ground\n        for groundObject in groundArray:\n            canvas.blit(groundObject.image, groundObject.rect)\n\n        canvas.blit(pointDisplay, pointDisplay.get_rect(center=(INTERNAL_WIDTH/2, 25)))\n\n    elif currentState == GAME_OVER:\n\n        ''' DRAWING '''\n        canvas.blit(background.image, background.rect)\n        \n        for i in range(0, len(obstacleArray)):\n            canvas.blit(obstacleArray[i].image, obstacleArray[i].rect)\n        \n        canvas.blit(player.image, player.rect, player.animRect)\n        \n        for groundObject in groundArray:\n            canvas.blit(groundObject.image, groundObject.rect)\n\n        canvas.blit(gameOverBoard.image, gameOverBoard.rect)\n        canvas.blit(retryButton.image, retryButton.rect)\n        canvas.blit(quitButton.image, quitButton.rect)\n        canvas.blit(bestText, bestText.get_rect(x=gameOverBoard.getLeft()+5, y=gameOverBoard.getTop()+5))\n        canvas.blit(bestScoreText, (gameOverBoard.getLeft()+45, gameOverBoard.getTop()+23))\n        \n    ''' END IF '''\n    \n    #scale screen up\n    screen.blit(pygame.transform.scale(canvas, (canvas.get_width()*SCALE_FACTOR, canvas.get_height()*SCALE_FACTOR)), (0,0))\n    #update screen\n    pygame.display.flip()\n\n    #keep fps 60\n    fpsTimer.tick_busy_loop(60)\n\npygame.quit()\n" }, { "alpha_fraction": 0.7628571391105652, "alphanum_fraction": 0.7685714364051819, "avg_line_length": 37.88888931274414, "blob_id": "292a1203ec4acc7486722b6e61bbf3a5f8277713", "content_id": "858798bc6e320ddea59c2c7efb84010db56a6f14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 350, "license_type": "no_license", "max_line_length": 80, "num_lines": 9, "path": "/README.md", "repo_name": "DeveloperDenis/BirdyFlap", "src_encoding": "UTF-8", "text": "# BirdyFlap\nA \"tasteful recreation\" of the game Flappy Bird.\n\nCreated using Python 2.7 and Pygame\n\n### Screenshots/GIFs\n![Titlescreen screenshot](/screenshots/titlescreen.png?raw=true \"Title Screen\")\n![Titlescreen gif](/screenshots/titlescreen_gif.gif?raw=true \"Title Screen GIF\")\n![Score-Six gif](/screenshots/score-six.gif?raw=true \"Gameplay GIF\")\n" } ]
2
danielmaartens/span_challenge_python
https://github.com/danielmaartens/span_challenge_python
99e7059c70d5bb955680dec9ca7ace2abb1e4b0b
dc8e3f8ac1ed7e193815d22aebd39c8ac98ff78b
43ba08b5da6768665b14d2ce7552bbcaa819e246
refs/heads/master
2020-06-09T00:22:45.275537
2019-06-27T05:48:48
2019-06-27T05:48:48
193,333,708
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7254902124404907, "avg_line_length": 17, "blob_id": "b81a1687a3da31e3ceccc0f2029abde16fb930d8", "content_id": "9319cbef006323bfa997bab0e18ea333321ff069", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 51, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/test", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\npython3 challenge/tests.py -v\n" }, { "alpha_fraction": 0.6505164504051208, "alphanum_fraction": 0.6537137031555176, "avg_line_length": 29.00737953186035, "blob_id": "9e96babfe8f5ff491be1d6163a864d4306815557", "content_id": "baf6c9e230cfd64e97398ede99d240d9ec6456af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8132, "license_type": "no_license", "max_line_length": 114, "num_lines": 271, "path": "/challenge/utils.py", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "import re\nimport operator\nfrom time import sleep\nfrom team_value import TeamValue\n\nTEAM_RESULT_GROUPING_PATTERN = '^([a-zA-Z\\\\s]+)([0-9]+$)'\n\n\ndef dictionary_to_list(team_value_dictionary):\n    \"\"\"\n    converts a dictionary to a list for easier processing of data later.\n\n    :param team_value_dictionary:\n    :return:\n    \"\"\"\n    team_value_list = []\n\n    for team, value in team_value_dictionary.items():\n        team_value_list.append([team, value])\n\n    return team_value_list\n\n\ndef list_to_dictionary(team_value_list):\n    \"\"\"\n    converts a list to a dictionary for use in tests for easier access of team values with the team names we know.\n\n    :param team_value_list:\n    :return:\n    \"\"\"\n\n    dictionary = {}\n\n    for team_value in team_value_list:\n        dictionary[team_value.getname()] = team_value.getvalue()\n\n    return dictionary\n\n\ndef boolean_from_string(s):\n    \"\"\"\n    converts expected user input for yes/no/continue questions into a boolean value.\n\n    :param s:\n    :return:\n    \"\"\"\n\n    lower_case = s.split('\\n')[0].lower()\n\n    switcher = {\n        'y': True,\n        'yes': True,\n        'c': True,\n        'n': False,\n        'no': False\n    }\n\n    return switcher.get(lower_case, None)\n\n\ndef delayed_print(text, seconds):\n    \"\"\"\n    helper function to print delayed text in the console.\n    - it's so that the output isn't printed all at once.\n    - the user has time to read each line.\n\n    :param text:\n    :param seconds:\n    :return:\n    \"\"\"\n\n    sleep(seconds)\n    print(text)\n\n\ndef get_team_result_from_string(result, regex_pattern):\n    \"\"\"\n    expects a string containing the name of the team followed by a space and then the team's score for that match.\n    - e.g. team \"GoGetters\" with score 10 should have a string as follows: \"GoGetters 10\".\n\n    - it will then convert this string into a TeamValue object that has a name and value variable.\n    - it should also convert the string score into a number.\n\n    :param result:\n    :param regex_pattern:\n    :return:\n    \"\"\"\n\n    team_value = None\n\n    # use regex pattern to match team names that include spaces\n    r = re.search(regex_pattern, result)\n    if r:\n        # remove the space at the end of the team name\n        name = r.group(1)[:-1]\n        # convert string value into int type.\n        result = int(r.group(2))\n        team_value = TeamValue(name, result)\n\n    # return a TeamValue class object\n    return team_value\n\n\ndef set_team_ranks(sorted_team_match_points):\n    \"\"\"\n    sets the rank value for all teams.\n    note: the list must be sorted.\n\n    :param sorted_team_match_points:\n    :return:\n    \"\"\"\n    ranked_teams = []\n\n    index = 1\n    rank = 0\n    previous_team_points = None\n\n    for team_match_point in sorted_team_match_points:\n        name = team_match_point[0]\n        points = team_match_point[1]\n\n        # only change rank to running index if current points and previous points are different\n        # this is to make sure that teams who have the same points have the same rank.\n        if points != previous_team_points:\n            rank = index\n\n        team = TeamValue(name, points)\n        team.setrank(rank)\n        ranked_teams.append(team)\n\n        # set previous points to current points for next iteration check.\n        previous_team_points = points\n        index += 1\n\n    return ranked_teams\n\n\ndef calculate_match_points(match_results):\n    \"\"\"\n    processes a list of the two team scores in a single match\n    and returns a new TeamValue object for each team where the value parameter\n    represents the points the team received from either Losing/Winning/Drawing the match.\n\n    :param match_results:\n    :return:\n    \"\"\"\n    match_points = []\n\n    team_a = match_results[0]\n    team_b = match_results[1]\n\n    # initialise new TeamValue objects for each team\n    # setting initial points to 0\n    team_a_name = team_a.getname()\n    team_a_goals = team_a.getvalue()\n    team_a_points = TeamValue(team_a_name, 0)\n\n    team_b_name = team_b.getname()\n    team_b_goals = team_b.getvalue()\n    team_b_points = TeamValue(team_b_name, 0)\n\n    # match is a DRAW\n    if team_a_goals == team_b_goals:\n        team_a_points.setvalue(1)\n        team_b_points.setvalue(1)\n\n    # team A WON\n    elif team_a_goals > team_b_goals:\n        team_a_points.setvalue(3)\n\n    # team B WON\n    else:\n        team_b_points.setvalue(3)\n\n    # add the new objects to an empty list\n    match_points.append(team_a_points)\n    match_points.append(team_b_points)\n\n    return match_points\n\n\ndef reduce_team_match_points(all_teams_match_points):\n    \"\"\"\n    when this function is called we have a list\n    containing each team's match points for all games played.\n\n    we want to reduced that list to one that only has\n    one entry for each team, with each new object having it's\n    value represent the sum of all match points gained in the league.\n\n    :param all_teams_match_points:\n    :return:\n    \"\"\"\n\n    # using of a dictionary here makes it easier to reduce into a single entry per team.\n    final_team_points = {}\n\n    for team in all_teams_match_points:\n        name = team.getname()\n        points = team.getvalue()\n\n        # if the name does not exist in the map, it will be initialised with the value of points.\n        # otherwise it will just add this match's points to the previous points value.\n        if name in final_team_points:\n            next_points_total = final_team_points[name] + points\n            final_team_points[name] = next_points_total\n        else:\n            final_team_points[name] = points\n\n    # convert the dictionary back into a list for better processing later.\n    return dictionary_to_list(final_team_points)\n\n\ndef get_league_results(file_path):\n    \"\"\"\n    this is the most important function.\n    it serves as the parent for most of the other functions within this module.\n    it is responsible for reading through the file contents line by line and\n    processing the final ranks of teams in the league based on all the matches played.\n\n    :param file_path:\n    :return:\n    \"\"\"\n\n    team_match_points = []\n\n    # read file contents\n    with open(file_path) as f:\n        content = f.readlines()\n\n    # go through each line of the file\n    content = [line.strip() for line in content]\n    for line in content:\n        scores = []\n\n        # each line represents the outcome of a match.\n        # each team's own outcome of the match is separated by a \", \"\n        # which is why we first split the line by \", \" to get a match_results list\n        # of two strings representing the outcome of each team for the match.\n        match_results = line.split(', ')\n\n        # now we loop through the match_results\n        for result in match_results:\n\n            # we parse the string into a TeamValue object for easy processing later.\n            team_value = get_team_result_from_string(result, TEAM_RESULT_GROUPING_PATTERN)\n\n            # we add this result to a list representing the scores for each team of this match.\n            if team_value is not None:\n                scores.append(team_value)\n\n        # now that we have an array of TeamValue objects for the match representing each team\n        # we can calculate the match points.\n        match_points = calculate_match_points(scores)\n\n        # here we concatenate the new match_points array with all previous added match_points.\n        # the purpose of this is to have an array of TeamValue objects each representing\n        # the points the team gained in a match.\n        team_match_points.extend(match_points)\n\n    # now we reduce this array of all our teams' match_points\n    # into an array containing a single entry for each team\n    # with the value representing the sum of all their match points gained.\n    final_team_match_points = reduce_team_match_points(team_match_points)\n\n    # sort final_team_match_points by points DESC, and then by name ASC.\n    final_team_match_points.sort(key=operator.itemgetter(0))\n    final_team_match_points.sort(key=operator.itemgetter(1), reverse=True)\n\n    # set the team ranks and return the final league results.\n    return set_team_ranks(final_team_match_points)\n" }, { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 13.142857551574707, "blob_id": "dea668c7ab628e086175f2a0f5f7bc689ac5dd8c", "content_id": "b440816236f2dd4c25ac50ea2929f41c3934d0de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 98, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/test_and_run", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\necho \"\\nRunning Tests...\\n\"\nsh test\n\necho \"\\nRunning application...\\n\"\nsh run" }, { "alpha_fraction": 0.6355606913566589, "alphanum_fraction": 0.6470814347267151, "avg_line_length": 34.42856979370117, "blob_id": "70e7891a26c0d54e83d51f4ab4adde568bdf59fc", "content_id": "0f29c6b89b7311f535e2321300d838ba4f81987d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5208, "license_type": "no_license", "max_line_length": 95, "num_lines": 147, "path": "/challenge/tests.py", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nfrom utils import *\nfrom team_value import TeamValue\n\nteam_value = TeamValue('GoGetters', 10)\ntest_file_path = os.path.abspath('./challenge/resources/input.csv')\n\n\nclass TestTeamValues(unittest.TestCase):\n\n    def test_team_value_getname(self):\n        print('\\ngetname() should return GoGetters')\n        self.assertEqual(team_value.getname(), 'GoGetters')\n\n    def test_team_value_getvalue(self):\n        print('\\ngetvalue() should return 10 of type int')\n        self.assertEqual(team_value.getvalue(), 10)\n\n    def test_team_result_from_string_regex_name(self):\n        print('\\nRegex should take into account spaces within a team name')\n        team_result = get_team_result_from_string('FC Awesome 1', TEAM_RESULT_GROUPING_PATTERN)\n        self.assertEqual(team_result.getname(), 'FC Awesome')\n\n    def test_team_result_from_string_regex_value(self):\n        print('\\nThis function should extract the team\\'s score and convert it into a number')\n        team_result = get_team_result_from_string('FC Awesome 1', TEAM_RESULT_GROUPING_PATTERN)\n        self.assertEqual(team_result.getvalue(), 1)\n\n\nclass TestWinLose(unittest.TestCase):\n\n    def setUp(self):\n        team_a_result = TeamValue('A', 1)\n        team_b_result = TeamValue('B', 0)\n        match_results = [team_a_result, team_b_result]\n        match_points = calculate_match_points(match_results)\n        match_points_dictionary = list_to_dictionary(match_points)\n        self.match_points_dictionary = match_points_dictionary\n\n    def test_match_points_team_a(self):\n        print('\\nTeam A WON, so they should have 3 points')\n        self.assertEqual(self.match_points_dictionary.get('A'), 3)\n\n    def test_match_points_team_b(self):\n        print('\\nTeam B LOST, so they should have 0 points')\n        self.assertEqual(self.match_points_dictionary.get('B'), 0)\n\n\nclass TestDraw(unittest.TestCase):\n\n    def setUp(self):\n        team_a_result = TeamValue('A', 1)\n        team_b_result = TeamValue('B', 1)\n        match_results = [team_a_result, team_b_result]\n        match_points = calculate_match_points(match_results)\n        match_points_dictionary = list_to_dictionary(match_points)\n        self.match_points_dictionary = match_points_dictionary\n\n    def test_match_points(self):\n        print('\\nIt was a DRAW, so Team A and Team B should have 1 point')\n        self.assertEqual(self.match_points_dictionary.get('A'), 1)\n        self.assertEqual(self.match_points_dictionary.get('B'), 1)\n\n\nclass TestFinalResult(unittest.TestCase):\n\n    def setUp(self):\n        self.final_results = get_league_results(test_file_path)\n\n    def test_first_team_rank(self):\n        print('\\nThis team should have a rank of 1')\n        team = self.final_results[0]\n        self.assertEqual(team.getrank(), 1)\n\n    def test_first_team_name(self):\n        print('\\n1st team should be Tarantulas')\n        team = self.final_results[0]\n        self.assertEqual(team.getname(), 'Tarantulas')\n\n    def test_first_team_value(self):\n        print('\\nTarantulas should have 6 pts')\n        team = self.final_results[0]\n        self.assertEqual(team.getvalue(), 6)\n\n    def test_second_team_rank(self):\n        print('\\nThis team should have a rank of 2')\n        team = self.final_results[1]\n        self.assertEqual(team.getrank(), 2)\n\n    def test_second_team_name(self):\n        print('\\n2nd team should be Lions')\n        team = self.final_results[1]\n        self.assertEqual(team.getname(), 'Lions')\n\n    def test_second_team_value(self):\n        print('\\nLions should have 5 pts')\n        team = self.final_results[1]\n        self.assertEqual(team.getvalue(), 5)\n\n    def test_third_team_rank(self):\n        print('\\nThis team should have a rank of 3')\n        team = self.final_results[2]\n        self.assertEqual(team.getrank(), 3)\n\n    def test_third_team_name(self):\n        print('\\n3rd team should be FC Awesome')\n        team = self.final_results[2]\n        self.assertEqual(team.getname(), 'FC Awesome')\n\n    def test_third_team_value(self):\n        print('\\nFC Awesome should have 1 pt')\n        team = self.final_results[2]\n        self.assertEqual(team.getvalue(), 1)\n\n    def test_fourth_team_rank(self):\n        print('\\nThis team should have a rank of 3')\n        team = self.final_results[3]\n        self.assertEqual(team.getrank(), 3)\n\n    def test_fourth_team_name(self):\n        print('\\n4th team should be Snakes (after FC Awesome)')\n        team = self.final_results[3]\n        self.assertEqual(team.getname(), 'Snakes')\n\n    def test_fourth_team_value(self):\n        print('\\nSnakes should have 1 pt')\n        team = self.final_results[3]\n        self.assertEqual(team.getvalue(), 1)\n\n    def test_fifth_team_rank(self):\n        print('\\nThis team should have a rank of 5')\n        team = self.final_results[4]\n        self.assertEqual(team.getrank(), 5)\n\n    def test_fifth_team_name(self):\n        print('\\n5th team should be Grouches')\n        team = self.final_results[4]\n        self.assertEqual(team.getname(), 'Grouches')\n\n    def test_fifth_team_value(self):\n        print('Grouches should have 0 pts')\n        team = self.final_results[4]\n        self.assertEqual(team.getvalue(), 0)\n\n\nunittest.main()\n" }, { "alpha_fraction": 0.6965811848640442, "alphanum_fraction": 0.7086894512176514, "avg_line_length": 20.647058486938477, "blob_id": "2acf839ba0e6a93b05760000958f38ddf02b9feb", "content_id": "dc7876771c2a920528542ccbdac2055f27d6c356", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 132, "num_lines": 68, "path": "/README.md", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "# span_challenge_python #\n\n### League Rank Calculator\n\nThis is a command-line application written in `python` that will calculate the ranking table for a\nsoccer league.\n\n### Requirements\n\n- Python 3\n\n### NOTE:\n- Results of your soccer league must be in a file with the following structure:\n\n```\nLions 3, Snakes 3\nTarantulas 1, FC Awesome 0\nLions 1, FC Awesome 1\nTarantulas 3, Snakes 1\nLions 4, Grouches 0\n```\n\n- Please make sure the file ends with the last line of match outcomes, i.e. do not leave any empty lines in the file.\n- You need to know the absolute file path to copy and paste it into the terminal.\n\n#### NB:\n- This program CANNOT handle tildes (`~`) at the start of the file path. \n\n### Test and Run \nBefore completing the following steps, please make sure you are in the root directory.\n``` \ncd path/to/span_challenge_python\n```\n\n### Programmatic Execution\n\nScripts have been provided so that you do not have to focus on any initial setup, besides making sure you have `python 3` installed.\n\nAll you have to do is run any one of the following from the project `root`:\n\n##### Options\n\n1 - test and run\n``` \nsh test_and_run\n```\n2 - test\n``` \nsh test\n```\n3 - run\n``` \nsh run\n```\n\n### Manual Execution\nIf you rather manually execute in the command line, then run the following commands:\n\n\n#### run tests\n``` \npython3 challenge/tests.py -v\n```\n\n#### run program\n```\npython3 challenge/main.py\n```\n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 15.666666984558105, "blob_id": "edcef75489b95148ed4720311f6eb25955d0f63c", "content_id": "b2715c8c695cf6fa74c2f235fb8397f32d08be89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 47, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/run", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\npython3 challenge/main.py\n" }, { "alpha_fraction": 0.60118168592453, "alphanum_fraction": 0.6031511425971985, "avg_line_length": 32.295082092285156, "blob_id": "a17244016a00b4072f1460122a93f0f641187f4d", "content_id": "3255ad2bce898c20080e0d2b2653c9631f684f19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2031, "license_type": "no_license", "max_line_length": 191, "num_lines": 61, "path": "/challenge/main.py", "repo_name": "danielmaartens/span_challenge_python", "src_encoding": "UTF-8", "text": "import sys\nimport os.path\n\nfrom utils import *\n\n\ndef run():\n    running = True\n\n    delay = 1.5\n\n    print('\\nWelcome to the League Rank Calculator!\\n')\n\n    delayed_print('This program will calculate the ranking table for a soccer league.\\n', delay)\n\n    delayed_print('The data for the results of the games should be stored in a text file.', delay)\n\n    while running:\n\n        delayed_print('\\nPlease provide the full path of the file where your results are stored:\\n', delay)\n\n        # read in user input and store it in the file_path variable\n        file_path = input('Full File Path: ')\n\n        # does file exist ?\n        if os.path.exists(file_path):\n\n            # it does so let's start processing\n            # process the file contents and get the league results\n            ranked_teams = get_league_results(file_path)\n\n            print('\\nRESULTS\\n')\n\n            # print out the ranks in a format specified in the challenge.\n            for team in ranked_teams:\n                print(str(team.getrank()) + '. ' + team.getname() + ', ' + str(team.getvalue()) + (' pt' if team.getvalue() == 1 else ' pts'))\n\n            user_answer = input('\\nWould you like to check match point results of another league ? [y/n]: ')\n\n            user_carry_on = boolean_from_string(user_answer)\n\n            while user_carry_on is None:\n                print('\\nI do not understand your command, please try again... ')\n                user_answer = input('\\nWould you like to check match point results of another league ? [y/n]: ')\n\n                user_carry_on = boolean_from_string(user_answer)\n\n            running = user_carry_on\n\n        else:\n\n            user_answer = input('\\nSorry, your file does not exist ! Please double-check your file path and try again... 
Press [c] to continue, or any other key (besides ENTER) to exit...\\n')\n running = boolean_from_string(user_answer)\n delay = 0\n\n print('\\nThank you for using the League Rank Calculator !')\n sys.exit()\n\n\nrun()\n" } ]
7
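The ranking utilities in the record above lean on Python's stable sort to order teams: one pass by name ascending, then one pass by points descending, so teams with equal points stay alphabetical. A minimal sketch of that two-pass idiom, using hypothetical `(name, points)` tuples in the shape `dictionary_to_list` is assumed to return:

```python
import operator

# Hypothetical (name, points) tuples; the real list comes from dictionary_to_list.
teams = [("Snakes", 1), ("Tarantulas", 6), ("FC Awesome", 1), ("Lions", 5)]

# Pass 1: name ascending. Pass 2 is stable, so equal-point teams keep that order.
teams.sort(key=operator.itemgetter(0))
teams.sort(key=operator.itemgetter(1), reverse=True)

print(teams)  # [('Tarantulas', 6), ('Lions', 5), ('FC Awesome', 1), ('Snakes', 1)]
```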
wjfwzzc/Kaggle_Script
https://github.com/wjfwzzc/Kaggle_Script
40be1253739b93f6b201c26aae1bef86caab889b
931785f22bfe26718f252cfe63ee3b4e03ff63c7
5d6735c583a0e9c6acf840504611a0eb692afa06
refs/heads/master
2018-01-11T21:32:13.906261
2016-08-19T15:48:16
2016-08-19T15:48:16
46919799
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6307189464569092, "alphanum_fraction": 0.6405228972434998, "avg_line_length": 26.81818199157715, "blob_id": "83dfde190bc5500db7d3f7bdb03a55b07720c785", "content_id": "ad54e31084726a62c5ed6c823673a779313ea8ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "permissive", "max_line_length": 81, "num_lines": 11, "path": "/digit_recognizer/submissions/save_csv.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport pandas\n\n\ndef save_csv(pred, file_name):\n ans = pandas.DataFrame({\"ImageId\": range(1, len(pred) + 1), \"Label\": pred})\n ans[[\"ImageId\", \"Label\"]].to_csv(\"./submissions/%s\" % file_name, index=False)\n" }, { "alpha_fraction": 0.724252462387085, "alphanum_fraction": 0.7275747656822205, "avg_line_length": 19.066667556762695, "blob_id": "d911872d7ea5fd86df88d217be9ef472dff6d657", "content_id": "6862919954d67cad3ba71237bf35333b9e28d88a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "permissive", "max_line_length": 56, "num_lines": 15, "path": "/titanic/support_vector_machine.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.svm\n\nimport submissions\nfrom data import *\n\nsvm = sklearn.svm.SVC(kernel=\"linear\")\nsvm.fit(train, target)\npred = svm.predict(test)\n\nsubmissions.save_csv(pred, \"support_vector_machine.csv\")\n" }, { "alpha_fraction": 0.6908055543899536, "alphanum_fraction": 0.6965011954307556, "avg_line_length": 28.975608825683594, "blob_id": "469443f3179a869912a2f82977c65e37ec798b96", "content_id": "24ca18671544269ce2c75422bcad741568bc5904", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "permissive", "max_line_length": 92, "num_lines": 41, "path": "/titanic/data/load_data.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy\nimport pandas\nimport sklearn.preprocessing\n\n\ndef process_dummy(df, feature):\n df = df.join(pandas.get_dummies(df[feature], prefix=feature))\n df.drop(feature, axis=1, inplace=True)\n return df\n\n\ndef process_scale(df, feature):\n df[feature].fillna(df[feature].dropna().median(), inplace=True)\n df[feature] = sklearn.preprocessing.scale(df[feature].astype(numpy.float64), copy=False)\n return df\n\n\ntrain_df = pandas.read_csv(\"./data/train.csv\")\ntest_df = pandas.read_csv(\"./data/test.csv\")\n\ndata_df = train_df.append(test_df).reset_index(drop=True)\ndata_df.drop([\"PassengerId\", \"Survived\", \"Name\", \"Ticket\", \"Cabin\"], axis=1, inplace=True)\n\ndata_df = process_dummy(data_df, \"Pclass\")\ndata_df = process_dummy(data_df, \"Sex\")\ndata_df = process_scale(data_df, \"Age\")\ndata_df = process_scale(data_df, \"SibSp\")\ndata_df = process_scale(data_df, \"Parch\")\ndata_df = process_scale(data_df, \"Fare\")\ndata_df = process_dummy(data_df, \"Embarked\")\n\ntarget = train_df[\"Survived\"].astype(\"category\")\nids = test_df[\"PassengerId\"].values\n\ntrain = data_df[:train_df.shape[0]].values\ntest = 
data_df[train_df.shape[0]:].values\n" }, { "alpha_fraction": 0.7191358208656311, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 20.600000381469727, "blob_id": "91a2fe5c823137c5f5435e7babc646a069793415", "content_id": "d61831e0da4b5757ddc3f86b4a8efd817f1f1c5b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "permissive", "max_line_length": 65, "num_lines": 15, "path": "/titanic/gradient_boost_xgboost.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport xgboost\n\nimport submissions\nfrom data import *\n\ngbm = xgboost.XGBClassifier(learning_rate=0.05, n_estimators=300)\ngbm.fit(train, target)\npred = gbm.predict(test)\n\nsubmissions.save_csv(pred, \"gradient_boost_xgboost.csv\")\n" }, { "alpha_fraction": 0.6762656569480896, "alphanum_fraction": 0.6846261024475098, "avg_line_length": 38.87036895751953, "blob_id": "7ed557e61cf317acc7921d66721316452f025dd0", "content_id": "36aef8080f54269202b904246adc61ed6d7d20da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2153, "license_type": "permissive", "max_line_length": 101, "num_lines": 54, "path": "/word2vec_nlp_tutorial/blend.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport numpy\nimport sklearn.cross_validation\nimport sklearn.ensemble\nimport sklearn.linear_model\nimport sklearn.naive_bayes\nimport sklearn.svm\n\nimport data\nimport process.bag_of_words\nimport submissions\n\nif __name__ == '__main__':\n n_folds = 5\n\n skf = sklearn.cross_validation.StratifiedKFold(data.target, n_folds)\n # SVC(kernel='linear', probability=True) stands in for LinearSVC, which has no predict_proba.\n clfs = [sklearn.ensemble.AdaBoostClassifier(n_estimators=100, random_state=process.seed),\n sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, random_state=process.seed),\n sklearn.ensemble.RandomForestClassifier(n_estimators=100, random_state=process.seed),\n sklearn.linear_model.LogisticRegression(random_state=process.seed),\n sklearn.naive_bayes.MultinomialNB(),\n sklearn.svm.SVC(kernel='linear', probability=True, random_state=process.seed)]\n\n blend_train = numpy.zeros((process.bag_of_words.train.shape[0], len(clfs)))\n blend_test = numpy.zeros((process.bag_of_words.test.shape[0], len(clfs)))\n\n for i, clf in enumerate(clfs):\n blend_test_i = numpy.zeros((process.bag_of_words.test.shape[0], len(skf)))\n for j, (train_index, test_index) in enumerate(skf):\n fold_train = process.bag_of_words.train[train_index]\n fold_target = data.target[train_index]\n fold_test = process.bag_of_words.train[test_index]\n\n clf.fit(fold_train, fold_target)\n fold_pred = clf.predict_proba(fold_test)[:, 1]\n\n blend_train[test_index, i] = fold_pred\n blend_test_i[:, j] = clf.predict_proba(process.bag_of_words.test)[:, 1]\n # blend_test_i is (n_test_samples, n_folds); average over the fold axis, i.e. axis=1.\n blend_test[:, i] = blend_test_i.mean(axis=1)\n\n blend_clf = sklearn.linear_model.LogisticRegression(random_state=process.seed)\n blend_clf.fit(blend_train, data.target)\n pred = blend_clf.predict(blend_test)\n\n submissions.save_csv(pred, '{file_name}.csv'.format(file_name=__file__[:-3]))\n" }, { "alpha_fraction": 0.6734972596168518, "alphanum_fraction":
0.7964481115341187, "avg_line_length": 28.280000686645508, "blob_id": "8fa869a8f9b7b0d87245489ef5e40ec09669501b", "content_id": "ec0c58d3ed41be6366effbc6cd2f319910e78b25", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 950, "license_type": "permissive", "max_line_length": 70, "num_lines": 25, "path": "/digit_recognizer/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# Digit Recognizer\n\n[Digit Recognizer](https://www.kaggle.com/c/digit-recognizer/)\n\nalgorithm|score\n:-:|:-:\nAdaboost|0.74743\nBernoulli Naive Bayes|0.83243\nConvolutional Neural Networks(CNN)|0.99057\nDeep Bernoulli Restricted Boltzmann Machine(RBM)|0.88929\nDecision Tree|0.85500\nGaussian Naive Bayes|0.51457\nGradient Boost|0.94343\nK-Nearest Neighbors(KNN)|0.96800\nLogistic Regression|0.90800\nLong Short Term Memory(LSTM)|0.39757\nMulti-Layer Perceptron(MLP)|0.98057\nMultinomial Naive Bayes|0.83114\nRandom Forest|0.96686\nRecurrent Neural Network(RNN)|0.83314\nSupport Vector Machine(SVM)|0.84857\n\nPS: higher is better\n\nSee the code for each algorithm's specific parameters\n" }, { "alpha_fraction": 0.6691176295280457, "alphanum_fraction": 0.7904411554336548, "avg_line_length": 24.700000762939453, "blob_id": "1b01d668b3721906317008701aa7723cfdb8cfd9", "content_id": "854c3d5a46ce3497ab198a51ad81d3c45c0abf2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 670, "license_type": "permissive", "max_line_length": 76, "num_lines": 21, "path": "/titanic/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# Titanic: Machine Learning from Disaster\n\n[Titanic: Machine Learning from Disaster](https://www.kaggle.com/c/titanic/)\n\nalgorithm|score\n:-:|:-:\nAdaboost|0.74163\nBernoulli Naive Bayes|0.74641\nDecision Tree|0.72727\nGaussian Naive Bayes|0.74641\nGradient Boost|0.77033\nGradient Boost(Xgboost)|0.77512\nK-Nearest Neighbors(KNN)|0.65072\nLogistic Regression|0.76077\nMultinomial Naive Bayes|0.65072\nRandom Forest|0.76077\nSupport Vector Machine(SVM)|0.76555\n\nPS: higher is better\n\nSee the code for each algorithm's specific parameters\n" }, { "alpha_fraction": 0.580152690410614, "alphanum_fraction": 0.7816793918609619, "avg_line_length": 30.190475463867188, "blob_id": "9ac9f543a51b49a7183b3cf44a251262147731de", "content_id": "ef5c460b5f84fb371b393c694268d2e8e77c2beb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 781, "license_type": "permissive", "max_line_length": 89, "num_lines": 21, "path": "/airbnb_recruiting_new_user_bookings/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# Airbnb New User Bookings\n\n[Airbnb New User Bookings](https://www.kaggle.com/c/airbnb-recruiting-new-user-bookings/)\n\nalgorithm|public score|private score\n:-:|:-:|:-:\nAdaboost|0.86454|0.86746\nBernoulli Naive Bayes|0.85008|0.85563\nDecision Tree|0.75250|0.75867\nGaussian Naive Bayes|0.05340|0.05314\nGradient Boost|0.86521|0.86970\nGradient Boost(Xgboost)|0.86578|0.87055\nK-Nearest Neighbors(KNN)|0.84977|0.85484\nLogistic Regression|0.85837|0.86168\nMulti-Layer Perceptron(MLP)|0.85510|0.85922\nMultinomial Naive Bayes|0.85245|0.85585\nRandom Forest|0.84388|0.84868\n\nPS: higher is 
better\n\nSee the code for each algorithm's specific parameters\n" }, { "alpha_fraction": 0.6341991424560547, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 26.176469802856445, "blob_id": "78a61a7a2830b12c70ece60786115a5ec7365a81", "content_id": "5ce405f369defb71234359378008d0aa6196ed8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "permissive", "max_line_length": 115, "num_lines": 17, "path": "/walmart_recruiting_trip_type_classification/submissions/save_csv.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport gzip\n\nimport pandas\n\nfrom data import *\n\n\ndef save_csv(pred, file_name):\n    with gzip.GzipFile(\"./submissions/%s.gz\" % file_name, mode=\"w\") as gzfile:\n        pandas.DataFrame({\"VisitNumber\": ids}).join(\n            pandas.DataFrame(pred, columns=le.classes_).rename(columns=lambda x: \"TripType_\" + str(x))).to_csv(\n            gzfile, index=False)\n" }, { "alpha_fraction": 0.7342192530632019, "alphanum_fraction": 0.7375415563583374, "avg_line_length": 19.066667556762695, "blob_id": "514bbdd0e197e10e50459eccfe3d5526ef889cdc", "content_id": "49f094d18a8f59d926871622005959a9c8911e54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "permissive", "max_line_length": 47, "num_lines": 15, "path": "/airbnb_recruiting_new_user_bookings/decision_tree.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.tree\n\nimport submissions\nfrom data import *\n\ndt = sklearn.tree.DecisionTreeClassifier()\ndt.fit(train, target)\npred = dt.predict_proba(test)\n\nsubmissions.save_csv(pred, \"decision_tree.csv\")\n" }, { "alpha_fraction": 0.753713846206665, "alphanum_fraction": 0.7724785208702087, "avg_line_length": 27.422222137451172, "blob_id": "5d799455d22503512e0c88826b3116f33e6bcd42", "content_id": "b926de5a01d6a65ebb8d395fd33f6f0c26038cc5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1279, "license_type": "permissive", "max_line_length": 81, "num_lines": 45, "path": "/sf_crime/multi-layer_perceptron.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport keras.layers.advanced_activations\nimport keras.layers.core\nimport keras.layers.normalization\nimport keras.models\nimport keras.utils.np_utils\nimport numpy\n\nimport submissions\nfrom data import *\n\ntarget = target.astype(numpy.uint8)\ntrain = train.astype(numpy.float32)\ntest = test.astype(numpy.float32)\n\ntarget = keras.utils.np_utils.to_categorical(target)\n\ninput_dim = train.shape[1]\nnb_classes = target.shape[1]\n\nmlp = keras.models.Sequential()\n\nmlp.add(keras.layers.core.Dense(512, 
input_dim=input_dim))\nmlp.add(keras.layers.advanced_activations.PReLU())\nmlp.add(keras.layers.normalization.BatchNormalization())\nmlp.add(keras.layers.core.Dropout(0.5))\n\nmlp.add(keras.layers.core.Dense(512))\nmlp.add(keras.layers.advanced_activations.PReLU())\nmlp.add(keras.layers.normalization.BatchNormalization())\nmlp.add(keras.layers.core.Dropout(0.5))\n\nmlp.add(keras.layers.core.Dense(nb_classes))\nmlp.add(keras.layers.core.Activation(\"softmax\"))\n\nmlp.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\")\n\nmlp.fit(train, target, batch_size=64, nb_epoch=20, verbose=1, show_accuracy=True)\npred = mlp.predict(test, verbose=0)\n\nsubmissions.save_csv(pred, \"multi-layer_perceptron.csv\")\n" }, { "alpha_fraction": 0.6625000238418579, "alphanum_fraction": 0.6656249761581421, "avg_line_length": 23.615385055541992, "blob_id": "025a795db4f9807df7a65ca79366dc351f90dfbe", "content_id": "564f77038da9ec13656c3fe37cfb81c0b9b31f29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "permissive", "max_line_length": 88, "num_lines": 13, "path": "/titanic/submissions/save_csv.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport pandas\n\nfrom data import *\n\n\ndef save_csv(pred, file_name):\n    ans = pandas.DataFrame({\"PassengerId\": ids, \"Survived\": pred})\n    ans[[\"PassengerId\", \"Survived\"]].to_csv(\"./submissions/%s\" % file_name, index=False)\n" }, { "alpha_fraction": 0.6264744400978088, "alphanum_fraction": 0.7876802086830139, "avg_line_length": 27.259260177612305, "blob_id": "722c42d10b8e6ed23439ae422f15ff8527690529", "content_id": "783dc974f8bf878b4495fed940976de98311315b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 787, "license_type": "permissive", "max_line_length": 85, "num_lines": 27, "path": "/word2vec_nlp_tutorial/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# Bag of Words Meets Bags of Popcorn\n\n[Bag of Words Meets Bags of Popcorn](https://www.kaggle.com/c/word2vec-nlp-tutorial/)\n\nalgorithm|score\n:-:|:-:\nAdaboost|0.82492\nBernoulli Naive Bayes|0.85024\nC-LSTM Neural Networks[arxiv:1511.08630]|\\\nConvolutional Neural Networks(CNN)[arxiv:1408.5882]|0.89568\nConvolutional Neural Networks(CNN)[arxiv:1603.03827]|\\\nGated Recurrent Unit(GRU)[arxiv:1603.03827]|\\\nGaussian Naive Bayes|0.79616\nGradient Boosting|0.80568\nK-Nearest Neighbors(KNN)|0.71036\nLogistic Regression|0.88380\nLong Short-Term Memory(LSTM)[arxiv:1603.03827]|\\\nMulti-Layer Perceptron(MLP)|0.87036\nMultinomial Naive Bayes|0.85096\nRandom Forest|0.84848\nSupport Vector Machine(SVM)|0.86644\nTextGrocery|0.89028\nXgboost|0.80392\n\nPS: higher is better\n\nSee the code for each algorithm's specific parameters\n" }, { "alpha_fraction": 0.7346278429031372, "alphanum_fraction": 0.737864077091217, "avg_line_length": 19.600000381469727, "blob_id": "e9c19b7faeb46d083bc090031d7c1eb9c4284d65", "content_id": "1c9350cdaf0507476eab017f0e66e71eefeb882b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 309, "license_type": "permissive", "max_line_length": 55, "num_lines": 15, "path": "/digit_recognizer/bernoulli_naive_bayes.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import 
absolute_import\nfrom __future__ import division\n\nimport sklearn.naive_bayes\n\nimport submissions\nfrom data import *\n\nbnb = sklearn.naive_bayes.BernoulliNB()\nbnb.fit(train, target)\npred = bnb.predict(test)\n\nsubmissions.save_csv(pred, \"bernoulli_naive_bayes.csv\")\n" }, { "alpha_fraction": 0.7414966225624084, "alphanum_fraction": 0.7437641620635986, "avg_line_length": 23.5, "blob_id": "651265a7ca7bbe76a0f78f5290b51bacc4a3a44d", "content_id": "1cf5f11d5819461ab6d2be83c842295eb5baf021", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "permissive", "max_line_length": 80, "num_lines": 18, "path": "/whats_cooking/liblinear_tgrocery.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport tgrocery\n\nimport data.load_data\nimport submissions\n\ntrain_src = zip(data.load_data.target, data.load_data.train_df[\"ingredients\"])\n\ngrocery = tgrocery.Grocery(\"whats_cooking\")\ngrocery.train(train_src)\n\npred = [grocery.predict(text) for text in data.load_data.test_df[\"ingredients\"]]\n\nsubmissions.save_csv(pred, \"liblinear_tgrocery.csv\")\n" }, { "alpha_fraction": 0.6959620118141174, "alphanum_fraction": 0.6995249390602112, "avg_line_length": 29.071428298950195, "blob_id": "53c29c0ca06d164e24eb18b48253e27a4c72a140", "content_id": "d7600b6041d395aa4385026784aa0561b902e42a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 842, "license_type": "permissive", "max_line_length": 83, "num_lines": 28, "path": "/word2vec_nlp_tutorial/liblinear_tgrocery.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport tgrocery\n\nimport data\nimport process\nimport submissions\n\nif __name__ == '__main__':\n train_df = process.raw_to_texts(data.train_df, 'review', remove_stopwords=True)\n test_df = process.raw_to_texts(data.test_df, 'review', remove_stopwords=True)\n\n train_src = zip(data.target, train_df['review'])\n\n grocery = tgrocery.Grocery('word2vec_nlp_tutorial')\n grocery.train(train_src)\n\n pred = [grocery.predict(text).predicted_y for text in test_df['review']]\n\n submissions.save_csv(pred, '{file_name}.csv'.format(file_name=__file__[:-3]))\n" }, { "alpha_fraction": 0.6752889156341553, "alphanum_fraction": 0.6802421808242798, "avg_line_length": 33.94230651855469, "blob_id": "03f0ca42ffcb15aa758c5a8da967d2627db333a1", "content_id": "9b8a767ab1fccc6c6daa1f93027d70de2e171009", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1817, "license_type": "permissive", "max_line_length": 107, "num_lines": 52, "path": "/word2vec_nlp_tutorial/process/__init__.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport random\nimport 
re\n\nimport bs4\nimport keras.preprocessing.sequence\nimport nltk\nimport numpy\n\nseed = 42\nrandom.seed(seed)\nnumpy.random.seed(seed)\n\nwordnet = nltk.stem.WordNetLemmatizer()\nstopwords = set(nltk.corpus.stopwords.words('english'))\n\n\ndef raw_to_words(df, column, remove_stopwords=False, dictionary=None):\n df[column] = df[column].map(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text())\n df[column] = df[column].map(lambda x: re.sub(r'[^a-zA-Z]', ' ', x))\n df[column] = df[column].map(lambda x: x.lower().split())\n df[column] = df[column].map(lambda x: [wordnet.lemmatize(y) for y in x])\n if remove_stopwords:\n df[column] = df[column].map(lambda x: [y for y in x if y not in stopwords])\n if dictionary:\n df[column] = df[column].map(lambda x: [y for y in x if y in dictionary])\n return df\n\n\ndef raw_to_texts(df, column, remove_stopwords=False, dictionary=None):\n df = raw_to_words(df, column, remove_stopwords, dictionary)\n df[column] = df[column].map(lambda x: ' '.join(x))\n return df\n\n\ndef texts_to_sequences(df, column, tokenizer, maxlen=300):\n seq = tokenizer.texts_to_sequences(line.encode('utf-8') for line in df[column].values)\n print('mean:', numpy.mean([len(x) for x in seq]))\n print('std:', numpy.std([len(x) for x in seq]))\n print('median:', numpy.median([len(x) for x in seq]))\n print('max:', numpy.max([len(x) for x in seq]))\n seq = keras.preprocessing.sequence.pad_sequences(seq, maxlen=maxlen, padding='post', truncating='post')\n return seq\n" }, { "alpha_fraction": 0.7348242998123169, "alphanum_fraction": 0.7476038336753845, "avg_line_length": 19.866666793823242, "blob_id": "56c5029d83af595ae2c5c317872fd9ed6adb6600", "content_id": "03af0f782f3fc6058ef076ce258e60c4655a4c1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "permissive", "max_line_length": 59, "num_lines": 15, "path": "/whats_cooking/adaboost.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.ensemble\n\nimport submissions\nfrom data import *\n\nadb = sklearn.ensemble.AdaBoostClassifier(n_estimators=100)\nadb.fit(train, target)\npred = adb.predict(test)\n\nsubmissions.save_csv(pred, \"adaboost.csv\")\n" }, { "alpha_fraction": 0.7153392434120178, "alphanum_fraction": 0.7227138876914978, "avg_line_length": 29.81818199157715, "blob_id": "3a43bb1214ada8076f2f68077c1b6fe72e91392f", "content_id": "e39bb03b3341bf98901e3f65aff77478832f36a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "permissive", "max_line_length": 97, "num_lines": 22, "path": "/word2vec_nlp_tutorial/gradient_boosting.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport sklearn.ensemble\n\nimport data\nimport process.bag_of_words\nimport submissions\n\nif __name__ == '__main__':\n gb = sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, random_state=process.seed)\n gb.fit(process.bag_of_words.train, data.target)\n pred = gb.predict(process.bag_of_words.test)\n\n submissions.save_csv(pred, 
'{file_name}.csv'.format(file_name=__file__[:-3]))\n" }, { "alpha_fraction": 0.7212121486663818, "alphanum_fraction": 0.7424242496490479, "avg_line_length": 21, "blob_id": "2a2a6f7a1b48d06e48b1e8f6bbb5e71306ba8b20", "content_id": "eb8efc33379e6a3a6e96e410af36169300e89520", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "permissive", "max_line_length": 65, "num_lines": 15, "path": "/sf_crime/gradient_boost_xgboost.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport xgboost\n\nimport submissions\nfrom data import *\n\ngbm = xgboost.XGBClassifier(learning_rate=0.05, n_estimators=300)\ngbm.fit(train, target)\npred = gbm.predict_proba(test)\n\nsubmissions.save_csv(pred, \"gradient_boost_xgboost.csv\")\n" }, { "alpha_fraction": 0.5991501212120056, "alphanum_fraction": 0.7917847037315369, "avg_line_length": 33.619049072265625, "blob_id": "b077d781c0bfdedb94ff6a187806fe5420e1d746", "content_id": "4bb38eb62b4b7eea2ec83a2328f78dfcdb52ab2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 832, "license_type": "permissive", "max_line_length": 117, "num_lines": 21, "path": "/walmart_recruiting_trip_type_classification/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# Walmart Recruiting: Trip Type Classification\n\n[Walmart Recruiting: Trip Type Classification](https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/)\n\nalgorithm|public score|private score\n:-:|:-:|:-:\nAdaboost|3.58087|3.57956\nBernoulli Naive Bayes|1.52210|1.50991\nDecision Tree|11.87499|11.64445\nGaussian Naive Bayes|28.76452|28.77623\nGradient Boost|0.98021|0.96189\nGradient Boost(Xgboost)|0.97626|0.96169\nK-Nearest Neighbors(KNN)|2.32462|2.35547\nLogistic Regression|1.31242|1.29950\nMulti-Layer Perceptron(MLP)|0.88169|0.86591\nMultinomial Naive Bayes|2.29216|2.28357\nRandom Forest|1.58259|1.54953\n\nPS: lower is better\n\nSee the code for each algorithm's specific parameters\n" }, { "alpha_fraction": 0.7321937084197998, "alphanum_fraction": 0.74643874168396, "avg_line_length": 23.399999618530273, "blob_id": "c4b06a681ad55398679facb7cc59627163c2d7fa", "content_id": "79152d98553358f51e1eb5179b83104a43aa5744", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "permissive", "max_line_length": 88, "num_lines": 15, "path": "/airbnb_recruiting_new_user_bookings/random_forest.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.ensemble\n\nimport submissions\nfrom data import *\n\nrf = sklearn.ensemble.RandomForestClassifier(n_estimators=300, oob_score=True, n_jobs=2)\nrf.fit(train, target)\npred = rf.predict_proba(test)\n\nsubmissions.save_csv(pred, \"random_forest.csv\")\n" }, { "alpha_fraction": 0.6630244851112366, "alphanum_fraction": 0.6831293702125549, "avg_line_length": 40.599998474121094, "blob_id": "537bedc98de22942bbc95f76692452bebd268905", "content_id": "52b1d558e61e1e5f4283d7edeb06c458ab440f62", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2288, "license_type": "permissive",
"max_line_length": 108, "num_lines": 55, "path": "/word2vec_nlp_tutorial/c-lstm_neural_networks.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "\"\"\"\nReferences:\nA C-LSTM Neural Network for Text Classification\nhttp://arxiv.org/pdf/1511.08630v2.pdf\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport keras\n\nimport data\nimport process.word_vectors\nimport submissions\n\nif __name__ == '__main__':\n input_dim = process.word_vectors.train.shape[1]\n\n input_tensor = keras.layers.Input(shape=(input_dim,), dtype='int32')\n\n embedded = keras.layers.Embedding(input_dim=process.word_vectors.max_features + 1,\n output_dim=process.word_vectors.word_vec_dim, input_length=input_dim,\n weights=[process.word_vectors.weights])(input_tensor)\n # embedded = keras.layers.Dropout(0.5)(embedded)\n\n tensor = keras.layers.Convolution1D(nb_filter=200, filter_length=3)(embedded)\n tensor = keras.layers.Activation('relu')(tensor)\n\n forwards = keras.layers.GRU(output_dim=100, return_sequences=True)(tensor)\n backwards = keras.layers.GRU(output_dim=100, go_backwards=True, return_sequences=True)(tensor)\n\n output_tensor = keras.layers.merge([forwards, backwards], mode='concat', concat_axis=1)\n output_tensor = keras.layers.MaxPooling1D(pool_length=input_dim)(output_tensor)\n output_tensor = keras.layers.Flatten()(output_tensor)\n output_tensor = keras.layers.Dropout(0.5)(output_tensor)\n output_tensor = keras.layers.Dense(1, activation='sigmoid')(output_tensor)\n\n c_lstm = keras.models.Model(input_tensor, output_tensor)\n c_lstm.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n print(c_lstm.summary())\n # keras.utils.visualize_util.plot(cnn, to_file='images/{file_name}.png'.format(file_name=__file__[:-3]),\n # show_shapes=True)\n\n c_lstm.fit(process.word_vectors.train, data.target, batch_size=64, nb_epoch=10, validation_split=0.15)\n pred = c_lstm.predict(process.word_vectors.test)\n pred = (pred > 0.5).astype('int32')\n\n submissions.save_csv(pred.flatten(), '{file_name}.csv'.format(file_name=__file__[:-3]))\n" }, { "alpha_fraction": 0.7235022783279419, "alphanum_fraction": 0.7529953718185425, "avg_line_length": 26.125, "blob_id": "05be2ea5b73cb3b65d4e5cf86c7cae1ffb34f33a", "content_id": "3b98f667cc9e2f22af69b9a8b032f3223c021cfd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "permissive", "max_line_length": 83, "num_lines": 40, "path": "/digit_recognizer/long_short_term_memory.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport keras.layers.core\nimport keras.layers.recurrent\nimport keras.models\nimport keras.optimizers\nimport keras.utils.np_utils\nimport numpy\n\nimport submissions\nfrom data import *\n\ntarget = target.astype(numpy.uint8)\ntrain = train.reshape((train.shape[0], -1, 1)).astype(numpy.float32)\ntest = test.reshape((test.shape[0], -1, 1)).astype(numpy.float32)\n\ntarget = keras.utils.np_utils.to_categorical(target)\ntrain /= 255\ntest /= 255\n\ninput_shape = train.shape[1:]\nnb_classes = target.shape[1]\n\nlstm = 
keras.models.Sequential()\n\nlstm.add(keras.layers.recurrent.LSTM(100, input_shape=input_shape))\n\nlstm.add(keras.layers.core.Dense(nb_classes))\nlstm.add(keras.layers.core.Activation(\"softmax\"))\n\nrmsprop = keras.optimizers.RMSprop(lr=1e-6)\nlstm.compile(optimizer=rmsprop, loss=\"categorical_crossentropy\")\n\nlstm.fit(train, target, batch_size=32, nb_epoch=200, verbose=1, show_accuracy=True)\npred = lstm.predict_classes(test, verbose=0)\n\nsubmissions.save_csv(pred, \"long_short_term_memory.csv\")\n" }, { "alpha_fraction": 0.7383177280426025, "alphanum_fraction": 0.7445482611656189, "avg_line_length": 20.399999618530273, "blob_id": "d8063f85414d7bd69946ad5fdd1f3689efc8ea13", "content_id": "156aa771a37a5d0c4f8c1826ee62de7b30285f54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "permissive", "max_line_length": 54, "num_lines": 15, "path": "/digit_recognizer/logistic_regression.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.linear_model\n\nimport submissions\nfrom data import *\n\nlr = sklearn.linear_model.LogisticRegression(n_jobs=2)\nlr.fit(train, target)\npred = lr.predict(test)\n\nsubmissions.save_csv(pred, \"logistic_regression.csv\")\n" }, { "alpha_fraction": 0.6496081352233887, "alphanum_fraction": 0.6671277284622192, "avg_line_length": 38.436363220214844, "blob_id": "580c56a6ea983992e3b88014cef4c45498a218b4", "content_id": "2b4b4a019142fbda9271a7997cf614b1499d304a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2169, "license_type": "permissive", "max_line_length": 108, "num_lines": 55, "path": "/word2vec_nlp_tutorial/convolutional_neural_networks(textCNN).py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "\"\"\"\nReferences:\nConvolutional Neural Networks for Sentence Classification\nhttp://arxiv.org/pdf/1408.5882v2.pdf\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport keras\n\nimport data\nimport process.word_vectors\nimport submissions\n\nif __name__ == '__main__':\n input_dim = process.word_vectors.train.shape[1]\n\n input_tensor = keras.layers.Input(shape=(input_dim,), dtype='int32')\n\n embedded = keras.layers.Embedding(input_dim=process.word_vectors.max_features + 1,\n output_dim=process.word_vectors.word_vec_dim, input_length=input_dim,\n weights=[process.word_vectors.weights])(input_tensor)\n # embedded = keras.layers.Dropout(0.5)(embedded)\n\n tensors = []\n for filter_length in (3, 4, 5):\n tensor = keras.layers.Convolution1D(nb_filter=100, filter_length=filter_length)(embedded)\n tensor = keras.layers.Activation('relu')(tensor)\n tensor = keras.layers.MaxPooling1D(pool_length=input_dim - filter_length + 1)(tensor)\n tensor = keras.layers.Flatten()(tensor)\n tensors.append(tensor)\n\n output_tensor = keras.layers.merge(tensors, mode='concat', concat_axis=1)\n output_tensor = keras.layers.Dropout(0.5)(output_tensor)\n output_tensor = keras.layers.Dense(1, activation='sigmoid')(output_tensor)\n\n cnn = keras.models.Model(input_tensor, output_tensor)\n cnn.compile(optimizer='rmsprop', 
loss='binary_crossentropy', metrics=['accuracy'])\n print(cnn.summary())\n # keras.utils.visualize_util.plot(cnn, to_file='images/{file_name}.png'.format(file_name=__file__[:-3]),\n # show_shapes=True)\n\n cnn.fit(process.word_vectors.train, data.target, batch_size=64, nb_epoch=4)\n pred = cnn.predict(process.word_vectors.test)\n pred = (pred > 0.5).astype('int32')\n\n submissions.save_csv(pred.flatten(), '{file_name}.csv'.format(file_name=__file__[:-3]))\n" }, { "alpha_fraction": 0.7086758017539978, "alphanum_fraction": 0.7159817218780518, "avg_line_length": 32.181819915771484, "blob_id": "3124aca2190353c7a5ca17b4f60290c865da3bd2", "content_id": "5d586c23680bb37e1a3ecf0cb9cb6fef01651e85", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "permissive", "max_line_length": 102, "num_lines": 33, "path": "/word2vec_nlp_tutorial/process/bag_of_words.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport sklearn.feature_extraction\n\nimport data\nimport process\n\ntrain_df = process.raw_to_texts(data.train_df, 'review', remove_stopwords=True)\ndel data.train_df\ntest_df = process.raw_to_texts(data.test_df, 'review', remove_stopwords=True)\ndel data.test_df\nunlabeled_df = process.raw_to_texts(data.unlabeled_df, 'review', remove_stopwords=True)\ndel data.unlabeled_df\n\nvectorizer = sklearn.feature_extraction.text.TfidfVectorizer(stop_words='english', ngram_range=(1, 3),\n max_features=10000, sublinear_tf=True)\nvectorizer.fit(train_df['review'].append(unlabeled_df['review']))\ndel unlabeled_df\n\ntrain = vectorizer.transform(train_df['review']).toarray()\ndel train_df\ntest = vectorizer.transform(test_df['review']).toarray()\ndel test_df\n\ndel vectorizer\n" }, { "alpha_fraction": 0.721611738204956, "alphanum_fraction": 0.7570207715034485, "avg_line_length": 30.5, "blob_id": "cc2e93ac6a02e633dae33f0e81d6498c777dd0bf", "content_id": "06c0aa6f11c8aa4f86f62a485352772207bc3f5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1638, "license_type": "permissive", "max_line_length": 105, "num_lines": 52, "path": "/digit_recognizer/convolutional_neural_network.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport keras.layers.advanced_activations\nimport keras.layers.convolutional\nimport keras.layers.core\nimport keras.models\nimport keras.utils.np_utils\nimport numpy\n\nimport submissions\nfrom data import *\n\ntarget = target.astype(numpy.uint8)\ntrain = train.reshape((-1, 1, 28, 28)).astype(numpy.float32)\ntest = test.reshape((-1, 1, 28, 28)).astype(numpy.float32)\n\ntarget = keras.utils.np_utils.to_categorical(target)\ntrain /= 255\ntest /= 255\n\ninput_shape = train.shape[1:]\nnb_classes = target.shape[1]\n\ncnn = keras.models.Sequential()\n\ncnn.add(keras.layers.convolutional.Convolution2D(32, 3, 3, border_mode=\"valid\", input_shape=input_shape))\ncnn.add(keras.layers.advanced_activations.PReLU())\ncnn.add(keras.layers.convolutional.MaxPooling2D(pool_size=(2, 
2)))\ncnn.add((keras.layers.core.Dropout(0.5)))\n\ncnn.add(keras.layers.convolutional.Convolution2D(64, 3, 3))\ncnn.add(keras.layers.advanced_activations.PReLU())\ncnn.add(keras.layers.convolutional.MaxPooling2D(pool_size=(2, 2)))\ncnn.add((keras.layers.core.Dropout(0.5)))\n\ncnn.add(keras.layers.core.Flatten())\ncnn.add(keras.layers.core.Dense(500))\ncnn.add(keras.layers.advanced_activations.PReLU())\ncnn.add((keras.layers.core.Dropout(0.5)))\n\ncnn.add(keras.layers.core.Dense(nb_classes))\ncnn.add(keras.layers.core.Activation(\"softmax\"))\n\ncnn.compile(optimizer=\"adadelta\", loss=\"categorical_crossentropy\")\n\ncnn.fit(train, target, batch_size=128, nb_epoch=12, verbose=1, show_accuracy=True)\npred = cnn.predict_classes(test, verbose=0)\n\nsubmissions.save_csv(pred, \"convolutional_neural_network.csv\")\n" }, { "alpha_fraction": 0.7075268626213074, "alphanum_fraction": 0.7118279337882996, "avg_line_length": 26.352941513061523, "blob_id": "18aa54d64976309571f55af9856a03d5306a3c94", "content_id": "25e2dd124457c6b9db1aa3b21aea32d7313575a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "permissive", "max_line_length": 120, "num_lines": 17, "path": "/word2vec_nlp_tutorial/submissions/__init__.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport pandas\n\nimport data\n\n\ndef save_csv(pred, file_name):\n pandas.DataFrame({'id': data.ids, 'sentiment': pred}).to_csv('./submissions/%s' % file_name, index=False, quoting=3)\n" }, { "alpha_fraction": 0.8448275923728943, "alphanum_fraction": 0.8448275923728943, "avg_line_length": 6.25, "blob_id": "c983c3a0b25bee680e5d0f02f2775603dc8db13e", "content_id": "d3af5192774e0987d1a3fe830de2d04e42d13338", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 58, "license_type": "permissive", "max_line_length": 12, "num_lines": 8, "path": "/requirements.txt", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "numpy\nscipy\nscikit-learn\npandas\ntheano\nkeras\nnltk\nxgboost\n" }, { "alpha_fraction": 0.7406250238418579, "alphanum_fraction": 0.746874988079071, "avg_line_length": 20.33333396911621, "blob_id": "7541fa0d43274ac7e43a558cbfd2e154d268aa28", "content_id": "3e7f872953eed59c440f74685b8a7d814e6b0095", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "permissive", "max_line_length": 54, "num_lines": 15, "path": "/whats_cooking/k-nearest_neighbors.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.neighbors\n\nimport submissions\nfrom data import *\n\nknn = sklearn.neighbors.KNeighborsClassifier(n_jobs=2)\nknn.fit(train, target)\npred = knn.predict(test)\n\nsubmissions.save_csv(pred, \"k-nearest_neighbors.csv\")\n" }, { "alpha_fraction": 0.6639676094055176, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 23.700000762939453, "blob_id": "8546622cb52227b6c9d90fe580b5ea9334773f19", "content_id": "5f7dcee7f2e94ae1e03bd83871634984e4b66808", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 600, "license_type": "permissive", "max_line_length": 72, "num_lines": 20, "path": "/sf_crime/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# San Francisco Crime Classification\n\n[San Francisco Crime Classification](https://www.kaggle.com/c/sf-crime/)\n\nalgorithm|score\n:-:|:-:\nAdaboost|3.61100\nBernoulli Naive Bayes【伯努利朴素贝叶斯】|2.61102\nDecision Tree【决策树】|27.62510\nGaussian Naive Bayes【高斯朴素贝叶斯】|16.54768\nGradient Boost|2.47201\nGradient Boost(Xgboost)|2.47554\nK-Nearest Neighbors(KNN)|5.95085\nLogistic Regression【Logistic回归】|2.59994\nMulti-Layer Perceptron(MLP)【多层感知机】|2.57129\nRandom Forest【随机森林】|9.06884\n\nPS: lower is better\n\n各算法具体参数请参考代码\n" }, { "alpha_fraction": 0.6880058646202087, "alphanum_fraction": 0.71596759557724, "avg_line_length": 30.604650497436523, "blob_id": "516a0574c03cdd185730bf3370295a6008dc07f8", "content_id": "77d6b400817c6bc3f4024512df86e387a6f5cbdb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1359, "license_type": "permissive", "max_line_length": 115, "num_lines": 43, "path": "/digit_recognizer/recurrent_neural_network.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport keras.initializations\nimport keras.layers.core\nimport keras.layers.recurrent\nimport keras.models\nimport keras.optimizers\nimport keras.utils.np_utils\nimport numpy\n\nimport submissions\nfrom data import *\n\ntarget = target.astype(numpy.uint8)\ntrain = train.reshape((train.shape[0], -1, 1)).astype(numpy.float32)\ntest = test.reshape((test.shape[0], -1, 1)).astype(numpy.float32)\n\ntarget = keras.utils.np_utils.to_categorical(target)\ntrain /= 255\ntest /= 255\n\ninput_shape = train.shape[1:]\nnb_classes = target.shape[1]\n\nrnn = keras.models.Sequential()\n\nrnn.add(keras.layers.recurrent.SimpleRNN(100, init=lambda shape: keras.initializations.normal(shape, scale=0.001),\n inner_init=lambda shape: keras.initializations.identity(shape, scale=1.0),\n activation=\"relu\", input_shape=input_shape))\n\nrnn.add(keras.layers.core.Dense(nb_classes))\nrnn.add(keras.layers.core.Activation(\"softmax\"))\n\nrmsprop = keras.optimizers.RMSprop(lr=1e-6)\nrnn.compile(optimizer=rmsprop, loss=\"categorical_crossentropy\")\n\nrnn.fit(train, target, batch_size=32, nb_epoch=200, verbose=1, show_accuracy=True)\npred = rnn.predict_classes(test, verbose=0)\n\nsubmissions.save_csv(pred, \"recurrent_neural_network.csv\")\n" }, { "alpha_fraction": 0.705640435218811, "alphanum_fraction": 0.7115158438682556, "avg_line_length": 30.518518447875977, "blob_id": "a7db18392d53962631048af6821fdf8e59935812", "content_id": "44d9dc1301cf9e5a77140c94f610fbc7e58342d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "permissive", "max_line_length": 92, "num_lines": 54, "path": "/walmart_recruiting_trip_type_classification/data/load_data.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport zipfile\n\nimport numpy\nimport pandas\nimport sklearn.preprocessing\n\n\ndef process_dummy(df, feature):\n df = df.join(pandas.get_dummies(df[feature], prefix=feature))\n 
df.drop(feature, axis=1, inplace=True)\n return df\n\n\ndef process_scale(df, feature):\n df[feature].fillna(df[feature].dropna().median(), inplace=True)\n df[feature] = sklearn.preprocessing.scale(df[feature].astype(numpy.float64), copy=False)\n return df\n\n\nz = zipfile.ZipFile(\"./data/train.csv.zip\")\ntrain_df = pandas.read_csv(z.open(\"train.csv\"))\nz = zipfile.ZipFile(\"./data/test.csv.zip\")\nz.setpassword(\"Work4WalmarT\")\ntest_df = pandas.read_csv(z.open(\"test.csv\"))\n\ntrain_df.drop([\"Upc\", \"FinelineNumber\"], axis=1, inplace=True)\ntrain_df = process_dummy(train_df, \"Weekday\")\ntrain_df = process_dummy(train_df, \"DepartmentDescription\")\ntrain_df = train_df.groupby([\"VisitNumber\", \"TripType\"], as_index=False).sum()\n\ntest_df.drop([\"Upc\", \"FinelineNumber\"], axis=1, inplace=True)\ntest_df = process_dummy(test_df, \"Weekday\")\ntest_df = process_dummy(test_df, \"DepartmentDescription\")\ntest_df = test_df.groupby(\"VisitNumber\", as_index=False).sum()\n\ndata_df = train_df.append(test_df).reset_index(drop=True)\ndata_df.drop([\"TripType\", \"VisitNumber\"], axis=1, inplace=True)\ndata_df.fillna(0, inplace=True)\n\ndata_df = process_scale(data_df, \"ScanCount\")\n\nle = sklearn.preprocessing.LabelEncoder()\ntrain_df[\"TripType\"] = le.fit_transform(train_df[\"TripType\"])\n\ntarget = train_df[\"TripType\"].astype('category')\nids = test_df[\"VisitNumber\"].values\n\ntrain = data_df[:train_df.shape[0]].values\ntest = data_df[train_df.shape[0]:].values\n" }, { "alpha_fraction": 0.6845124363899231, "alphanum_fraction": 0.6935946345329285, "avg_line_length": 35.70175552368164, "blob_id": "f3035338c728e5f4d6c6cf4554de8582d2db5053", "content_id": "cfe1131bcd22bc0439798490fe740121439113cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2092, "license_type": "permissive", "max_line_length": 108, "num_lines": 57, "path": "/airbnb_recruiting_new_user_bookings/data/load_data.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport zipfile\n\nimport numpy\nimport pandas\nimport sklearn.preprocessing\n\n\ndef process_dummy(df, feature):\n df = df.join(pandas.get_dummies(df[feature], prefix=feature))\n df.drop(feature, axis=1, inplace=True)\n return df\n\n\ndef process_scale(df, feature):\n df[feature].fillna(df[feature].dropna().median(), inplace=True)\n df[feature] = sklearn.preprocessing.scale(df[feature].astype(numpy.float64), copy=False)\n # df[feature] = sklearn.preprocessing.minmax_scale(df[feature].astype(numpy.float64), copy=False)\n return df\n\n\nz = zipfile.ZipFile(\"./data/train_users_2.csv.zip\")\ntrain_df = pandas.read_csv(z.open(\"train_users_2.csv\"))\nz = zipfile.ZipFile(\"./data/test_users.csv.zip\")\ntest_df = pandas.read_csv(z.open(\"test_users.csv\"))\n\ndata_df = train_df.append(test_df).reset_index(drop=True)\ndata_df.drop([\"id\", \"date_first_booking\", \"country_destination\"], axis=1, inplace=True)\ndata_df.fillna(0, inplace=True)\n\ndac = data_df[\"date_account_created\"].astype(str).map(lambda x: map(int, x.split('-')))\ndata_df[\"dac_year\"], data_df[\"dac_month\"], data_df[\"dac_day\"] = zip(*dac)\ndata_df.drop([\"date_account_created\"], axis=1, inplace=True)\n\ntfa = data_df[\"timestamp_first_active\"].astype(str).map(lambda x: map(int, (x[:4], x[4:6], x[6:8])))\ndata_df[\"tfa_year\"], data_df[\"tfa_month\"], data_df[\"tfa_day\"] = 
zip(*tfa)\ndata_df.drop([\"timestamp_first_active\"], axis=1, inplace=True)\n\nfeatures = [\"gender\", \"signup_method\", \"signup_flow\", \"language\", \"affiliate_channel\", \"affiliate_provider\",\n \"first_affiliate_tracked\", \"signup_app\", \"first_device_type\", \"first_browser\"]\nfor f in features:\n data_df = process_dummy(data_df, f)\n\ndata_df = process_scale(data_df, \"age\")\n\nle = sklearn.preprocessing.LabelEncoder()\ntrain_df[\"country_destination\"] = le.fit_transform(train_df[\"country_destination\"])\n\ntarget = train_df[\"country_destination\"].astype(\"category\")\nids = test_df[\"id\"].values\n\ntrain = data_df[:train_df.shape[0]].values\ntest = data_df[train_df.shape[0]:].values\n" }, { "alpha_fraction": 0.6754966974258423, "alphanum_fraction": 0.6821191906929016, "avg_line_length": 22.230770111083984, "blob_id": "374330fb15018a935e5d5eeb2984d8f92904475d", "content_id": "df525cbce48f962183ae0bb615e72f0719a8d31c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "permissive", "max_line_length": 48, "num_lines": 13, "path": "/digit_recognizer/data/load_data.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport pandas\n\ntrain_df = pandas.read_csv(\"./data/train.csv\")\n\ntarget = train_df[\"label\"].astype('category')\n\ntrain = train_df.drop(\"label\", axis=1).values\ntest = pandas.read_csv(\"./data/test.csv\").values\n" }, { "alpha_fraction": 0.5910020470619202, "alphanum_fraction": 0.5991820096969604, "avg_line_length": 23.450000762939453, "blob_id": "548d2f8a9fba118af11a76dd0e8704ead2fe36ad", "content_id": "41208f704ab76f78e808a3c1087076ce3cf3feaf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "permissive", "max_line_length": 81, "num_lines": 20, "path": "/airbnb_recruiting_new_user_bookings/submissions/save_csv.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy\nimport pandas\n\nfrom data import *\n\n\ndef save_csv(pred, file_name):\n idxs = []\n ctys = []\n for idx in xrange(len(ids)):\n idxs += [ids[idx]] * 5\n ctys += le.inverse_transform(numpy.argsort(pred[idx])[::-1])[:5].tolist()\n\n ans = pandas.DataFrame({\"id\": idxs, \"country\": ctys})\n ans[[\"id\", \"country\"]].to_csv(\"./submissions/%s\" % file_name, index=False)\n" }, { "alpha_fraction": 0.7206704020500183, "alphanum_fraction": 0.7262569665908813, "avg_line_length": 24.571428298950195, "blob_id": "ab733ef9c8c64448bd2c97669c954b5de8dd302d", "content_id": "6549ec55023cb21a005922819e4a625c297f6a53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "permissive", "max_line_length": 41, "num_lines": 7, "path": "/titanic/submissions/__init__.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom submissions.save_csv import save_csv\n" }, { "alpha_fraction": 0.7746478915214539, "alphanum_fraction": 0.7840375304222107, "avg_line_length": 25.625, "blob_id": "839ad43c3d48830ce6f61014fc332e2cdc88df36", "content_id": 
"c00c00293975f5f3fb1baacd16f9f5768977f7fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 339, "license_type": "permissive", "max_line_length": 88, "num_lines": 8, "path": "/README.md", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# Kaggle Script Library\n\n[Kaggle](https://www.kaggle.com/)是一个有许多机器学习和数据挖掘相关题目的网站,这里的代码都是一些练习Code,水平所限,质量可能不高,请见谅。\n\n如果需要GPU加速,命令如下:\n```shell\nTHEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python model.py\n```\n" }, { "alpha_fraction": 0.7348242998123169, "alphanum_fraction": 0.7380191683769226, "avg_line_length": 19.866666793823242, "blob_id": "6c789eabef0fd2fe73cfa3b506d463ca8e508b79", "content_id": "73d64922481c61a71d87b4faeef5a9263e53e93a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "permissive", "max_line_length": 54, "num_lines": 15, "path": "/walmart_recruiting_trip_type_classification/gaussian_naive_bayes.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.naive_bayes\n\nimport submissions\nfrom data import *\n\ngnb = sklearn.naive_bayes.GaussianNB()\ngnb.fit(train, target)\npred = gnb.predict_proba(test)\n\nsubmissions.save_csv(pred, \"gaussian_naive_bayes.csv\")\n" }, { "alpha_fraction": 0.7424242496490479, "alphanum_fraction": 0.7545454502105713, "avg_line_length": 21, "blob_id": "77d42b61ab22cf811b2c1311a97d65e10b340ffa", "content_id": "a7a35efd1c20427cb4d001255dac5d33efbefb9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "permissive", "max_line_length": 66, "num_lines": 15, "path": "/airbnb_recruiting_new_user_bookings/gradient_boost.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.ensemble\n\nimport submissions\nfrom data import *\n\ngb = sklearn.ensemble.GradientBoostingClassifier(n_estimators=300)\ngb.fit(train, target)\npred = gb.predict_proba(test)\n\nsubmissions.save_csv(pred, \"gradient_boost.csv\")\n" }, { "alpha_fraction": 0.7119565010070801, "alphanum_fraction": 0.717391312122345, "avg_line_length": 25.285715103149414, "blob_id": "e5d9a45c811e033d8cff90b019313e1206e3f416", "content_id": "fd7328f49fe28b193738268cbd2de49481f3eff6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "permissive", "max_line_length": 46, "num_lines": 7, "path": "/digit_recognizer/data/__init__.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom data.load_data import target, train, test\n" }, { "alpha_fraction": 0.6527196764945984, "alphanum_fraction": 0.7782427072525024, "avg_line_length": 22.899999618530273, "blob_id": "c0670ecb72d40e64ba835e03741e43eabef23a79", "content_id": "67deb9aaddbe3fe9363384733049b0247d40ba5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 604, "license_type": "permissive", "max_line_length": 58, "num_lines": 20, "path": "/whats_cooking/README.md", 
"repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# What's Cooking?\n\n[What's Cooking?](https://www.kaggle.com/c/whats-cooking/)\n\nalgorithm|score\n:-:|:-:\nAdaboost|0.56436\nBernoulli Naive Bayes【伯努利朴素贝叶斯】|0.71430\nDecision Tree【决策树】|0.62812\nGaussian Naive Bayes【高斯朴素贝叶斯】|0.24035\nK-Nearest Neighbors(KNN)|0.74688\nLibLinear(TextGrocery)|0.79455\nLogistic Regression【Logistic回归】|0.78992\nMultinomial Naive Bayes【多项式朴素贝叶斯】|0.68202\nRandom Forest【随机森林】|0.75453\nSupport Vector Machine(SVM)【支持向量机】|0.78892\n\nPS: higher is better\n\n各算法具体参数请参考代码\n" }, { "alpha_fraction": 0.6339113712310791, "alphanum_fraction": 0.6380860805511475, "avg_line_length": 36.51807403564453, "blob_id": "8a0c5c1c9bc3431f552e13bf4ce420c7a07017cc", "content_id": "acff7cd28ca8325a6becec1f7dcece43b9608bb8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3114, "license_type": "permissive", "max_line_length": 113, "num_lines": 83, "path": "/sf_crime/data/load_data.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport datetime\nimport zipfile\n\nimport pandas\nimport sklearn.preprocessing\n\n\ndef parse_time(timestamp):\n dt = datetime.datetime.strptime(timestamp, \"%Y-%m-%d %H:%M:%S\")\n return dt.hour, dt.day, dt.month, dt.year\n\n\ndef process(df):\n # address_features = df[\"Address\"].apply(lambda x: log_odds[x]).rename(columns=lambda x: \"LogOdds_\" + str(x))\n # df = df.join(address_features)\n # df[\"IsIntersection\"] = df[\"Address\"].apply(lambda x: 1 if \"/\" in x else 0)\n # df[\"LogOddsPA\"] = df[\"Address\"].apply(lambda x: log_odds_pa[x])\n\n sklearn.preprocessing.scale(df[[\"X\", \"Y\"]], copy=False)\n\n df[\"Time\"], df[\"Day\"], df[\"Month\"], df[\"Year\"] = zip(*df[\"Dates\"].apply(parse_time))\n\n dummy_dow = pandas.get_dummies(df[\"DayOfWeek\"], prefix=\"DayOfWeek\")\n dummy_pd = pandas.get_dummies(df[\"PdDistrict\"], prefix=\"PdDistrict\")\n df = df.join([dummy_dow, dummy_pd])\n\n df.drop([\"Dates\", \"DayOfWeek\", \"PdDistrict\", \"Address\"], axis=1, inplace=True)\n\n return df\n\n\nz = zipfile.ZipFile(\"./data/train.csv.zip\")\ntrain_df = pandas.read_csv(z.open(\"train.csv\"))\nz = zipfile.ZipFile(\"./data/test.csv.zip\")\ntest_df = pandas.read_csv(z.open(\"test.csv\"))\n\n# addresses = set(sorted(train_df[\"Address\"].unique()))\n# categories = sorted(train_df[\"Category\"].unique())\n# addr_cnt = train_df.groupby([\"Address\"]).size()\n# cat_cnt = train_df.groupby([\"Category\"]).size()\n# ac_cnt = train_df.groupby([\"Address\", \"Category\"]).size()\n# log_odds = {}\n# log_odds_pa = {}\n# default_log_odds = numpy.log(cat_cnt / len(train_df)) - numpy.log(1 - cat_cnt / len(train_df))\n# for addr in addresses:\n# pa = addr_cnt[addr] / len(train_df)\n# log_odds_pa[addr] = numpy.log(pa) - numpy.log(1 - pa)\n# log_odds[addr] = copy.deepcopy(default_log_odds)\n# for cat in ac_cnt[addr].keys():\n# if 2 < ac_cnt[addr][cat] < addr_cnt[addr]:\n# pa = ac_cnt[addr][cat] / addr_cnt[addr]\n# log_odds[addr][categories.index(cat)] = numpy.log(pa) - numpy.log(1 - pa)\n# log_odds[addr] = pandas.Series(log_odds[addr])\n#\n# new_addresses = set(sorted(test_df[\"Address\"].unique()))\n# new_addr_cnt = test_df.groupby(\"Address\").size()\n# in_both = new_addresses & addresses\n# only_new = new_addresses - in_both\n# for addr in only_new:\n# pa = new_addr_cnt[addr] / (len(train_df) + len(test_df))\n# 
log_odds_pa[addr] = numpy.log(pa) - numpy.log(1 - pa)\n# log_odds[addr] = copy.deepcopy(default_log_odds)\n# for addr in in_both:\n# pa = (addr_cnt[addr] + new_addr_cnt[addr]) / (len(train_df) + len(test_df))\n# log_odds_pa[addr] = numpy.log(pa) - numpy.log(1 - pa)\n\ndata_df = train_df.append(test_df).reset_index(drop=True)\ndata_df.drop([\"Id\", \"Category\", \"Descript\", \"Resolution\"], axis=1, inplace=True)\ndata_df = process(data_df)\n\nle = sklearn.preprocessing.LabelEncoder()\ntrain_df[\"Category\"] = le.fit_transform(train_df[\"Category\"])\n\ntarget = train_df[\"Category\"].astype('category')\nids = test_df[\"Id\"].values\n\ntrain = data_df[:train_df.shape[0]].values\ntest = data_df[train_df.shape[0]:].values\n" }, { "alpha_fraction": 0.7380191683769226, "alphanum_fraction": 0.7412140369415283, "avg_line_length": 19.866666793823242, "blob_id": "351a1e75dfaf81c6fcab2a20411fd45c8ad096b7", "content_id": "33e222ec85d46f514fd810da2966840198c2af4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "permissive", "max_line_length": 57, "num_lines": 15, "path": "/whats_cooking/multinomial_naive_bayes.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sklearn.naive_bayes\n\nimport submissions\nfrom data import *\n\nmnb = sklearn.naive_bayes.MultinomialNB()\nmnb.fit(train, target)\npred = mnb.predict(test)\n\nsubmissions.save_csv(pred, \"multinomial_naive_bayes.csv\")\n" }, { "alpha_fraction": 0.7136150002479553, "alphanum_fraction": 0.7441314458847046, "avg_line_length": 25.625, "blob_id": "7fb67da8307723de23b5e16a02101303276fb59f", "content_id": "cee499167565547957c40bf424f6b0da39abf444", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "permissive", "max_line_length": 115, "num_lines": 16, "path": "/airbnb_recruiting_new_user_bookings/gradient_boost_xgboost.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport xgboost\n\nimport submissions\nfrom data import *\n\ngbm = xgboost.XGBClassifier(n_estimators=300)\n# gbm = xgboost.XGBClassifier(max_depth=6, learning_rate=0.3, n_estimators=25, subsample=0.5, colsample_bytree=0.5)\ngbm.fit(train, target)\npred = gbm.predict_proba(test)\n\nsubmissions.save_csv(pred, \"gradient_boost_xgboost.csv\")\n" }, { "alpha_fraction": 0.6644295454025269, "alphanum_fraction": 0.6785980463027954, "avg_line_length": 34.28947448730469, "blob_id": "6edb97ae3fa1a1b6f49e979ff07190cbe5a7b967", "content_id": "60ef7d44a62e598484fceaad2331229f54ad7d10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "permissive", "max_line_length": 108, "num_lines": 38, "path": "/word2vec_nlp_tutorial/multi-layer_perceptron.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport keras\n# import keras.utils.visualize_util\n\nimport data\nimport 
process.bag_of_words\nimport submissions\n\nif __name__ == '__main__':\n input_dim = process.bag_of_words.train.shape[1]\n\n mlp = keras.models.Sequential()\n mlp.add(keras.layers.Dense(1024, input_dim=input_dim))\n mlp.add(keras.layers.Activation('relu'))\n mlp.add(keras.layers.Dropout(0.5))\n mlp.add(keras.layers.Dense(256))\n mlp.add(keras.layers.Activation('relu'))\n mlp.add(keras.layers.Dropout(0.5))\n mlp.add(keras.layers.Dense(1, activation='sigmoid'))\n\n mlp.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n print(mlp.summary())\n # keras.utils.visualize_util.plot(mlp, to_file='images/{file_name}.png'.format(file_name=__file__[:-3]),\n # show_shapes=True)\n\n mlp.fit(process.bag_of_words.train, data.target, batch_size=64, nb_epoch=2)\n pred = mlp.predict_classes(process.bag_of_words.test)\n\n submissions.save_csv(pred.flatten(), '{file_name}.csv'.format(file_name=__file__[:-3]))\n" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6552795171737671, "avg_line_length": 23.769229888916016, "blob_id": "d3785c4ca97d9681fe24e142c9585eb014f50ec3", "content_id": "df741175d74dc8660927b14b80299bed3f8c74b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "permissive", "max_line_length": 78, "num_lines": 13, "path": "/whats_cooking/submissions/save_csv.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport pandas\n\nfrom data import *\n\n\ndef save_csv(pred, file_name):\n ans = pandas.DataFrame({\"id\": ids, \"cuisine\": le.inverse_transform(pred)})\n ans[[\"id\", \"cuisine\"]].to_csv(\"./submissions/%s\" % file_name, index=False)\n" }, { "alpha_fraction": 0.7252631783485413, "alphanum_fraction": 0.7326315641403198, "avg_line_length": 32.92856979370117, "blob_id": "1eb5994ef8de47539089533fa16b7121afa23b2b", "content_id": "c7cd9a4a073b81b8cc5815417d08b69ed910bc9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "permissive", "max_line_length": 113, "num_lines": 28, "path": "/word2vec_nlp_tutorial/data/__init__.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport zipfile\n\nimport pandas\n\ntrain_zip = zipfile.ZipFile('./data/labeledTrainData.tsv.zip')\ntrain_df = pandas.read_csv(train_zip.open('labeledTrainData.tsv'), header=0, delimiter='\\t', quoting=3)\ntrain_zip.close()\n\ntest_zip = zipfile.ZipFile('./data/testData.tsv.zip')\ntest_df = pandas.read_csv(test_zip.open('testData.tsv'), header=0, delimiter='\\t', quoting=3)\ntest_zip.close()\n\nunlabeled_zip = zipfile.ZipFile('./data/unlabeledTrainData.tsv.zip')\nunlabeled_df = pandas.read_csv(unlabeled_zip.open('unlabeledTrainData.tsv'), header=0, delimiter='\\t', quoting=3)\nunlabeled_zip.close()\n\ntarget = train_df['sentiment'].astype('category')\nids = test_df['id'].values\n" }, { "alpha_fraction": 0.7032501101493835, "alphanum_fraction": 0.7244465351104736, "avg_line_length": 32.69841384887695, "blob_id": "1c0c2534b9fba1440fe43a66ec8bb9d4507580a8", "content_id": 
"31185f94c5d13755d81f4bcb3732ac5793b7f9be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2123, "license_type": "permissive", "max_line_length": 118, "num_lines": 63, "path": "/word2vec_nlp_tutorial/process/word_vectors.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import generators\nfrom __future__ import nested_scopes\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport gensim\nimport keras.preprocessing.text\nimport nltk\nimport numpy\nimport pandas\n\nimport data\nimport process\n\nword_vec_dim = 300\n\n\ndef build_word2vec():\n sentences = []\n for row in data.train_df['review'].append(data.unlabeled_df['review']):\n sentences_df = pandas.DataFrame(nltk.sent_tokenize(row.decode('utf-8').strip()), columns=['sentence'])\n sentences_df = process.raw_to_words(sentences_df, 'sentence')\n sentences += sentences_df['sentence'].tolist()\n\n model = gensim.models.Word2Vec(sentences, size=word_vec_dim, window=10, min_count=1, workers=1, seed=process.seed)\n return model\n\n\n# word2vec = build_word2vec()\nword2vec = gensim.models.Word2Vec.load_word2vec_format('./process/300features_10contexts.bin', binary=True)\nword2vec.init_sims(replace=True)\n\ndel data.unlabeled_df\ntrain_df = process.raw_to_texts(data.train_df, 'review', dictionary=word2vec.vocab)\ndel data.train_df\ntest_df = process.raw_to_texts(data.test_df, 'review', dictionary=word2vec.vocab)\ndel data.test_df\n\nsequence_tokenizer = keras.preprocessing.text.Tokenizer()\nsequence_tokenizer.fit_on_texts(line.encode('utf-8') for line in train_df['review'].values)\n\nmax_features = len(sequence_tokenizer.word_index)\n\ntrain = process.texts_to_sequences(train_df, 'review', sequence_tokenizer, maxlen=2500)\ndel train_df\ntest = process.texts_to_sequences(test_df, 'review', sequence_tokenizer, maxlen=2500)\ndel test_df\n\nweights = numpy.zeros((max_features + 1, word_vec_dim))\nfor word, index in sequence_tokenizer.word_index.items():\n # if index <= max_features and word in word2vec.vocab:\n # weights[index, :] = word2vec[word]\n if word in word2vec.vocab:\n weights[index, :] = word2vec[word]\n else:\n weights[index, :] = numpy.random.uniform(-0.25, 0.25, word_vec_dim)\ndel word2vec\ndel sequence_tokenizer\n" }, { "alpha_fraction": 0.6859344840049744, "alphanum_fraction": 0.7270391583442688, "avg_line_length": 30.775510787963867, "blob_id": "dc4882818953c396a81be066661acdac6ad5bb49", "content_id": "25f261ba7ec0adb3dcd2106881c235775931be60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1557, "license_type": "permissive", "max_line_length": 104, "num_lines": 49, "path": "/digit_recognizer/deep_bernoulli_restricted_boltzmann_machine.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy\nimport sklearn.neural_network\nimport sklearn.pipeline\n\nimport submissions\nfrom data import *\n\n\ndef label_to_feature(y):\n feature = [0] * 10\n feature[y] = 1\n return feature\n\ntarget = target.astype(numpy.uint8)\ntrain = train.astype(numpy.float32)\ntest = test.astype(numpy.float32)\n\ntrain /= 255\ntest /= 255\n\nrbm_1 = 
sklearn.neural_network.BernoulliRBM(n_components=500, learning_rate=0.06, n_iter=20, verbose=1)\nrbm_2 = sklearn.neural_network.BernoulliRBM(n_components=500, learning_rate=0.06, n_iter=20, verbose=1)\npl = sklearn.pipeline.Pipeline([(\"rbm_1\", rbm_1), (\"rbm_2\", rbm_2)])\npl.fit(train, target)\n\nnew_features = []\nfor example, label in zip(train, target):\n train_trans = pl.transform(example)[0]\n new_features.append(numpy.concatenate((train_trans, label_to_feature(label))))\n\nrbm_3 = sklearn.neural_network.BernoulliRBM(n_components=2000, learning_rate=0.06, n_iter=20, verbose=1)\nrbm_3.fit(new_features, target)\n\ntest_trans = pl.transform(test)\ntest_trans = numpy.concatenate((test_trans, [[0] * 10] * len(test_trans)), axis=1)\n\nrbm_aux = sklearn.neural_network.BernoulliRBM()\nrbm_aux.intercept_hidden_ = rbm_3.intercept_visible_\nrbm_aux.intercept_visible_ = rbm_3.intercept_hidden_\nrbm_aux.components_ = numpy.transpose(rbm_3.components_)\nresult = rbm_aux.transform(rbm_3.transform(test_trans))[:, -10:]\npred = numpy.argmax(result, axis=1)\n\nsubmissions.save_csv(pred, \"deep_bernoulli_restricted_boltzmann_machine.csv\")\n" }, { "alpha_fraction": 0.6938309073448181, "alphanum_fraction": 0.6968773603439331, "avg_line_length": 31.825000762939453, "blob_id": "e39fe0ba5926eb353ee0f7000f1c9ca2e78b62af", "content_id": "1ca73c18d1fe35df34fe03c568d871a0314b4591", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "permissive", "max_line_length": 117, "num_lines": 40, "path": "/whats_cooking/data/load_data.py", "repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport re\nimport zipfile\n\nimport nltk.stem\nimport pandas\nimport sklearn.feature_extraction.text\nimport sklearn.preprocessing\n\n\ndef clean_string(df):\n df[\"ingredients\"] = df[\"ingredients\"].map(lambda x: [re.sub(r\"[^\\w]\", \" \", y) for y in x])\n df[\"ingredients\"] = df[\"ingredients\"].map(lambda x: [z for y in x for z in y.split()])\n df[\"ingredients\"] = df[\"ingredients\"].map(lambda x: [nltk.stem.WordNetLemmatizer().lemmatize(y) for y in x])\n df[\"ingredients\"] = df[\"ingredients\"].map(lambda x: \" \".join(x))\n return df\n\n\nz = zipfile.ZipFile(\"./data/train.json.zip\")\ntrain_df = pandas.read_json(z.open(\"train.json\"))\nz = zipfile.ZipFile(\"./data/test.json.zip\")\ntest_df = pandas.read_json(z.open(\"test.json\"))\n\ntrain_df = clean_string(train_df)\ntest_df = clean_string(test_df)\n\nle = sklearn.preprocessing.LabelEncoder()\ntrain_df[\"cuisine\"] = le.fit_transform(train_df[\"cuisine\"])\n\ntarget = train_df[\"cuisine\"].astype(\"category\")\nids = test_df[\"id\"].values\n\nvectorizer = sklearn.feature_extraction.text.TfidfVectorizer(stop_words=\"english\", token_pattern=r\"\\w+\", max_df=0.57)\n\ntrain = vectorizer.fit_transform(train_df[\"ingredients\"]).toarray()\ntest = vectorizer.transform(test_df[\"ingredients\"]).toarray()\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6801075339317322, "avg_line_length": 23.799999237060547, "blob_id": "b1958793013b43cc03b606b03a406513c0d77531", "content_id": "9dae3597a9264139bcec5cabd5d02d5d9a89426a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "permissive", "max_line_length": 115, "num_lines": 15, "path": "/sf_crime/submissions/save_csv.py", 
"repo_name": "wjfwzzc/Kaggle_Script", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport gzip\n\nimport pandas\n\nfrom data import *\n\n\ndef save_csv(pred, file_name):\n with gzip.GzipFile(\"./submissions/%s.gz\" % file_name, mode=\"w\") as gzfile:\n pandas.DataFrame({\"Id\": ids}).join(pandas.DataFrame(pred, columns=le.classes_)).to_csv(gzfile, index=False)\n" } ]
53
libavg/mtc-poker5card
https://github.com/libavg/mtc-poker5card
df5be5647d2626604bbba83982e39524eef898b7
c95ce3849264509f006ff43c9df3fe9e2124bbc6
4c7aa6d2db5824787398872e2d3594973ad4e659
refs/heads/master
2020-04-25T00:45:57.874257
2011-08-12T23:15:26
2011-08-12T23:15:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5468506813049316, "alphanum_fraction": 0.5773425102233887, "avg_line_length": 39.47297286987305, "blob_id": "a20431c1f8f64c08384fc41173b07919d555b042", "content_id": "7991b5b62999a8ea2e8e6d528956040616909f1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8986, "license_type": "no_license", "max_line_length": 281, "num_lines": 222, "path": "/mtcPoker5CardDraw.py", "repo_name": "libavg/mtc-poker5card", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.5\n\nimport os, random\nfrom libavg import avg, button, Point2D, AVGApp, anim\nfrom libavg.utils import getMediaDir\nfrom pokereval import PokerEval\n\nfrom card import Card\nfrom hand import Hand\n\ng_player = avg.Player.get()\npokereval = PokerEval()\nmyHand = None\nholdarray = [\"hold1\", \"hold2\", \"hold3\", \"hold4\", \"hold5\"]\nkartenarray = [\"karte1\", \"karte2\", \"karte3\", \"karte4\", \"karte5\"]\nwintablearray = [\"RoyalFlush\", \"StFlush\", \"Quads\", \"FlHouse\", \"Flush\", \"Straight\", \"Trips\", \"TwoPair\", \"OnePair\"]\nwinpointsarray = [\"10000\", \"5000\", \"3000\", \"1000\", \"300\", \"200\", \"50\", \"20\", \"5\"]\n\nclass DealButton(button.Button):\n def __init__(self, onStartClick):\n dealNode = g_player.getElementByID(\"deal_button\")\n dealNode.active = True\n anim.fadeIn(dealNode, 800).start()\n button.Button.__init__(self, dealNode, onStartClick)\n def delete(self):\n dealNode = g_player.getElementByID(\"deal_button\")\n dealNode.active = False\n anim.fadeOut(dealNode, 800).start()\n button.Button.delete(self)\n \nclass StartButton(button.Button):\n def __init__(self, onStartClick):\n dealNode = g_player.getElementByID(\"go_button\")\n dealNode.active = True\n anim.fadeIn(dealNode, 800).start()\n button.Button.__init__(self, dealNode, onStartClick)\n def delete(self):\n dealNode = g_player.getElementByID(\"go_button\")\n dealNode.active = False\n anim.fadeOut(dealNode, 800).start()\n button.Button.delete(self)\n \nclass ExitButton(button.Button):\n def __init__(self, onStopClick):\n exitNode = g_player.getElementByID(\"exit_button\")\n exitNode.active = True\n anim.fadeIn(exitNode, 800).start()\n button.Button.__init__(self, exitNode, onStopClick)\n def delete(self):\n exitNode = g_player.getElementByID(\"exit_button\")\n exitNode.active = False\n anim.fadeOut(exitNode, 800).start()\n button.Button.delete(self)\n\ndef slurp(filename):\n filename = getMediaDir(__file__, filename)\n f = open(filename,'r')\n contents = f.read()\n f.close()\n return contents\n\nclass Game(AVGApp):\n multitouch = True\n def init(self):\n self._parentNode.mediadir = getMediaDir(__file__, '.')\n mainNode = g_player.createNode(slurp(\"mtc5Poker5CardDraw.avg\"))\n self._parentNode.appendChild(mainNode)\n self.exitButton = ExitButton(lambda e: self.leave())\n self.startButton = StartButton(self.startGame)\n\n def leave(self):\n AVGApp.leave(self)\n \n def _enter(self):\n pass\n\n def startGame(self, event):\n global myHand\n self.startButton.delete()\n self.startButton = None\n self.dealButton = DealButton(self.DealCards)\n self.turn = 1\n self.money = 1000\n g_player.getElementByID(\"credits\").text = \"Credits = \" + str(self.money)\n myHand = Hand()\n self.NewGame()\n self.SetPictures()\n self.ShowCards()\n \n def NewGame(self):\n global myHand\n myHand.holdKarte1 = 0\n myHand.holdKarte2 = 0\n myHand.holdKarte3 = 0\n myHand.holdKarte4 = 0\n myHand.holdKarte5 = 0\n for s in range(len(holdarray)):\n g_player.getElementByID(holdarray[s]).color = 
\"000000\"\n g_player.getElementByID(\"textzeile\").text = \"\"\n for s in range(len(wintablearray)):\n g_player.getElementByID(wintablearray[s]).color = \"FFFFFF\"\n \n def SetPictures(self):\n global myHand\n for i in range(0, 5):\n myHand.Cards[i].NumberToPicture()\n\n def ShowCards(self):\n global g_player\n for s in range(len(kartenarray)):\n dummykarte = myHand.Cards[s].getPicture()\n g_player.getElementByID(kartenarray[s]).href = dummykarte\n g_player.getElementByID(kartenarray[s]).height = 160\n g_player.getElementByID(kartenarray[s]).width = 100\n \n def DealCards(self, event):\n global g_player\n self.NewGame()\n self.money -= 5\n g_player.getElementByID(\"credits\").text = \"Credits = \" + str(self.money)\n zaehler = 0 \n for i in range(0, 10, 1):\n myHand.tenCards[i] = 0\n while zaehler < 10:\n dummynumber = random.randrange(1, 52, 1)\n if not dummynumber in myHand.tenCards:\n myHand.tenCards[zaehler] = dummynumber\n zaehler += 1\n self.dealButton = DealButton(self.ChangeCards)\n g_player.getElementByID(\"karte1\").setEventHandler(avg.CURSORDOWN, avg.MOUSE | avg.TOUCH , self.HoldKarte1)\n g_player.getElementByID(\"karte2\").setEventHandler(avg.CURSORDOWN, avg.MOUSE | avg.TOUCH , self.HoldKarte2)\n g_player.getElementByID(\"karte3\").setEventHandler(avg.CURSORDOWN, avg.MOUSE | avg.TOUCH , self.HoldKarte3)\n g_player.getElementByID(\"karte4\").setEventHandler(avg.CURSORDOWN, avg.MOUSE | avg.TOUCH , self.HoldKarte4)\n g_player.getElementByID(\"karte5\").setEventHandler(avg.CURSORDOWN, avg.MOUSE | avg.TOUCH , self.HoldKarte5)\n \n for i in range(0, 5, 1):\n myHand.Cards[i].Number = myHand.tenCards[i]\n myHand.Cards[i].NumberToPicture()\n self.ShowCards()\n \n def ChangeCards(self, event):\n if myHand.holdKarte1 == 0:\n myHand.Cards[0].Number = int(myHand.tenCards[5])\n if myHand.holdKarte2 == 0:\n myHand.Cards[1].Number = int(myHand.tenCards[6])\n if myHand.holdKarte3 == 0:\n myHand.Cards[2].Number = int(myHand.tenCards[7])\n if myHand.holdKarte4 == 0:\n myHand.Cards[3].Number = int(myHand.tenCards[8])\n if myHand.holdKarte5 == 0:\n myHand.Cards[4].Number = int(myHand.tenCards[9])\n for i in range(0, 5, 1):\n myHand.Cards[i].NumberToPicture() \n self.dealButton = DealButton(self.DealCards)\n self.ShowCards()\n self.CheckWin()\n \n def CheckWin(self):\n Akkr = [\"00\",\"Ac\",\"As\",\"Ah\",\"Ad\",\"Kc\",\"Ks\",\"Kh\",\"Kd\",\"Qc\",\"Qs\",\"Qh\",\"Qd\",\"Jc\",\"Js\",\"Jh\",\"Jd\",\"Tc\",\"Ts\",\"Th\",\"Td\",\"9c\",\"9s\",\"9h\",\"9d\",\"8c\",\"8s\",\"8h\",\"8d\",\"7c\",\"7s\",\"7h\",\"7d\",\"6c\",\"6s\",\"6h\",\"6d\",\"5c\",\"5s\",\"5h\",\"5d\",\"4c\",\"4s\",\"4h\",\"4d\",\"3c\",\"3s\",\"3h\",\"3d\",\"2c\",\"2s\",\"2h\",\"2d\"]\n hand = [Akkr[myHand.Cards[0].Number], Akkr[myHand.Cards[1].Number], Akkr[myHand.Cards[2].Number], Akkr[myHand.Cards[3].Number], Akkr[myHand.Cards[4].Number] ]\n# hand = [\"Ac\", \"As\", \"Ad\", \"Ts\", \"Tc\"]\n testmu = pokereval.best(\"hi\", hand)\n testmu2 = testmu[1]\n testmu3 = testmu2[0]\n if not testmu3 == \"NoPair\":\n node = g_player.getElementByID(str(testmu3))\n if node:\n node.color = \"FF0000\"\n g_player.getElementByID(\"textzeile\").text = \"You Win \" + str(winpointsarray[wintablearray.index(str(testmu3))]) + \" Credits\"\n self.money += int(winpointsarray[wintablearray.index(str(testmu3))])\n g_player.getElementByID(\"credits\").text = \"Credits = \" + str(self.money)\n else: \n g_player.getElementByID(\"textzeile\").text = \"Nothing...\"\n \n def HoldKarte1(self, event):\n global g_player, myHand\n if myHand.holdKarte1 == 0:\n 
myHand.holdKarte1 = 1\n g_player.getElementByID(\"hold1\").color = \"FFFFFF\"\n else:\n myHand.holdKarte1 = 0\n g_player.getElementByID(\"hold1\").color = \"000000\"\n \n def HoldKarte2(self, event):\n global g_player, myHand\n if myHand.holdKarte2 == 0:\n myHand.holdKarte2 = 1\n g_player.getElementByID(\"hold2\").color = \"FFFFFF\"\n else:\n myHand.holdKarte2 = 0\n g_player.getElementByID(\"hold2\").color = \"000000\"\n \n def HoldKarte3(self, event):\n global g_player, myHand\n if myHand.holdKarte3 == 0:\n myHand.holdKarte3 = 1\n g_player.getElementByID(\"hold3\").color = \"FFFFFF\"\n else:\n myHand.holdKarte3 = 0\n g_player.getElementByID(\"hold3\").color = \"000000\"\n \n def HoldKarte4(self, event):\n global g_player, myHand\n if myHand.holdKarte4 == 0:\n myHand.holdKarte4 = 1\n g_player.getElementByID(\"hold4\").color = \"FFFFFF\"\n else:\n myHand.holdKarte4 = 0\n g_player.getElementByID(\"hold4\").color = \"000000\"\n \n def HoldKarte5(self, event):\n global g_player, myHand\n if myHand.holdKarte5 == 0:\n myHand.holdKarte5 = 1\n g_player.getElementByID(\"hold5\").color = \"FFFFFF\"\n else:\n myHand.holdKarte5 = 0\n g_player.getElementByID(\"hold5\").color = \"000000\"\n\n\nif __name__=='__main__':\n Game.start(resolution=(1280,720))\n\n" }, { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.5920000076293945, "avg_line_length": 30.08333396911621, "blob_id": "19f12114ad07a3e60d91d6d77f764ec001bc2d8e", "content_id": "3002ca576b814d92ff3f5bc67e4de80b9d60d24d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 70, "num_lines": 12, "path": "/card.py", "repo_name": "libavg/mtc-poker5card", "src_encoding": "UTF-8", "text": "\n\nclass Card:\n def __init__(self):\n self.Number = 0\n self.Picture = \"media/cards/0.png\"\n def setNumber(self, number):\n self.Number = number\n def getNumber(self):\n return self.Number\n def getPicture(self):\n return self.Picture\n def NumberToPicture(self):\n self.Picture = \"media/cards/\" + str(self.getNumber()) + \".png\"\n" }, { "alpha_fraction": 0.49501660466194153, "alphanum_fraction": 0.5415282249450684, "avg_line_length": 26.363636016845703, "blob_id": "12b1976b61da22bcc3fd9441382408fef7fa1666", "content_id": "666f35fa2ea8f3deb2582b9d3eec528d9eb78bf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 61, "num_lines": 11, "path": "/hand.py", "repo_name": "libavg/mtc-poker5card", "src_encoding": "UTF-8", "text": "from card import Card\n\nclass Hand:\n def __init__(self):\n self.tenCards = range(0, 10, 1)\n self.Cards = [Card(), Card(), Card(), Card(), Card()]\n self.holdKarte1 = 0\n self.holdKarte2 = 0\n self.holdKarte3 = 0\n self.holdKarte4 = 0\n self.holdKarte5 = 0\n" } ]
3
angian00/poses_server
https://github.com/angian00/poses_server
814451b2f5498cdfe265119f33b1a84da084ec43
28f129e6a40cb5884773b93e9b0b89ae11e42995
e038fe9e9827df50a96e0f14bfead0dcbf2e534f
refs/heads/master
2021-01-19T23:01:14.197999
2017-04-23T17:03:45
2017-04-23T17:03:45
88,910,198
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5796505808830261, "alphanum_fraction": 0.5878725647926331, "avg_line_length": 24.578947067260742, "blob_id": "952697d6247419ad6da4992753f15013aa956297", "content_id": "b4202573c806e87e0b302bb69060ff509b30ab8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 74, "num_lines": 38, "path": "/cgi-bin/random_pose.py", "repo_name": "angian00/poses_server", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport random\n\nindex_file_path = \"index.txt\"\n\n\nif __name__ == \"__main__\":\n filenames = [line.rstrip('\\n') for line in open(index_file_path)]\n\n ## DEBUG\n #print \"Content-type: text/plain\\n\"\n #for f in filenames:\n # print f\n \n filename = \"/\" + random.choice(filenames)\n\n print \"Content-type: text/html\\n\"\n print '<html>'\n print '<head>'\n print '<title>Random pose generator</title>'\n print '<link rel=\"stylesheet\" type=\"text/css\" href=\"/mystyle.css\">'\n print '<script src=\"/js/myscript.js\" type=\"text/javascript\"></script>'\n print '</head>'\n\n print '<body>'\n print '<img src=\"%s\">' % filename\n\n print '<div id=\"timerLinks\">'\n print '<a href=\"?time=30\" class=\"timer-link\">30 sec pose</a>'\n print '<a href=\"?time=120\" class=\"timer-link\">2min pose</a>'\n print '</div>'\n\n print '<div id=\"timeText\" />'\n print '<script>startCountdown()</script>'\n\n print '</body>'\n print '</html>'\n\n" }, { "alpha_fraction": 0.8703703880310059, "alphanum_fraction": 0.8703703880310059, "avg_line_length": 53, "blob_id": "42451098448b1f1520a65505fe65b1f4f28d5252", "content_id": "8dde1aeddeaae632414342a1d33ec3ce8134940e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 54, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/data/README.txt", "repo_name": "angian00/poses_server", "src_encoding": "UTF-8", "text": "Placeholder for subdirectories containin jpeg images.\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 21, "blob_id": "04030f22a2be2663339399f8a02305353ffc1bae", "content_id": "ffcf5bdea85557ec568ce7a5c7b3bde5479f10e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "no_license", "max_line_length": 53, "num_lines": 3, "path": "/serve_cgi.sh", "repo_name": "angian00/poses_server", "src_encoding": "UTF-8", "text": "#/!/bin/sh\n\npython -c \"import CGIHTTPServer;CGIHTTPServer.test()\"\n" }, { "alpha_fraction": 0.7768924236297607, "alphanum_fraction": 0.7768924236297607, "avg_line_length": 48.79999923706055, "blob_id": "d48d17b4c57dfe28f7f63e411460e1b65ca15bd8", "content_id": "c4dfc122c33328d8b0977a70ad7cd0f778e21ec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 251, "license_type": "no_license", "max_line_length": 89, "num_lines": 5, "path": "/README.txt", "repo_name": "angian00/poses_server", "src_encoding": "UTF-8", "text": "A very simple Python cgi-bin server producing random images from data/ subfolders.\n\n(I use it to produce random drawing poses from the excellent Proko sets).\n\nNB: auto playback of sound effects does not seem to be enabled (by design) on iOS Safari.\n\n\n" }, { "alpha_fraction": 0.592792809009552, "alphanum_fraction": 0.592792809009552, "avg_line_length": 19.55555534362793, "blob_id": 
"ac1d805125895e6f69df08703e2afc39e50b34d5", "content_id": "40ad40288c88b94d90c764b4503009653002e5b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/build_index.py", "repo_name": "angian00/poses_server", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport os.path\n\nindex_file_path = \"index.txt\"\n\n\ndef build_file_list():\n root_image_dir = \"data/poses\"\n file_list = []\n\n for root, subdirs, files in os.walk(root_image_dir):\n for filename in files:\n full_file_path = os.path.join(root, filename)\n file_list.append(full_file_path)\n\n return file_list\n\n\nif __name__ == \"__main__\":\n filenames = build_file_list()\n \n\n with open(index_file_path, 'w') as index_file:\n for fn in filenames:\n print >>index_file, fn\n" }, { "alpha_fraction": 0.633281946182251, "alphanum_fraction": 0.6463790535926819, "avg_line_length": 20.278688430786133, "blob_id": "736b41543c5c4d2aa5a52fea1169b2ddaacabc49", "content_id": "9fef30d05b9a5ff8dee04c32e4a7ebb9c0510668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 85, "num_lines": 61, "path": "/js/myscript.js", "repo_name": "angian00/poses_server", "src_encoding": "UTF-8", "text": "var currTime;\nvar timer;\n\nfunction startCountdown() {\n\tvar timeStr = findGetParameter(\"time\");\n\tif (timeStr == null)\n\t\treturn;\n\n\tvar timeAmount = parseInt(timeStr);\n\tif (isNaN(timeAmount))\n\t\treturn;\n\n\tcurrTime = timeAmount;\n\tdocument.getElementById('timeText').classList.remove(\"blinking\");\n\tupdateTimeText();\n\n\ttimer = setInterval(function() {\n\t\tcurrTime = currTime - 1;\n\n\t\tif (currTime >= 0) {\n\t\t\tupdateTimeText();\n\t\t} else {\n\t\t\tclearInterval(timer);\n\t\t\tdocument.getElementById(\"timeText\").innerHTML = \"TIMER EXPIRED\";\n\t\t\tdocument.getElementById('timeText').classList.add(\"blinking\");\n\t\t\tplaySound();\n\t\t}\n\t}, 1000);\n}\n\n\nfunction findGetParameter(paramName) {\n var result = null, tmp = [];\n location.search.substr(1)\n .split(\"&\")\n .forEach(function (item) {\n tmp = item.split(\"=\");\n if (tmp[0] === paramName) result = decodeURIComponent(tmp[1]);\n });\n\n return result;\n}\n\n\nfunction updateTimeText() {\n\tvar mins = Math.floor(currTime / 60);\n\tvar secs = currTime % 60;\n\tdocument.getElementById('timeText').innerHTML = zeropad(mins) + \":\" + zeropad(secs);\n}\n\nfunction zeropad(num) {\n\tvar str = \"\" + num;\n\tvar pad = \"00\";\n\treturn pad.substring(0, pad.length - str.length) + str;\n}\n\n\nfunction playSound() {\n \tvar audio = new Audio('/stop.mp3');\n\taudio.play();\n}\n" } ]
6
phoenix1796/netmapWin
https://github.com/phoenix1796/netmapWin
663b976a40da5e44c7e4096b8fac5c0fce5114eb
740c96fe3371984c697ef844e062af2c2355ff72
ee855d0efc09dc4fd26dcbd04caa0bc32a59278c
refs/heads/master
2021-01-25T09:31:32.498161
2017-07-10T07:21:18
2017-07-10T07:21:18
93,842,538
8
1
null
null
null
null
null
[ { "alpha_fraction": 0.5948466062545776, "alphanum_fraction": 0.6097177863121033, "avg_line_length": 23.3138427734375, "blob_id": "b18219a69122b86e7323b4dd5e53df7ccddffe6b", "content_id": "14ea8eed6014e49cccbfa8b2e8b50b0221d77b0c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 20375, "license_type": "permissive", "max_line_length": 96, "num_lines": 838, "path": "/apps/lb/lb.c", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2016 Broala and Universita` di Pisa. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#include <stdio.h>\n#include <string.h>\n#include <ctype.h>\n#include <stdbool.h>\n#include <inttypes.h>\n#include <syslog.h>\n\n#define NETMAP_WITH_LIBS\n#include <net/netmap_user.h>\n#include <sys/poll.h>\n\n#include <netinet/in.h>\t\t/* htonl */\n\n#include <pthread.h>\n\n#include \"pkt_hash.h\"\n#include \"ctrs.h\"\n\n\n/*\n * use our version of header structs, rather than bringing in a ton\n * of platform specific ones\n */\n#ifndef ETH_ALEN\n#define ETH_ALEN 6\n#endif\n\nstruct compact_eth_hdr {\n\tunsigned char h_dest[ETH_ALEN];\n\tunsigned char h_source[ETH_ALEN];\n\tu_int16_t h_proto;\n};\n\nstruct compact_ip_hdr {\n\tu_int8_t ihl:4, version:4;\n\tu_int8_t tos;\n\tu_int16_t tot_len;\n\tu_int16_t id;\n\tu_int16_t frag_off;\n\tu_int8_t ttl;\n\tu_int8_t protocol;\n\tu_int16_t check;\n\tu_int32_t saddr;\n\tu_int32_t daddr;\n};\n\nstruct compact_ipv6_hdr {\n\tu_int8_t priority:4, version:4;\n\tu_int8_t flow_lbl[3];\n\tu_int16_t payload_len;\n\tu_int8_t nexthdr;\n\tu_int8_t hop_limit;\n\tstruct in6_addr saddr;\n\tstruct in6_addr daddr;\n};\n\n#define MAX_IFNAMELEN \t64\n#define DEF_OUT_PIPES \t2\n#define DEF_EXTRA_BUFS \t0\n#define DEF_BATCH\t2048\n#define DEF_WAIT_LINK\t2\n#define DEF_SYSLOG_INT\t600\n#define BUF_REVOKE\t100\n\nstruct {\n\tchar ifname[MAX_IFNAMELEN];\n\tchar base_name[MAX_IFNAMELEN];\n\tint netmap_fd;\n\tuint16_t output_rings;\n\tuint16_t num_groups;\n\tuint32_t extra_bufs;\n\tuint16_t batch;\n\tint syslog_interval;\n\tint wait_link;\n} glob_arg;\n\n/*\n * the overflow queue is a circular queue of buffers\n */\nstruct overflow_queue {\n\tchar name[MAX_IFNAMELEN];\n\tstruct netmap_slot 
*slots;\n\tuint32_t head;\n\tuint32_t tail;\n\tuint32_t n;\n\tuint32_t size;\n};\n\nstruct overflow_queue *freeq;\n\nstatic inline int\noq_full(struct overflow_queue *q)\n{\n\treturn q->n >= q->size;\n}\n\nstatic inline int\noq_empty(struct overflow_queue *q)\n{\n\treturn q->n <= 0;\n}\n\nstatic inline void\noq_enq(struct overflow_queue *q, const struct netmap_slot *s)\n{\n\tif (unlikely(oq_full(q))) {\n\t\tD(\"%s: queue full!\", q->name);\n\t\tabort();\n\t}\n\tq->slots[q->tail] = *s;\n\tq->n++;\n\tq->tail++;\n\tif (q->tail >= q->size)\n\t\tq->tail = 0;\n}\n\nstatic inline struct netmap_slot\noq_deq(struct overflow_queue *q)\n{\n\tstruct netmap_slot s = q->slots[q->head];\n\tif (unlikely(oq_empty(q))) {\n\t\tD(\"%s: queue empty!\", q->name);\n\t\tabort();\n\t}\n\tq->n--;\n\tq->head++;\n\tif (q->head >= q->size)\n\t\tq->head = 0;\n\treturn s;\n}\n\nstatic volatile int do_abort = 0;\n\nuint64_t dropped = 0;\nuint64_t forwarded = 0;\nuint64_t non_ip = 0;\n\nstruct port_des {\n\tstruct my_ctrs ctr;\n\tunsigned int last_sync;\n\tstruct overflow_queue *oq;\n\tstruct nm_desc *nmd;\n\tstruct netmap_ring *ring;\n\tstruct group_des *group;\n};\n\nstruct port_des *ports;\n\n/* each group of pipes receives all the packets */\nstruct group_des {\n\tchar pipename[MAX_IFNAMELEN];\n\tstruct port_des *ports;\n\tint first_id;\n\tint nports;\n\tint last;\n\tint custom_port;\n};\n\nstruct group_des *groups;\n\nstatic void *\nprint_stats(void *arg)\n{\n\tint npipes = glob_arg.output_rings;\n\tint sys_int = 0;\n\t(void)arg;\n\tstruct my_ctrs cur, prev;\n\tchar b1[40], b2[40];\n\tstruct my_ctrs *pipe_prev;\n\n\tpipe_prev = calloc(npipes, sizeof(struct my_ctrs));\n\tif (pipe_prev == NULL) {\n\t\tD(\"out of memory\");\n\t\texit(1);\n\t}\n\n\tmemset(&prev, 0, sizeof(prev));\n\tgettimeofday(&prev.t, NULL);\n\twhile (!do_abort) {\n\t\tint j, dosyslog = 0;\n\t\tuint64_t pps, dps, usec;\n\t\tstruct my_ctrs x;\n\n\t\tmemset(&cur, 0, sizeof(cur));\n\t\tusec = wait_for_next_report(&prev.t, &cur.t, 1000);\n\n\t\tif (++sys_int == glob_arg.syslog_interval) {\n\t\t\tdosyslog = 1;\n\t\t\tsys_int = 0;\n\t\t}\n\n\t\tfor (j = 0; j < npipes; ++j) {\n\t\t\tstruct port_des *p = &ports[j];\n\n\t\t\tcur.pkts += p->ctr.pkts;\n\t\t\tcur.drop += p->ctr.drop;\n\n\t\t\tx.pkts = p->ctr.pkts - pipe_prev[j].pkts;\n\t\t\tx.drop = p->ctr.drop - pipe_prev[j].drop;\n\t\t\tpps = (x.pkts*1000000 + usec/2) / usec;\n\t\t\tdps = (x.drop*1000000 + usec/2) / usec;\n\t\t\tprintf(\"%s/%s|\", norm(b1, pps), norm(b2, dps));\n\t\t\tpipe_prev[j] = p->ctr;\n\n\t\t\tif (dosyslog) {\n\t\t\t\tsyslog(LOG_INFO,\n\t\t\t\t\t\"{\"\n\t\t\t\t\t\t\"\\\"interface\\\":\\\"%s\\\",\"\n\t\t\t\t\t\t\"\\\"output_ring\\\":%\"PRIu16\",\"\n\t\t\t\t\t\t\"\\\"packets_forwarded\\\":%\"PRIu64\",\"\n\t\t\t\t\t\t\"\\\"packets_dropped\\\":%\"PRIu64\n\t\t\t\t\t\"}\", glob_arg.ifname, j, p->ctr.pkts, p->ctr.drop);\n\t\t\t}\n\t\t}\n\t\tprintf(\"\\n\");\n\t\tif (dosyslog) {\n\t\t\tsyslog(LOG_INFO,\n\t\t\t\t\"{\"\n\t\t\t\t\t\"\\\"interface\\\":\\\"%s\\\",\"\n\t\t\t\t\t\"\\\"output_ring\\\":null,\"\n\t\t\t\t\t\"\\\"packets_forwarded\\\":%\"PRIu64\",\"\n\t\t\t\t\t\"\\\"packets_dropped\\\":%\"PRIu64\",\"\n\t\t\t\t\t\"\\\"non_ip_packets\\\":%\"PRIu64\n\t\t\t\t\"}\", glob_arg.ifname, forwarded, dropped, non_ip);\n\t\t}\n\t\tx.pkts = cur.pkts - prev.pkts;\n\t\tx.drop = cur.drop - prev.drop;\n\t\tpps = (x.pkts*1000000 + usec/2) / usec;\n\t\tdps = (x.drop*1000000 + usec/2) / usec;\n\t\tprintf(\"===> aggregate %spps %sdps\\n\", norm(b1, pps), norm(b2, dps));\n\t\tprev = 
cur;\n\t}\n\n\tfree(pipe_prev);\n\n\treturn NULL;\n}\n\nstatic void\nfree_buffers(void)\n{\n\tint i, tot = 0;\n\tstruct port_des *rxport = &ports[glob_arg.output_rings];\n\n\t/* build a netmap free list with the buffers in all the overflow queues */\n\tfor (i = 0; i < glob_arg.output_rings + 1; i++) {\n\t\tstruct port_des *cp = &ports[i];\n\t\tstruct overflow_queue *q = cp->oq;\n\n\t\tif (!q)\n\t\t\tcontinue;\n\n\t\twhile (q->n) {\n\t\t\tstruct netmap_slot s = oq_deq(q);\n\t\t\tuint32_t *b = (uint32_t *)NETMAP_BUF(cp->ring, s.buf_idx);\n\n\t\t\t*b = rxport->nmd->nifp->ni_bufs_head;\n\t\t\trxport->nmd->nifp->ni_bufs_head = s.buf_idx;\n\t\t\ttot++;\n\t\t}\n\t}\n\tD(\"added %d buffers to netmap free list\", tot);\n\n\tfor (i = 0; i < glob_arg.output_rings + 1; ++i) {\n\t\tnm_close(ports[i].nmd);\n\t}\n}\n\n\nstatic void sigint_h(int sig)\n{\n\t(void)sig;\t\t/* UNUSED */\n\tdo_abort = 1;\n\tsignal(SIGINT, SIG_DFL);\n}\n\nvoid usage()\n{\n\tprintf(\"usage: lb [options]\\n\");\n\tprintf(\"where options are:\\n\");\n\tprintf(\" -i iface \tinterface name (required)\\n\");\n\tprintf(\" -p [prefix:]npipes\tadd a new group of output pipes\\n\");\n\tprintf(\" -B nbufs \tnumber of extra buffers (default: %d)\\n\", DEF_EXTRA_BUFS);\n\tprintf(\" -b batch \tbatch size (default: %d)\\n\", DEF_BATCH);\n\tprintf(\" -w seconds \twait for link up (default: %d)\\n\", DEF_WAIT_LINK);\n\tprintf(\" -s seconds \tseconds between syslog messages (default: %d)\\n\",\n\t\t\tDEF_SYSLOG_INT);\n\texit(0);\n}\n\nstatic int\nparse_pipes(char *spec)\n{\n\tchar *end = index(spec, ':');\n\tstatic int max_groups = 0;\n\tstruct group_des *g;\n \n\tND(\"spec %s num_groups %d\", spec, glob_arg.num_groups);\n\tif (max_groups < glob_arg.num_groups + 1) {\n\t\tsize_t size = sizeof(*g) * (glob_arg.num_groups + 1);\n\t\tgroups = realloc(groups, size);\n\t\tif (groups == NULL) {\n\t\t\tD(\"out of memory\");\n\t\t\treturn 1;\n\t\t}\n\t}\n\tg = &groups[glob_arg.num_groups];\n\tmemset(g, 0, sizeof(*g));\n\n\tif (end != NULL) {\n\t\tif (end - spec > MAX_IFNAMELEN - 8) {\n\t\t\tD(\"name '%s' too long\", spec);\n\t\t\treturn 1;\n\t\t}\n\t\tif (end == spec) {\n\t\t\tD(\"missing prefix before ':' in '%s'\", spec);\n\t\t\treturn 1;\n\t\t}\n\t\tstrncpy(g->pipename, spec, end - spec);\n\t\tg->custom_port = 1;\n\t\tend++;\n\t} else {\n\t\t/* no prefix, this group will use the\n\t\t * name of the input port.\n\t\t * This will be set in init_groups(),\n\t\t * since here the input port may still\n\t\t * be uninitialized\n\t\t */\n\t\tend = spec;\n\t}\n\tif (*end == '\\0') {\n\t\tg->nports = DEF_OUT_PIPES;\n\t} else {\n\t\tg->nports = atoi(end);\n\t\tif (g->nports < 1) {\n\t\t\tD(\"invalid number of pipes '%s' (must be at least 1)\", end);\n\t\t\treturn 1;\n\t\t}\n\t}\n\tglob_arg.output_rings += g->nports;\n\tglob_arg.num_groups++;\n\treturn 0;\n}\n\n/* complete the initialization of the groups data structure */\nvoid init_groups(void)\n{\n\tint i, j, t = 0;\n\tstruct group_des *g = NULL;\n\tfor (i = 0; i < glob_arg.num_groups; i++) {\n\t\tg = &groups[i];\n\t\tg->ports = &ports[t];\n\t\tfor (j = 0; j < g->nports; j++)\n\t\t\tg->ports[j].group = g;\n\t\tt += g->nports;\n\t\tif (!g->custom_port)\n\t\t\tstrcpy(g->pipename, glob_arg.base_name);\n\t\tfor (j = 0; j < i; j++) {\n\t\t\tstruct group_des *h = &groups[j];\n\t\t\tif (!strcmp(h->pipename, g->pipename))\n\t\t\t\tg->first_id += h->nports;\n\t\t}\n\t}\n\tg->last = 1;\n}\n\n/* push the packet described by slot rs to the group g.\n * This may cause other buffers to be pushed down the\n * chain headed by 
g.\n * Return a free buffer.\n */\nuint32_t forward_packet(struct group_des *g, struct netmap_slot *rs)\n{\n\tuint32_t hash = rs->ptr;\n\tuint32_t output_port = hash % g->nports;\n\tstruct port_des *port = &g->ports[output_port];\n\tstruct netmap_ring *ring = port->ring;\n\tstruct overflow_queue *q = port->oq;\n\n\t/* Move the packet to the output pipe, unless there is\n\t * either no space left on the ring, or there is some\n\t * packet still in the overflow queue (since those must\n\t * take precedence over the new one)\n\t*/\n\tif (nm_ring_space(ring) && (q == NULL || oq_empty(q))) {\n\t\tstruct netmap_slot *ts = &ring->slot[ring->cur];\n\t\tstruct netmap_slot old_slot = *ts;\n\t\tuint32_t free_buf;\n\n\t\tts->buf_idx = rs->buf_idx;\n\t\tts->len = rs->len;\n\t\tts->flags |= NS_BUF_CHANGED;\n\t\tts->ptr = rs->ptr;\n\t\tring->head = ring->cur = nm_ring_next(ring, ring->cur);\n\t\tport->ctr.pkts++;\n\t\tforwarded++;\n\t\tif (old_slot.ptr && !g->last) {\n\t\t\t/* old slot not empty and we are not the last group:\n\t\t\t * push it further down the chain\n\t\t\t */\n\t\t\tfree_buf = forward_packet(g + 1, &old_slot);\n\t\t} else {\n\t\t\t/* just return the old slot buffer: it is\n\t\t\t * either empty or already seen by everybody\n\t\t\t */\n\t\t\tfree_buf = old_slot.buf_idx;\n\t\t}\n\n\t\treturn free_buf;\n\t}\n\n\t/* use the overflow queue, if available */\n\tif (q == NULL || oq_full(q)) {\n\t\t/* no space left on the ring and no overflow queue\n\t\t * available: we are forced to drop the packet\n\t\t */\n\t\tdropped++;\n\t\tport->ctr.drop++;\n\t\treturn rs->buf_idx;\n\t}\n\n\toq_enq(q, rs);\n\n\t/*\n\t * we cannot continue down the chain and we need to\n\t * return a free buffer now. We take it from the free queue.\n\t */\n\tif (oq_empty(freeq)) {\n\t\t/* the free queue is empty. 
Revoke some buffers\n\t\t * from the longest overflow queue\n\t\t */\n\t\tuint32_t j;\n\t\tstruct port_des *lp = &ports[0];\n\t\tuint32_t max = lp->oq->n;\n\n\t\t/* let lp point to the port with the longest queue */\n\t\tfor (j = 1; j < glob_arg.output_rings; j++) {\n\t\t\tstruct port_des *cp = &ports[j];\n\t\t\tif (cp->oq->n > max) {\n\t\t\t\tlp = cp;\n\t\t\t\tmax = cp->oq->n;\n\t\t\t}\n\t\t}\n\n\t\t/* move the oldest BUF_REVOKE buffers from the\n\t\t * lp queue to the free queue\n\t\t */\n\t\t// XXX optimize this cycle\n\t\tfor (j = 0; lp->oq->n && j < BUF_REVOKE; j++) {\n\t\t\tstruct netmap_slot tmp = oq_deq(lp->oq);\n\t\t\toq_enq(freeq, &tmp);\n\t\t}\n\n\t\tND(1, \"revoked %d buffers from %s\", j, lq->name);\n\t\tlp->ctr.drop += j;\n\t\tdropped += j;\n\t}\n\n\treturn oq_deq(freeq).buf_idx;\n}\n\nint main(int argc, char **argv)\n{\n\tint ch;\n\tuint32_t i;\n\tint rv;\n\tunsigned int iter = 0;\n\n\tglob_arg.ifname[0] = '\\0';\n\tglob_arg.output_rings = 0;\n\tglob_arg.batch = DEF_BATCH;\n\tglob_arg.wait_link = DEF_WAIT_LINK;\n\tglob_arg.syslog_interval = DEF_SYSLOG_INT;\n\n\twhile ( (ch = getopt(argc, argv, \"i:p:b:B:s:\")) != -1) {\n\t\tswitch (ch) {\n\t\tcase 'i':\n\t\t\tD(\"interface is %s\", optarg);\n\t\t\tif (strlen(optarg) > MAX_IFNAMELEN - 8) {\n\t\t\t\tD(\"ifname too long %s\", optarg);\n\t\t\t\treturn 1;\n\t\t\t}\n\t\t\tif (strncmp(optarg, \"netmap:\", 7) && strncmp(optarg, \"vale\", 4)) {\n\t\t\t\tsprintf(glob_arg.ifname, \"netmap:%s\", optarg);\n\t\t\t} else {\n\t\t\t\tstrcpy(glob_arg.ifname, optarg);\n\t\t\t}\n\t\t\tbreak;\n\n\t\tcase 'p':\n\t\t\tif (parse_pipes(optarg)) {\n\t\t\t\tusage();\n\t\t\t\treturn 1;\n\t\t\t}\n\t\t\tbreak;\n\n\t\tcase 'B':\n\t\t\tglob_arg.extra_bufs = atoi(optarg);\n\t\t\tD(\"requested %d extra buffers\", glob_arg.extra_bufs);\n\t\t\tbreak;\n\n\t\tcase 'b':\n\t\t\tglob_arg.batch = atoi(optarg);\n\t\t\tD(\"batch is %d\", glob_arg.batch);\n\t\t\tbreak;\n\n\t\tcase 's':\n\t\t\tglob_arg.syslog_interval = atoi(optarg);\n\t\t\tD(\"syslog interval is %d\", glob_arg.syslog_interval);\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\tD(\"bad option %c %s\", ch, optarg);\n\t\t\tusage();\n\t\t\treturn 1;\n\n\t\t}\n\t}\n\n\tif (glob_arg.ifname[0] == '\\0') {\n\t\tD(\"missing interface name\");\n\t\tusage();\n\t\treturn 1;\n\t}\n\n\t/* extract the base name */\n\tchar *nscan = strncmp(glob_arg.ifname, \"netmap:\", 7) ?\n\t\t\tglob_arg.ifname : glob_arg.ifname + 7;\n\tstrncpy(glob_arg.base_name, nscan, MAX_IFNAMELEN);\n\tfor (nscan = glob_arg.base_name; *nscan && !index(\"-*^{}/@\", *nscan); nscan++)\n\t\t;\n\t*nscan = '\\0';\t\n\n\tif (glob_arg.num_groups == 0)\n\t\tparse_pipes(\"\");\n\n\tsetlogmask(LOG_UPTO(LOG_INFO));\n\topenlog(\"lb\", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);\n\n\tuint32_t npipes = glob_arg.output_rings;\n\n\n\tpthread_t stat_thread;\n\n\tports = calloc(npipes + 1, sizeof(struct port_des));\n\tif (!ports) {\n\t\tD(\"failed to allocate the stats array\");\n\t\treturn 1;\n\t}\n\tstruct port_des *rxport = &ports[npipes];\n\tinit_groups();\n\n\tif (pthread_create(&stat_thread, NULL, print_stats, NULL) == -1) {\n\t\tD(\"unable to create the stats thread: %s\", strerror(errno));\n\t\treturn 1;\n\t}\n\n\n\t/* we need base_req to specify pipes and extra bufs */\n\tstruct nmreq base_req;\n\tmemset(&base_req, 0, sizeof(base_req));\n\n\tbase_req.nr_arg1 = npipes;\n\tbase_req.nr_arg3 = glob_arg.extra_bufs;\n\n\trxport->nmd = nm_open(glob_arg.ifname, &base_req, 0, NULL);\n\n\tif (rxport->nmd == NULL) {\n\t\tD(\"cannot open %s\", glob_arg.ifname);\n\t\treturn 
(1);\n\t} else {\n\t\tD(\"successfully opened %s (tx rings: %u)\", glob_arg.ifname,\n\t\t rxport->nmd->req.nr_tx_slots);\n\t}\n\n\tuint32_t extra_bufs = rxport->nmd->req.nr_arg3;\n\tstruct overflow_queue *oq = NULL;\n\t/* reference ring to access the buffers */\n\trxport->ring = NETMAP_RXRING(rxport->nmd->nifp, 0);\n\n\tif (!glob_arg.extra_bufs)\n\t\tgoto run;\n\n\tD(\"obtained %d extra buffers\", extra_bufs);\n\tif (!extra_bufs)\n\t\tgoto run;\n\n\t/* one overflow queue for each output pipe, plus one for the\n\t * free extra buffers\n\t */\n\toq = calloc(npipes + 1, sizeof(struct overflow_queue));\n\tif (!oq) {\n\t\tD(\"failed to allocated overflow queues descriptors\");\n\t\tgoto run;\n\t}\n\n\tfreeq = &oq[npipes];\n\trxport->oq = freeq;\n\n\tfreeq->slots = calloc(extra_bufs, sizeof(struct netmap_slot));\n\tif (!freeq->slots) {\n\t\tD(\"failed to allocate the free list\");\n\t}\n\tfreeq->size = extra_bufs;\n\tsnprintf(freeq->name, MAX_IFNAMELEN, \"free queue\");\n\n\t/*\n\t * the list of buffers uses the first uint32_t in each buffer\n\t * as the index of the next buffer.\n\t */\n\tuint32_t scan;\n\tfor (scan = rxport->nmd->nifp->ni_bufs_head;\n\t scan;\n\t scan = *(uint32_t *)NETMAP_BUF(rxport->ring, scan))\n\t{\n\t\tstruct netmap_slot s;\n\t\ts.len = s.flags = 0;\n\t\ts.ptr = 0;\n\t\ts.buf_idx = scan;\n\t\tND(\"freeq <- %d\", s.buf_idx);\n\t\toq_enq(freeq, &s);\n\t}\n\n\n\tif (freeq->n != extra_bufs) {\n\t\tD(\"something went wrong: netmap reported %d extra_bufs, but the free list contained %d\",\n\t\t\t\textra_bufs, freeq->n);\n\t\treturn 1;\n\t}\n\trxport->nmd->nifp->ni_bufs_head = 0;\n\nrun:\n\tatexit(free_buffers);\n\n\tint j, t = 0;\n\tfor (j = 0; j < glob_arg.num_groups; j++) {\n\t\tstruct group_des *g = &groups[j];\n\t\tint k;\n\t\tfor (k = 0; k < g->nports; ++k) {\n\t\t\tstruct port_des *p = &g->ports[k];\n\t\t\tchar interface[25];\n\t\t\tsprintf(interface, \"netmap:%s{%d/xT@%d\", g->pipename, g->first_id + k,\n\t\t\t\t\trxport->nmd->req.nr_arg2);\n\t\t\tD(\"opening pipe named %s\", interface);\n\n\t\t\tp->nmd = nm_open(interface, NULL, 0, rxport->nmd);\n\n\t\t\tif (p->nmd == NULL) {\n\t\t\t\tD(\"cannot open %s\", interface);\n\t\t\t\treturn (1);\n\t\t\t} else {\n\t\t\t\tD(\"successfully opened pipe #%d %s (tx slots: %d)\",\n\t\t\t\t k + 1, interface, p->nmd->req.nr_tx_slots);\n\t\t\t\tp->ring = NETMAP_TXRING(p->nmd->nifp, 0);\n\t\t\t}\n\t\t\tD(\"zerocopy %s\",\n\t\t\t (rxport->nmd->mem == p->nmd->mem) ? 
\"enabled\" : \"disabled\");\n\n\t\t\tif (extra_bufs) {\n\t\t\t\tstruct overflow_queue *q = &oq[t + k];\n\t\t\t\tq->slots = calloc(extra_bufs, sizeof(struct netmap_slot));\n\t\t\t\tif (!q->slots) {\n\t\t\t\t\tD(\"failed to allocate overflow queue for pipe %d\", k);\n\t\t\t\t\t/* make all overflow queue management fail */\n\t\t\t\t\textra_bufs = 0;\n\t\t\t\t}\n\t\t\t\tq->size = extra_bufs;\n\t\t\t\tsnprintf(q->name, MAX_IFNAMELEN, \"oq %s{%d\", g->pipename, k);\n\t\t\t\tp->oq = q;\n\t\t\t}\n\t\t}\n\t\tt += g->nports;\n\t}\n\n\tif (glob_arg.extra_bufs && !extra_bufs) {\n\t\tif (oq) {\n\t\t\tfor (i = 0; i < npipes + 1; i++) {\n\t\t\t\tfree(oq[i].slots);\n\t\t\t\toq[i].slots = NULL;\n\t\t\t}\n\t\t\tfree(oq);\n\t\t\toq = NULL;\n\t\t}\n\t\tD(\"*** overflow queues disabled ***\");\n\t}\n\n\tsleep(glob_arg.wait_link);\n\n\tstruct pollfd pollfd[npipes + 1];\n\tmemset(&pollfd, 0, sizeof(pollfd));\n\tsignal(SIGINT, sigint_h);\n\twhile (!do_abort) {\n\t\tu_int polli = 0;\n\t\titer++;\n\n\t\tfor (i = 0; i < npipes; ++i) {\n\t\t\tstruct netmap_ring *ring = ports[i].ring;\n\t\t\tif (nm_ring_next(ring, ring->tail) == ring->cur) {\n\t\t\t\t/* no need to poll, there are no packets pending */\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tpollfd[polli].fd = ports[i].nmd->fd;\n\t\t\tpollfd[polli].events = POLLOUT;\n\t\t\tpollfd[polli].revents = 0;\n\t\t\t++polli;\n\t\t}\n\n\t\tpollfd[polli].fd = rxport->nmd->fd;\n\t\tpollfd[polli].events = POLLIN;\n\t\tpollfd[polli].revents = 0;\n\t\t++polli;\n\n\t\t//RD(5, \"polling %d file descriptors\", polli+1);\n\t\trv = poll(pollfd, polli, 10);\n\t\tif (rv <= 0) {\n\t\t\tif (rv < 0 && errno != EAGAIN && errno != EINTR)\n\t\t\t\tRD(1, \"poll error %s\", strerror(errno));\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (oq) {\n\t\t\t/* try to push packets from the overflow queues\n\t\t\t * to the corresponding pipes\n\t\t\t */\n\t\t\tfor (i = 0; i < npipes; i++) {\n\t\t\t\tstruct port_des *p = &ports[i];\n\t\t\t\tstruct overflow_queue *q = p->oq;\n\t\t\t\tstruct group_des *g = p->group;\n\t\t\t\tuint32_t j, lim;\n\t\t\t\tstruct netmap_ring *ring;\n\t\t\t\tstruct netmap_slot *slot;\n\n\t\t\t\tif (oq_empty(q))\n\t\t\t\t\tcontinue;\n\t\t\t\tring = p->ring;\n\t\t\t\tlim = nm_ring_space(ring);\n\t\t\t\tif (!lim)\n\t\t\t\t\tcontinue;\n\t\t\t\tif (q->n < lim)\n\t\t\t\t\tlim = q->n;\n\t\t\t\tfor (j = 0; j < lim; j++) {\n\t\t\t\t\tstruct netmap_slot s = oq_deq(q), tmp;\n\t\t\t\t\ttmp.ptr = 0;\n\t\t\t\t\tslot = &ring->slot[ring->cur];\n\t\t\t\t\tif (slot->ptr && !g->last) {\n\t\t\t\t\t\ttmp.buf_idx = forward_packet(g + 1, slot);\n\t\t\t\t\t\t/* the forwarding may have removed packets\n\t\t\t\t\t\t * from the current queue\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif (q->n < lim)\n\t\t\t\t\t\t\tlim = q->n;\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttmp.buf_idx = slot->buf_idx;\n\t\t\t\t\t}\n\t\t\t\t\toq_enq(freeq, &tmp);\n\t\t\t\t\t*slot = s;\n\t\t\t\t\tslot->flags |= NS_BUF_CHANGED;\n\t\t\t\t\tring->cur = nm_ring_next(ring, ring->cur);\n\t\t\t\t}\n\t\t\t\tring->head = ring->cur;\n\t\t\t\tforwarded += lim;\n\t\t\t\tp->ctr.pkts += lim;\n\t\t\t}\n\t\t}\n\n\t\tint batch = 0;\n\t\tfor (i = rxport->nmd->first_rx_ring; i <= rxport->nmd->last_rx_ring; i++) {\n\t\t\tstruct netmap_ring *rxring = NETMAP_RXRING(rxport->nmd->nifp, i);\n\n\t\t\t//D(\"prepare to scan rings\");\n\t\t\tint next_cur = rxring->cur;\n\t\t\tstruct netmap_slot *next_slot = &rxring->slot[next_cur];\n\t\t\tconst char *next_buf = NETMAP_BUF(rxring, next_slot->buf_idx);\n\t\t\twhile (!nm_ring_empty(rxring)) {\n\t\t\t\tstruct netmap_slot *rs = next_slot;\n\t\t\t\tstruct 
group_des *g = &groups[0];\n\n\t\t\t\t// CHOOSE THE CORRECT OUTPUT PIPE\n\t\t\t\tuint32_t hash = pkt_hdr_hash((const unsigned char *)next_buf, 4, 'B');\n\t\t\t\tif (hash == 0) {\n\t\t\t\t\tnon_ip++; // XXX ??\n\t\t\t\t}\n\t\t\t\trs->ptr = hash | (1UL << 32);\n\t\t\t\t// prefetch the buffer for the next round\n\t\t\t\tnext_cur = nm_ring_next(rxring, next_cur);\n\t\t\t\tnext_slot = &rxring->slot[next_cur];\n\t\t\t\tnext_buf = NETMAP_BUF(rxring, next_slot->buf_idx);\n\t\t\t\t__builtin_prefetch(next_buf);\n\t\t\t\t// 'B' is just a hashing seed\n\t\t\t\trs->buf_idx = forward_packet(g, rs);\n\t\t\t\trs->flags |= NS_BUF_CHANGED;\n\t\t\t\trxring->head = rxring->cur = next_cur;\n\n\t\t\t\tbatch++;\n\t\t\t\tif (unlikely(batch >= glob_arg.batch)) {\n\t\t\t\t\tioctl(rxport->nmd->fd, NIOCRXSYNC, NULL);\n\t\t\t\t\tbatch = 0;\n\t\t\t\t}\n\t\t\t\tND(1,\n\t\t\t\t \"Forwarded Packets: %\"PRIu64\" Dropped packets: %\"PRIu64\" Percent: %.2f\",\n\t\t\t\t forwarded, dropped,\n\t\t\t\t ((float)dropped / (float)forwarded * 100));\n\t\t\t}\n\n\t\t}\n\t}\n\n\tpthread_join(stat_thread, NULL);\n\n\tprintf(\"%\"PRIu64\" packets forwarded. %\"PRIu64\" packets dropped. Total %\"PRIu64\"\\n\", forwarded,\n\t dropped, forwarded + dropped);\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6458563804626465, "alphanum_fraction": 0.6509668231010437, "avg_line_length": 22.9735107421875, "blob_id": "fd21e08b1a500e49a40963313229702e3bed944b", "content_id": "304090affce0781c88330fede46c0257f1014480", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7240, "license_type": "permissive", "max_line_length": 77, "num_lines": 302, "path": "/LINUX/veth_netmap.h", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2014-2016 Vincenzo Maffione. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n\n#include <bsd_glue.h>\n#include <net/netmap.h>\n#include <netmap/netmap_kern.h>\n#include <dev/netmap/netmap_mem2.h>\n\n#ifndef WITH_PIPES\n#error \"netmap pipes are required by veth native adapter\"\n#endif /* WITH_PIPES */\n\nstatic int veth_open(struct ifnet *ifp);\nstatic int veth_close(struct ifnet *ifp);\n\n/* To be called under RCU read lock */\nstatic struct netmap_adapter *\nveth_get_peer_na(struct netmap_adapter *na)\n{\n\tstruct ifnet *ifp = na->ifp;\n\tstruct veth_priv *priv = netdev_priv(ifp);\n\tstruct ifnet *peer_ifp;\n\n\tpeer_ifp = rcu_dereference(priv->peer);\n\tif (!peer_ifp) {\n\t\treturn NULL;\n\t}\n\n\treturn NA(peer_ifp);\n}\n\n/*\n * Returns true if our krings needed by the other peer, false\n * if they are not, or they do not exist.\n */\nstatic bool\nkrings_needed(struct netmap_adapter *na)\n{\n\tenum txrx t;\n\tint i;\n\n\tif (na->tx_rings == NULL) {\n\t\treturn false;\n\t}\n\n\tfor_rx_tx(t) {\n\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++) {\n\t\t\tstruct netmap_kring *kring = &NMR(na, t)[i];\n\n\t\t\tif (kring->nr_kflags & NKR_NEEDRING) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/*\n * Register/unregister. We are already under netmap lock.\n * This register function is similar to the one used by\n * pipes; in addition to the regular tasks (commit the rings\n * in/out netmap node and call nm_(set|clear)_native_flags),\n * we also mark the peer rings as needed by us and possibly\n * create/destroy some netmap rings.\n */\nstatic int\nveth_netmap_reg(struct netmap_adapter *na, int onoff)\n{\n\tstruct netmap_adapter *peer_na;\n\tstruct ifnet *ifp = na->ifp;\n\tbool was_up;\n\tenum txrx t;\n\tint error;\n\tint i;\n\n\trcu_read_lock();\n\n\tpeer_na = veth_get_peer_na(na);\n\tif (!peer_na) {\n\t\trcu_read_unlock();\n\t\treturn EINVAL;\n\t}\n\n\twas_up = netif_running(ifp);\n\tif (na->active_fds == 0 && was_up) {\n\t\t/* The interface is up. Close it while (un)registering. */\n\t\tveth_close(ifp);\n\t}\n\n\t/* Enable or disable flags and callbacks in na and ifp. 
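* On enable, every kring about to enter netmap mode first marks its\n\t * peer ring as NKR_NEEDRING, so that netmap_mem_rings_create() can\n\t * build any ring still missing on the other end of the pair.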
*/\n\tif (onoff) {\n\t\tfor_rx_tx(t) {\n\t\t\tfor (i = 0; i < nma_get_nrings(na, t); i++) {\n\t\t\t\tstruct netmap_kring *kring = &NMR(na, t)[i];\n\n\t\t\t\tif (nm_kring_pending_on(kring)) {\n\t\t\t\t\t/* mark the peer ring as needed */\n\t\t\t\t\tkring->pipe->nr_kflags |= NKR_NEEDRING;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t/* create all missing needed rings on the other end */\n\t\terror = netmap_mem_rings_create(peer_na);\n\t\tif (error) {\n\t\t\trcu_read_unlock();\n\t\t\treturn error;\n\t\t}\n\n\t\t/* In case of no error we put our rings in netmap mode */\n\t\tfor_rx_tx(t) {\n\t\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++) {\n\t\t\t\tstruct netmap_kring *kring = &NMR(na, t)[i];\n\n\t\t\t\tif (nm_kring_pending_on(kring)) {\n\t\t\t\t\tkring->nr_mode = NKR_NETMAP_ON;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tnm_set_native_flags(na);\n\t\tif (netmap_verbose) {\n\t\t\tD(\"registered veth %s\", na->name);\n\t\t}\n\t} else {\n\t\tnm_clear_native_flags(na);\n\n\t\tfor_rx_tx(t) {\n\t\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++) {\n\t\t\t\tstruct netmap_kring *kring = &NMR(na, t)[i];\n\n\t\t\t\tif (nm_kring_pending_off(kring)) {\n\t\t\t\t\tkring->nr_mode = NKR_NETMAP_OFF;\n\t\t\t\t\t/* If hw kring, mark the peer kring\n\t\t\t\t\t * as no longer needed by us (it may\n\t\t\t\t\t * still be kept if sombody else is\n\t\t\t\t\t * using it).\n\t\t\t\t\t */\n\t\t\t\t\tif (kring->pipe) {\n\t\t\t\t\t\tkring->pipe->nr_kflags &=\n\t\t\t\t\t\t\t\t~NKR_NEEDRING;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t/* delete all the peer rings that are no longer needed */\n\t\tnetmap_mem_rings_delete(peer_na);\n\t\tif (netmap_verbose) {\n\t\t\tD(\"unregistered veth %s\", na->name);\n\t\t}\n\t}\n\n\trcu_read_unlock();\n\n\tif (na->active_fds == 0 && was_up) {\n\t\tveth_open(ifp);\n\t}\n\n\treturn error;\n}\n\nstatic int\nveth_netmap_krings_create(struct netmap_adapter *na)\n{\n\tstruct netmap_adapter *peer_na;\n\tint error = 0;\n\tenum txrx t;\n\n\tif (krings_needed(na)) {\n\t\t/* Our krings are already needed by our peer, which\n\t\t * means they were already created. */\n\t\tif (netmap_verbose) {\n\t\t\tD(\"krings already created for %s, nothing to do\",\n\t\t\t na->name);\n\t\t}\n\t\treturn 0;\n\t}\n\n\trcu_read_lock();\n\tpeer_na = veth_get_peer_na(na);\n\tif (!peer_na) {\n\t\trcu_read_unlock();\n\t\tD(\"veth peer not found\");\n\t\treturn ENXIO;\n\t}\n\n\t/* create my krings */\n\terror = netmap_krings_create(na, 0);\n\tif (error)\n\t\tgoto err;\n\n\t/* create the krings of the other end */\n\terror = netmap_krings_create(peer_na, 0);\n\tif (error)\n\t\tgoto del_krings1;\n\n\t/* cross link the krings (only the hw ones, not the host krings) */\n\tfor_rx_tx(t) {\n\t\tenum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */\n\t\tint i;\n\n\t\tfor (i = 0; i < nma_get_nrings(na, t); i++) {\n\t\t\tNMR(na, t)[i].pipe = NMR(peer_na, r) + i;\n\t\t\tNMR(peer_na, r)[i].pipe = NMR(na, t) + i;\n\t\t}\n\t}\n\n\trcu_read_unlock();\n\n\tif (netmap_verbose) {\n\t\tD(\"created krings for %s and its peer\", na->name);\n\t}\n\n\treturn 0;\n\ndel_krings1:\n\tnetmap_krings_delete(na);\nerr:\n\trcu_read_unlock();\n\treturn error;\n}\n\nstatic void\nveth_netmap_krings_delete(struct netmap_adapter *na)\n{\n\tstruct netmap_adapter *peer_na;\n\n\tif (krings_needed(na)) {\n\t\t/* Our krings are needed by the other peer, so we\n\t\t * do nothing here, and let the peer destroy also\n\t\t * our krings when it needs to destroy its krings. 
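* (The krings of the two ends are cross-linked pairwise through\n\t\t * kring->pipe in veth_netmap_krings_create(), so one delete pass\n\t\t * eventually tears down both sides.)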
*/\n\t\tif (netmap_verbose) {\n\t\t\tD(\"krings for %s are still needed by its peer\",\n\t\t\t na->name);\n\t\t}\n\t\treturn;\n\t}\n\n\tif (netmap_verbose) {\n\t\tD(\"Delete krings for %s and its peer\", na->name);\n\t}\n\n\t/* Destroy my krings. */\n\tnetmap_krings_delete(na);\n\n\t/* Destroy the krings of our peer. */\n\trcu_read_lock();\n\tpeer_na = veth_get_peer_na(na);\n\tif (!peer_na) {\n\t\trcu_read_unlock();\n\t\tD(\"veth peer not found\");\n\t\treturn;\n\t}\n\n\tnetmap_krings_delete(peer_na);\n\trcu_read_unlock();\n}\n\nstatic void\nveth_netmap_attach(struct ifnet *ifp)\n{\n\tstruct netmap_adapter na;\n\n\tbzero(&na, sizeof(na));\n\n\tna.ifp = ifp;\n\tna.pdev = NULL;\n\tna.num_tx_desc = 1024;\n\tna.num_rx_desc = 1024;\n\tna.nm_register = veth_netmap_reg;\n\tna.nm_txsync = netmap_pipe_txsync;\n\tna.nm_rxsync = netmap_pipe_rxsync;\n\tna.nm_krings_create = veth_netmap_krings_create;\n\tna.nm_krings_delete = veth_netmap_krings_delete;\n\tna.num_tx_rings = na.num_rx_rings = 1;\n\tnetmap_attach(&na);\n}\n\n/* end of file */\n" }, { "alpha_fraction": 0.5916014313697815, "alphanum_fraction": 0.6081609725952148, "avg_line_length": 28.20816421508789, "blob_id": "61d9fa4fbfbf0109086dd837dc8e95fc4141e149", "content_id": "8e5de7f7a9f28d62b28661abd7aa78369e3544fd", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 14312, "license_type": "permissive", "max_line_length": 92, "num_lines": 490, "path": "/sys/dev/netmap/netmap_offloadings.c", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2014-2015 Vincenzo Maffione\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n/* $FreeBSD: head/sys/dev/netmap/netmap_offloadings.c 261909 2014-02-15 04:53:04Z luigi $ */\n\n#if defined(__FreeBSD__)\n#include <sys/cdefs.h> /* prerequisite */\n\n#include <sys/types.h>\n#include <sys/errno.h>\n#include <sys/param.h>\t/* defines used in kernel.h */\n#include <sys/kernel.h>\t/* types used in module initialization */\n#include <sys/sockio.h>\n#include <sys/malloc.h>\n#include <sys/socketvar.h>\t/* struct socket */\n#include <sys/socket.h> /* sockaddrs */\n#include <net/if.h>\n#include <net/if_var.h>\n#include <machine/bus.h>\t/* bus_dmamap_* */\n#include <sys/endian.h>\n\n#elif defined(linux)\n\n#include \"bsd_glue.h\"\n\n#elif defined(__APPLE__)\n\n#warning OSX support is only partial\n#include \"osx_glue.h\"\n\n#else\n\n#error\tUnsupported platform\n\n#endif /* unsupported */\n\n#include <net/netmap.h>\n#include <dev/netmap/netmap_kern.h>\n\n\n\n/* This routine is called by bdg_mismatch_datapath() when it finishes\n * accumulating bytes for a segment, in order to fix some fields in the\n * segment headers (which still contain the same content as the header\n * of the original GSO packet). 'pkt' points to the beginning of the IP\n * header of the segment, while 'len' is the length of the IP packet.\n */\nstatic void\ngso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,\n\t\tu_int idx, u_int segmented_bytes, u_int last_segment)\n{\n\tstruct nm_iphdr *iph = (struct nm_iphdr *)(pkt);\n\tstruct nm_ipv6hdr *ip6h = (struct nm_ipv6hdr *)(pkt);\n\tuint16_t *check = NULL;\n\tuint8_t *check_data = NULL;\n\n\tif (ipv4) {\n\t\t/* Set the IPv4 \"Total Length\" field. */\n\t\tiph->tot_len = htobe16(len);\n\t\tND(\"ip total length %u\", be16toh(ip->tot_len));\n\n\t\t/* Set the IPv4 \"Identification\" field. */\n\t\tiph->id = htobe16(be16toh(iph->id) + idx);\n\t\tND(\"ip identification %u\", be16toh(iph->id));\n\n\t\t/* Compute and insert the IPv4 header checksum. */\n\t\tiph->check = 0;\n\t\tiph->check = nm_os_csum_ipv4(iph);\n\t\tND(\"IP csum %x\", be16toh(iph->check));\n\t} else {\n\t\t/* Set the IPv6 \"Payload Len\" field. */\n\t\tip6h->payload_len = htobe16(len-iphlen);\n\t}\n\n\tif (tcp) {\n\t\tstruct nm_tcphdr *tcph = (struct nm_tcphdr *)(pkt + iphlen);\n\n\t\t/* Set the TCP sequence number. */\n\t\ttcph->seq = htobe32(be32toh(tcph->seq) + segmented_bytes);\n\t\tND(\"tcp seq %u\", be32toh(tcph->seq));\n\n\t\t/* Zero the PSH and FIN TCP flags if this is not the last\n\t\t segment. */\n\t\tif (!last_segment)\n\t\t\ttcph->flags &= ~(0x8 | 0x1);\n\t\tND(\"last_segment %u\", last_segment);\n\n\t\tcheck = &tcph->check;\n\t\tcheck_data = (uint8_t *)tcph;\n\t} else { /* UDP */\n\t\tstruct nm_udphdr *udph = (struct nm_udphdr *)(pkt + iphlen);\n\n\t\t/* Set the UDP 'Length' field. */\n\t\tudph->len = htobe16(len-iphlen);\n\n\t\tcheck = &udph->check;\n\t\tcheck_data = (uint8_t *)udph;\n\t}\n\n\t/* Compute and insert TCP/UDP checksum. 
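* The sum covers the L4 header plus payload and an IPv4/IPv6\n\t * pseudo-header built from the IP addresses, which is why an\n\t * address-family specific helper is needed.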
*/\n\t*check = 0;\n\tif (ipv4)\n\t\tnm_os_csum_tcpudp_ipv4(iph, check_data, len-iphlen, check);\n\telse\n\t\tnm_os_csum_tcpudp_ipv6(ip6h, check_data, len-iphlen, check);\n\n\tND(\"TCP/UDP csum %x\", be16toh(*check));\n}\n\nstatic int\nvnet_hdr_is_bad(struct nm_vnet_hdr *vh)\n{\n\tuint8_t gso_type = vh->gso_type & ~VIRTIO_NET_HDR_GSO_ECN;\n\n\treturn (\n\t\t(gso_type != VIRTIO_NET_HDR_GSO_NONE &&\n\t\t gso_type != VIRTIO_NET_HDR_GSO_TCPV4 &&\n\t\t gso_type != VIRTIO_NET_HDR_GSO_UDP &&\n\t\t gso_type != VIRTIO_NET_HDR_GSO_TCPV6)\n\t\t||\n\t\t (vh->flags & ~(VIRTIO_NET_HDR_F_NEEDS_CSUM\n\t\t\t | VIRTIO_NET_HDR_F_DATA_VALID))\n\t );\n}\n\n/* The VALE mismatch datapath implementation. */\nvoid\nbdg_mismatch_datapath(struct netmap_vp_adapter *na,\n\t\t struct netmap_vp_adapter *dst_na,\n\t\t const struct nm_bdg_fwd *ft_p,\n\t\t struct netmap_ring *dst_ring,\n\t\t u_int *j, u_int lim, u_int *howmany)\n{\n\tstruct netmap_slot *dst_slot = NULL;\n\tstruct nm_vnet_hdr *vh = NULL;\n\tconst struct nm_bdg_fwd *ft_end = ft_p + ft_p->ft_frags;\n\n\t/* Source and destination pointers. */\n\tuint8_t *dst, *src;\n\tsize_t src_len, dst_len;\n\n\t/* Indices and counters for the destination ring. */\n\tu_int j_start = *j;\n\tu_int j_cur = j_start;\n\tu_int dst_slots = 0;\n\n\tif (unlikely(ft_p == ft_end)) {\n\t\tRD(3, \"No source slots to process\");\n\t\treturn;\n\t}\n\n\t/* Init source and dest pointers. */\n\tsrc = ft_p->ft_buf;\n\tsrc_len = ft_p->ft_len;\n\tdst_slot = &dst_ring->slot[j_cur];\n\tdst = NMB(&dst_na->up, dst_slot);\n\tdst_len = src_len;\n\n\t/* If the source port uses the offloadings, while destination doesn't,\n\t * we grab the source virtio-net header and do the offloadings here.\n\t */\n\tif (na->up.virt_hdr_len && !dst_na->up.virt_hdr_len) {\n\t\tvh = (struct nm_vnet_hdr *)src;\n\t\t/* Initial sanity check on the source virtio-net header. If\n\t\t * something seems wrong, just drop the packet. */\n\t\tif (src_len < na->up.virt_hdr_len) {\n\t\t\tRD(3, \"Short src vnet header, dropping\");\n\t\t\treturn;\n\t\t}\n\t\tif (vnet_hdr_is_bad(vh)) {\n\t\t\tRD(3, \"Bad src vnet header, dropping\");\n\t\t\treturn;\n\t\t}\n\t}\n\n\t/* We are processing the first input slot and there is a mismatch\n\t * between source and destination virt_hdr_len (SHL and DHL).\n\t * When the a client is using virtio-net headers, the header length\n\t * can be:\n\t * - 10: the header corresponds to the struct nm_vnet_hdr\n\t * - 12: the first 10 bytes correspond to the struct\n\t * virtio_net_hdr, and the last 2 bytes store the\n\t * \"mergeable buffers\" info, which is an optional\n\t *\t hint that can be zeroed for compatibility\n\t *\n\t * The destination header is therefore built according to the\n\t * following table:\n\t *\n\t * SHL | DHL | destination header\n\t * -----------------------------\n\t * 0 | 10 | zero\n\t * 0 | 12 | zero\n\t * 10 | 0 | doesn't exist\n\t * 10 | 12 | first 10 bytes are copied from source header, last 2 are zero\n\t * 12 | 0 | doesn't exist\n\t * 12 | 10 | copied from the first 10 bytes of source header\n\t */\n\tbzero(dst, dst_na->up.virt_hdr_len);\n\tif (na->up.virt_hdr_len && dst_na->up.virt_hdr_len)\n\t\tmemcpy(dst, src, sizeof(struct nm_vnet_hdr));\n\t/* Skip the virtio-net headers. 
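* From here on src/src_len describe the Ethernet frame only: the\n\t * source header has been consumed and the destination header\n\t * (possibly all zeroes) is already in place.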
*/\n\tsrc += na->up.virt_hdr_len;\n\tsrc_len -= na->up.virt_hdr_len;\n\tdst += dst_na->up.virt_hdr_len;\n\tdst_len = dst_na->up.virt_hdr_len + src_len;\n\n\t/* Here it could be dst_len == 0 (which implies src_len == 0),\n\t * so we avoid passing a zero length fragment.\n\t */\n\tif (dst_len == 0) {\n\t\tft_p++;\n\t\tsrc = ft_p->ft_buf;\n\t\tsrc_len = ft_p->ft_len;\n\t\tdst_len = src_len;\n\t}\n\n\tif (vh && vh->gso_type != VIRTIO_NET_HDR_GSO_NONE) {\n\t\tu_int gso_bytes = 0;\n\t\t/* Length of the GSO packet header. */\n\t\tu_int gso_hdr_len = 0;\n\t\t/* Pointer to the GSO packet header. Assume it is in a single fragment. */\n\t\tuint8_t *gso_hdr = NULL;\n\t\t/* Index of the current segment. */\n\t\tu_int gso_idx = 0;\n\t\t/* Payload data bytes segmented so far (e.g. TCP data bytes). */\n\t\tu_int segmented_bytes = 0;\n\t\t/* Is this an IPv4 or IPv6 GSO packet? */\n\t\tu_int ipv4 = 0;\n\t\t/* Length of the IP header (20 if IPv4, 40 if IPv6). */\n\t\tu_int iphlen = 0;\n\t\t/* Length of the Ethernet header (18 if 802.1q, otherwise 14). */\n\t\tu_int ethhlen = 14;\n\t\t/* Is this a TCP or an UDP GSO packet? */\n\t\tu_int tcp = ((vh->gso_type & ~VIRTIO_NET_HDR_GSO_ECN)\n\t\t\t\t== VIRTIO_NET_HDR_GSO_UDP) ? 0 : 1;\n\n\t\t/* Segment the GSO packet contained into the input slots (frags). */\n\t\tfor (;;) {\n\t\t\tsize_t copy;\n\n\t\t\tif (dst_slots >= *howmany) {\n\t\t\t\t/* We still have work to do, but we've run out of\n\t\t\t\t * dst slots, so we have to drop the packet. */\n\t\t\t\tRD(3, \"Not enough slots, dropping GSO packet\");\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t/* Grab the GSO header if we don't have it. */\n\t\t\tif (!gso_hdr) {\n\t\t\t\tuint16_t ethertype;\n\n\t\t\t\tgso_hdr = src;\n\n\t\t\t\t/* Look at the 'Ethertype' field to see if this packet\n\t\t\t\t * is IPv4 or IPv6, taking into account VLAN\n\t\t\t\t * encapsulation. */\n\t\t\t\tfor (;;) {\n\t\t\t\t\tif (src_len < ethhlen) {\n\t\t\t\t\t\tRD(3, \"Short GSO fragment [eth], dropping\");\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\tethertype = be16toh(*((uint16_t *)\n\t\t\t\t\t\t\t (gso_hdr + ethhlen - 2)));\n\t\t\t\t\tif (ethertype != 0x8100) /* not 802.1q */\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tethhlen += 4;\n\t\t\t\t}\n\t\t\t\tswitch (ethertype) {\n\t\t\t\t\tcase 0x0800: /* IPv4 */\n\t\t\t\t\t{\n\t\t\t\t\t\tstruct nm_iphdr *iph = (struct nm_iphdr *)\n\t\t\t\t\t\t\t\t\t(gso_hdr + ethhlen);\n\n\t\t\t\t\t\tif (src_len < ethhlen + 20) {\n\t\t\t\t\t\t\tRD(3, \"Short GSO fragment \"\n\t\t\t\t\t\t\t \"[IPv4], dropping\");\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tipv4 = 1;\n\t\t\t\t\t\tiphlen = 4 * (iph->version_ihl & 0x0F);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tcase 0x86DD: /* IPv6 */\n\t\t\t\t\t\tipv4 = 0;\n\t\t\t\t\t\tiphlen = 40;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tRD(3, \"Unsupported ethertype, \"\n\t\t\t\t\t\t \"dropping GSO packet\");\n\t\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tND(3, \"type=%04x\", ethertype);\n\n\t\t\t\tif (src_len < ethhlen + iphlen) {\n\t\t\t\t\tRD(3, \"Short GSO fragment [IP], dropping\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t/* Compute gso_hdr_len. 
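This is the\n\t\t\t\t * prefix of the original packet (Ethernet + IP + L4 headers)\n\t\t\t\t * that gets replicated in front of every segment. 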
For TCP we need to read the\n\t\t\t\t * content of the 'Data Offset' field.\n\t\t\t\t */\n\t\t\t\tif (tcp) {\n\t\t\t\t\tstruct nm_tcphdr *tcph = (struct nm_tcphdr *)\n\t\t\t\t\t\t\t\t(gso_hdr + ethhlen + iphlen);\n\n\t\t\t\t\tif (src_len < ethhlen + iphlen + 20) {\n\t\t\t\t\t\tRD(3, \"Short GSO fragment \"\n\t\t\t\t\t\t\t\t\"[TCP], dropping\");\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\tgso_hdr_len = ethhlen + iphlen +\n\t\t\t\t\t\t 4 * (tcph->doff >> 4);\n\t\t\t\t} else {\n\t\t\t\t\tgso_hdr_len = ethhlen + iphlen + 8; /* UDP */\n\t\t\t\t}\n\n\t\t\t\tif (src_len < gso_hdr_len) {\n\t\t\t\t\tRD(3, \"Short GSO fragment [TCP/UDP], dropping\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tND(3, \"gso_hdr_len %u gso_mtu %d\", gso_hdr_len,\n\t\t\t\t\t\t\t\t dst_na->mfs);\n\n\t\t\t\t/* Advance source pointers. */\n\t\t\t\tsrc += gso_hdr_len;\n\t\t\t\tsrc_len -= gso_hdr_len;\n\t\t\t\tif (src_len == 0) {\n\t\t\t\t\tft_p++;\n\t\t\t\t\tif (ft_p == ft_end)\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tsrc = ft_p->ft_buf;\n\t\t\t\t\tsrc_len = ft_p->ft_len;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t/* Fill in the header of the current segment. */\n\t\t\tif (gso_bytes == 0) {\n\t\t\t\tmemcpy(dst, gso_hdr, gso_hdr_len);\n\t\t\t\tgso_bytes = gso_hdr_len;\n\t\t\t}\n\n\t\t\t/* Fill in data and update source and dest pointers. */\n\t\t\tcopy = src_len;\n\t\t\tif (gso_bytes + copy > dst_na->mfs)\n\t\t\t\tcopy = dst_na->mfs - gso_bytes;\n\t\t\tmemcpy(dst + gso_bytes, src, copy);\n\t\t\tgso_bytes += copy;\n\t\t\tsrc += copy;\n\t\t\tsrc_len -= copy;\n\n\t\t\t/* A segment is complete or we have processed all the\n\t\t\t the GSO payload bytes. */\n\t\t\tif (gso_bytes >= dst_na->mfs ||\n\t\t\t\t(src_len == 0 && ft_p + 1 == ft_end)) {\n\t\t\t\t/* After raw segmentation, we must fix some header\n\t\t\t\t * fields and compute checksums, in a protocol dependent\n\t\t\t\t * way. */\n\t\t\t\tgso_fix_segment(dst + ethhlen, gso_bytes - ethhlen,\n\t\t\t\t\t\tipv4, iphlen, tcp,\n\t\t\t\t\t\tgso_idx, segmented_bytes,\n\t\t\t\t\t\tsrc_len == 0 && ft_p + 1 == ft_end);\n\n\t\t\t\tND(\"frame %u completed with %d bytes\", gso_idx, (int)gso_bytes);\n\t\t\t\tdst_slot->len = gso_bytes;\n\t\t\t\tdst_slot->flags = 0;\n\t\t\t\tdst_slots++;\n\t\t\t\tsegmented_bytes += gso_bytes - gso_hdr_len;\n\n\t\t\t\tgso_bytes = 0;\n\t\t\t\tgso_idx++;\n\n\t\t\t\t/* Next destination slot. */\n\t\t\t\tj_cur = nm_next(j_cur, lim);\n\t\t\t\tdst_slot = &dst_ring->slot[j_cur];\n\t\t\t\tdst = NMB(&dst_na->up, dst_slot);\n\t\t\t}\n\n\t\t\t/* Next input slot. */\n\t\t\tif (src_len == 0) {\n\t\t\t\tft_p++;\n\t\t\t\tif (ft_p == ft_end)\n\t\t\t\t\tbreak;\n\t\t\t\tsrc = ft_p->ft_buf;\n\t\t\t\tsrc_len = ft_p->ft_len;\n\t\t\t}\n\t\t}\n\t\tND(3, \"%d bytes segmented\", segmented_bytes);\n\n\t} else {\n\t\t/* Address of a checksum field into a destination slot. */\n\t\tuint16_t *check = NULL;\n\t\t/* Accumulator for an unfolded checksum. */\n\t\trawsum_t csum = 0;\n\n\t\t/* Process a non-GSO packet. */\n\n\t\t/* Init 'check' if necessary. */\n\t\tif (vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {\n\t\t\tif (unlikely(vh->csum_offset + vh->csum_start > src_len))\n\t\t\t\tD(\"invalid checksum request\");\n\t\t\telse\n\t\t\t\tcheck = (uint16_t *)(dst + vh->csum_start +\n\t\t\t\t\t\tvh->csum_offset);\n\t\t}\n\n\t\twhile (ft_p != ft_end) {\n\t\t\t/* Init/update the packet checksum if needed. 
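* The unfolded sum is accumulated across the fragments in 'csum'\n\t\t\t * and folded into the final 16-bit value only after the last\n\t\t\t * fragment has been copied.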
*/\n\t\t\tif (vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {\n\t\t\t\tif (!dst_slots)\n\t\t\t\t\tcsum = nm_os_csum_raw(src + vh->csum_start,\n\t\t\t\t\t\t\t\tsrc_len - vh->csum_start, 0);\n\t\t\t\telse\n\t\t\t\t\tcsum = nm_os_csum_raw(src, src_len, csum);\n\t\t\t}\n\n\t\t\t/* Round to a multiple of 64 */\n\t\t\tsrc_len = (src_len + 63) & ~63;\n\n\t\t\tif (ft_p->ft_flags & NS_INDIRECT) {\n\t\t\t\tif (copyin(src, dst, src_len)) {\n\t\t\t\t\t/* Invalid user pointer, pretend len is 0. */\n\t\t\t\t\tdst_len = 0;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmemcpy(dst, src, (int)src_len);\n\t\t\t}\n\t\t\tdst_slot->len = dst_len;\n\t\t\tdst_slots++;\n\n\t\t\t/* Next destination slot. */\n\t\t\tj_cur = nm_next(j_cur, lim);\n\t\t\tdst_slot = &dst_ring->slot[j_cur];\n\t\t\tdst = NMB(&dst_na->up, dst_slot);\n\n\t\t\t/* Next source slot. */\n\t\t\tft_p++;\n\t\t\tsrc = ft_p->ft_buf;\n\t\t\tdst_len = src_len = ft_p->ft_len;\n\t\t}\n\n\t\t/* Finalize (fold) the checksum if needed. */\n\t\tif (check && vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {\n\t\t\t*check = nm_os_csum_fold(csum);\n\t\t}\n\t\tND(3, \"using %u dst_slots\", dst_slots);\n\n\t\t/* A second pass on the destination slots to set the slot flags,\n\t\t * using the right number of destination slots.\n\t\t */\n\t\twhile (j_start != j_cur) {\n\t\t\tdst_slot = &dst_ring->slot[j_start];\n\t\t\tdst_slot->flags = (dst_slots << 8)| NS_MOREFRAG;\n\t\t\tj_start = nm_next(j_start, lim);\n\t\t}\n\t\t/* Clear NS_MOREFRAG flag on last entry. */\n\t\tdst_slot->flags = (dst_slots << 8);\n\t}\n\n\t/* Update howmany and j. This is to commit the use of\n\t * those slots in the destination ring. */\n\tif (unlikely(dst_slots > *howmany)) {\n\t\tD(\"Slot allocation error: This is a bug\");\n\t}\n\t*j = j_cur;\n\t*howmany -= dst_slots;\n}\n" }, { "alpha_fraction": 0.6169352531433105, "alphanum_fraction": 0.6244328022003174, "avg_line_length": 25.77793312072754, "blob_id": "40f64acc077f037a31396706f07250ebe300f446", "content_id": "d8a252f8dce14e1bc014fe26e3237f29395b2642", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 76692, "license_type": "permissive", "max_line_length": 110, "num_lines": 2864, "path": "/sys/dev/netmap/netmap_vale.c", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2013-2016 Universita` di Pisa\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n\n/*\n * This module implements the VALE switch for netmap\n\n--- VALE SWITCH ---\n\nNMG_LOCK() serializes all modifications to switches and ports.\nA switch cannot be deleted until all ports are gone.\n\nFor each switch, an SX lock (RWlock on linux) protects\ndeletion of ports. When configuring or deleting a new port, the\nlock is acquired in exclusive mode (after holding NMG_LOCK).\nWhen forwarding, the lock is acquired in shared mode (without NMG_LOCK).\nThe lock is held throughout the entire forwarding cycle,\nduring which the thread may incur in a page fault.\nHence it is important that sleepable shared locks are used.\n\nOn the rx ring, the per-port lock is grabbed initially to reserve\na number of slot in the ring, then the lock is released,\npackets are copied from source to destination, and then\nthe lock is acquired again and the receive ring is updated.\n(A similar thing is done on the tx ring for NIC and host stack\nports attached to the switch)\n\n */\n\n/*\n * OS-specific code that is used only within this file.\n * Other OS-specific code that must be accessed by drivers\n * is present in netmap_kern.h\n */\n\n#if defined(__FreeBSD__)\n#include <sys/cdefs.h> /* prerequisite */\n__FBSDID(\"$FreeBSD: head/sys/dev/netmap/netmap.c 257176 2013-10-26 17:58:36Z glebius $\");\n\n#include <sys/types.h>\n#include <sys/errno.h>\n#include <sys/param.h>\t/* defines used in kernel.h */\n#include <sys/kernel.h>\t/* types used in module initialization */\n#include <sys/conf.h>\t/* cdevsw struct, UID, GID */\n#include <sys/sockio.h>\n#include <sys/socketvar.h>\t/* struct socket */\n#include <sys/malloc.h>\n#include <sys/poll.h>\n#include <sys/rwlock.h>\n#include <sys/socket.h> /* sockaddrs */\n#include <sys/selinfo.h>\n#include <sys/sysctl.h>\n#include <net/if.h>\n#include <net/if_var.h>\n#include <net/bpf.h>\t\t/* BIOCIMMEDIATE */\n#include <machine/bus.h>\t/* bus_dmamap_* */\n#include <sys/endian.h>\n#include <sys/refcount.h>\n\n\n#define BDG_RWLOCK_T\t\tstruct rwlock // struct rwlock\n\n#define\tBDG_RWINIT(b)\t\t\\\n\trw_init_flags(&(b)->bdg_lock, \"bdg lock\", RW_NOWITNESS)\n#define BDG_WLOCK(b)\t\trw_wlock(&(b)->bdg_lock)\n#define BDG_WUNLOCK(b)\t\trw_wunlock(&(b)->bdg_lock)\n#define BDG_RLOCK(b)\t\trw_rlock(&(b)->bdg_lock)\n#define BDG_RTRYLOCK(b)\t\trw_try_rlock(&(b)->bdg_lock)\n#define BDG_RUNLOCK(b)\t\trw_runlock(&(b)->bdg_lock)\n#define BDG_RWDESTROY(b)\trw_destroy(&(b)->bdg_lock)\n\n\n#elif defined(linux)\n\n#include \"bsd_glue.h\"\n\n#elif defined(__APPLE__)\n\n#warning OSX support is only partial\n#include \"osx_glue.h\"\n\n#elif defined(_WIN32)\n#include \"win_glue.h\"\n\n#else\n\n#error\tUnsupported platform\n\n#endif /* unsupported */\n\n/*\n * common headers\n */\n\n#include <net/netmap.h>\n#include <dev/netmap/netmap_kern.h>\n#include <dev/netmap/netmap_mem2.h>\n\n#ifdef WITH_VALE\n\n/*\n * system parameters (most of them in netmap_kern.h)\n * NM_BDG_NAME\tprefix for switch port names, default \"vale\"\n * NM_BDG_MAXPORTS\tnumber of ports\n * 
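NM_BDG_HASH\tforwarding table entries (MAC learning table)\n * 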
NM_BRIDGES\tmax number of switches in the system.\n *\tXXX should become a sysctl or tunable\n *\n * Switch ports are named valeX:Y where X is the switch name and Y\n * is the port. If Y matches a physical interface name, the port is\n * connected to a physical device.\n *\n * Unlike physical interfaces, switch ports use their own memory region\n * for rings and buffers.\n * The virtual interfaces use per-queue lock instead of core lock.\n * In the tx loop, we aggregate traffic in batches to make all operations\n * faster. The batch size is bridge_batch.\n */\n#define NM_BDG_MAXRINGS\t\t16\t/* XXX unclear how many. */\n#define NM_BDG_MAXSLOTS\t\t4096\t/* XXX same as above */\n#define NM_BRIDGE_RINGSIZE\t1024\t/* in the device */\n#define NM_BDG_HASH\t\t1024\t/* forwarding table entries */\n#define NM_BDG_BATCH\t\t1024\t/* entries in the forwarding buffer */\n#define NM_MULTISEG\t\t64\t/* max size of a chain of bufs */\n/* actual size of the tables */\n#define NM_BDG_BATCH_MAX\t(NM_BDG_BATCH + NM_MULTISEG)\n/* NM_FT_NULL terminates a list of slots in the ft */\n#define NM_FT_NULL\t\tNM_BDG_BATCH_MAX\n\n\n/*\n * bridge_batch is set via sysctl to the max batch size to be\n * used in the bridge. The actual value may be larger as the\n * last packet in the block may overflow the size.\n */\nstatic int bridge_batch = NM_BDG_BATCH; /* bridge batch size */\nSYSBEGIN(vars_vale);\nSYSCTL_DECL(_dev_netmap);\nSYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0 , \"\");\nSYSEND;\n\nstatic int netmap_vp_create(struct nmreq *, struct ifnet *,\n\t\tstruct netmap_mem_d *nmd, struct netmap_vp_adapter **);\nstatic int netmap_vp_reg(struct netmap_adapter *na, int onoff);\nstatic int netmap_bwrap_reg(struct netmap_adapter *, int onoff);\n\n/*\n * For each output interface, nm_bdg_q is used to construct a list.\n * bq_len is the number of output buffers (we can have coalescing\n * during the copy).\n */\nstruct nm_bdg_q {\n\tuint16_t bq_head;\n\tuint16_t bq_tail;\n\tuint32_t bq_len;\t/* number of buffers */\n};\n\n/* XXX revise this */\nstruct nm_hash_ent {\n\tuint64_t\tmac;\t/* the top 2 bytes are the epoch */\n\tuint64_t\tports;\n};\n\n/*\n * nm_bridge is a descriptor for a VALE switch.\n * Interfaces for a bridge are all in bdg_ports[].\n * The array has fixed size, an empty entry does not terminate\n * the search, but lookups only occur on attach/detach so we\n * don't mind if they are slow.\n *\n * The bridge is non blocking on the transmit ports: excess\n * packets are dropped if there is no room on the output port.\n *\n * bdg_lock protects accesses to the bdg_ports array.\n * This is a rw lock (or equivalent).\n */\nstruct nm_bridge {\n\t/* XXX what is the proper alignment/layout ? */\n\tBDG_RWLOCK_T\tbdg_lock;\t/* protects bdg_ports */\n\tint\t\tbdg_namelen;\n\tuint32_t\tbdg_active_ports; /* 0 means free */\n\tchar\t\tbdg_basename[IFNAMSIZ];\n\n\t/* Indexes of active ports (up to active_ports)\n\t * and all other remaining ports.\n\t */\n\tuint8_t\t\tbdg_port_index[NM_BDG_MAXPORTS];\n\n\tstruct netmap_vp_adapter *bdg_ports[NM_BDG_MAXPORTS];\n\n\n\t/*\n\t * The function to decide the destination port.\n\t * It returns either of an index of the destination port,\n\t * NM_BDG_BROADCAST to broadcast this packet, or NM_BDG_NOPORT not to\n\t * forward this packet. 
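For example, a lookup that floods every\n\t * frame to all ports (a minimal sketch; the signature is assumed to\n\t * match the one used by netmap_bdg_learning() in this file) could be:\n\t *\n\t *\tstatic u_int\n\t *\tflood_lookup(struct nm_bdg_fwd *ft, uint8_t *ring_nr,\n\t *\t\tstruct netmap_vp_adapter *na)\n\t *\t{\n\t *\t\treturn NM_BDG_BROADCAST;\n\t *\t}\n\t *\n\t * 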
ring_nr is the source ring index, and the\n\t * function may overwrite this value to forward this packet to a\n\t * different ring index.\n\t * This function must be set by netmap_bdg_ctl().\n\t */\n\tstruct netmap_bdg_ops bdg_ops;\n\n\t/* the forwarding table, MAC+ports.\n\t * XXX should be changed to an argument to be passed to\n\t * the lookup function, and allocated on attach\n\t */\n\tstruct nm_hash_ent ht[NM_BDG_HASH];\n\n#ifdef CONFIG_NET_NS\n\tstruct net *ns;\n#endif /* CONFIG_NET_NS */\n};\n\nconst char*\nnetmap_bdg_name(struct netmap_vp_adapter *vp)\n{\n\tstruct nm_bridge *b = vp->na_bdg;\n\tif (b == NULL)\n\t\treturn NULL;\n\treturn b->bdg_basename;\n}\n\n\n#ifndef CONFIG_NET_NS\n/*\n * XXX in principle nm_bridges could be created dynamically\n * Right now we have a static array and deletions are protected\n * by an exclusive lock.\n */\nstatic struct nm_bridge *nm_bridges;\n#endif /* !CONFIG_NET_NS */\n\n\n/*\n * this is a slightly optimized copy routine which rounds\n * to multiple of 64 bytes and is often faster than dealing\n * with other odd sizes. We assume there is enough room\n * in the source and destination buffers.\n *\n * XXX only for multiples of 64 bytes, non overlapped.\n */\nstatic inline void\npkt_copy(void *_src, void *_dst, int l)\n{\n uint64_t *src = _src;\n uint64_t *dst = _dst;\n if (unlikely(l >= 1024)) {\n memcpy(dst, src, l);\n return;\n }\n for (; likely(l > 0); l-=64) {\n *dst++ = *src++;\n *dst++ = *src++;\n *dst++ = *src++;\n *dst++ = *src++;\n *dst++ = *src++;\n *dst++ = *src++;\n *dst++ = *src++;\n *dst++ = *src++;\n }\n}\n\n\nstatic int\nnm_is_id_char(const char c)\n{\n\treturn (c >= 'a' && c <= 'z') ||\n\t (c >= 'A' && c <= 'Z') ||\n\t (c >= '0' && c <= '9') ||\n\t (c == '_');\n}\n\n/* Validate the name of a VALE bridge port and return the\n * position of the \":\" character. */\nstatic int\nnm_vale_name_validate(const char *name)\n{\n\tint colon_pos = -1;\n\tint i;\n\n\tif (!name || strlen(name) < strlen(NM_BDG_NAME)) {\n\t\treturn -1;\n\t}\n\n\tfor (i = 0; name[i]; i++) {\n\t\tif (name[i] == ':') {\n\t\t\tif (colon_pos != -1) {\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t\tcolon_pos = i;\n\t\t} else if (!nm_is_id_char(name[i])) {\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (i >= IFNAMSIZ) {\n\t\treturn -1;\n\t}\n\n\treturn colon_pos;\n}\n\n/*\n * locate a bridge among the existing ones.\n * MUST BE CALLED WITH NMG_LOCK()\n *\n * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME.\n * We assume that this is called with a name of at least NM_NAME chars.\n */\nstatic struct nm_bridge *\nnm_find_bridge(const char *name, int create)\n{\n\tint i, namelen;\n\tstruct nm_bridge *b = NULL, *bridges;\n\tu_int num_bridges;\n\n\tNMG_LOCK_ASSERT();\n\n\tnetmap_bns_getbridges(&bridges, &num_bridges);\n\n\tnamelen = nm_vale_name_validate(name);\n\tif (namelen < 0) {\n\t\tD(\"invalid bridge name %s\", name ? 
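/* XXX if name is NULL this passes NULL to %s; most libcs print "(null)" */ 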
name : NULL);\n\t\treturn NULL;\n\t}\n\n\t/* lookup the name, remember empty slot if there is one */\n\tfor (i = 0; i < num_bridges; i++) {\n\t\tstruct nm_bridge *x = bridges + i;\n\n\t\tif (x->bdg_active_ports == 0) {\n\t\t\tif (create && b == NULL)\n\t\t\t\tb = x;\t/* record empty slot */\n\t\t} else if (x->bdg_namelen != namelen) {\n\t\t\tcontinue;\n\t\t} else if (strncmp(name, x->bdg_basename, namelen) == 0) {\n\t\t\tND(\"found '%.*s' at %d\", namelen, name, i);\n\t\t\tb = x;\n\t\t\tbreak;\n\t\t}\n\t}\n\tif (i == num_bridges && b) { /* name not found, can create entry */\n\t\t/* initialize the bridge */\n\t\tstrncpy(b->bdg_basename, name, namelen);\n\t\tND(\"create new bridge %s with ports %d\", b->bdg_basename,\n\t\t\tb->bdg_active_ports);\n\t\tb->bdg_namelen = namelen;\n\t\tb->bdg_active_ports = 0;\n\t\tfor (i = 0; i < NM_BDG_MAXPORTS; i++)\n\t\t\tb->bdg_port_index[i] = i;\n\t\t/* set the default function */\n\t\tb->bdg_ops.lookup = netmap_bdg_learning;\n\t\t/* reset the MAC address table */\n\t\tbzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH);\n\t\tNM_BNS_GET(b);\n\t}\n\treturn b;\n}\n\n\n/*\n * Free the forwarding tables for rings attached to switch ports.\n */\nstatic void\nnm_free_bdgfwd(struct netmap_adapter *na)\n{\n\tint nrings, i;\n\tstruct netmap_kring *kring;\n\n\tNMG_LOCK_ASSERT();\n\tnrings = na->num_tx_rings;\n\tkring = na->tx_rings;\n\tfor (i = 0; i < nrings; i++) {\n\t\tif (kring[i].nkr_ft) {\n\t\t\tnm_os_free(kring[i].nkr_ft);\n\t\t\tkring[i].nkr_ft = NULL; /* protect from freeing twice */\n\t\t}\n\t}\n}\n\n\n/*\n * Allocate the forwarding tables for the rings attached to the bridge ports.\n */\nstatic int\nnm_alloc_bdgfwd(struct netmap_adapter *na)\n{\n\tint nrings, l, i, num_dstq;\n\tstruct netmap_kring *kring;\n\n\tNMG_LOCK_ASSERT();\n\t/* all port:rings + broadcast */\n\tnum_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;\n\tl = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;\n\tl += sizeof(struct nm_bdg_q) * num_dstq;\n\tl += sizeof(uint16_t) * NM_BDG_BATCH_MAX;\n\n\tnrings = netmap_real_rings(na, NR_TX);\n\tkring = na->tx_rings;\n\tfor (i = 0; i < nrings; i++) {\n\t\tstruct nm_bdg_fwd *ft;\n\t\tstruct nm_bdg_q *dstq;\n\t\tint j;\n\n\t\tft = nm_os_malloc(l);\n\t\tif (!ft) {\n\t\t\tnm_free_bdgfwd(na);\n\t\t\treturn ENOMEM;\n\t\t}\n\t\tdstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);\n\t\tfor (j = 0; j < num_dstq; j++) {\n\t\t\tdstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;\n\t\t\tdstq[j].bq_len = 0;\n\t\t}\n\t\tkring[i].nkr_ft = ft;\n\t}\n\treturn 0;\n}\n\n\n/* remove from bridge b the ports in slots hw and sw\n * (sw can be -1 if not needed)\n */\nstatic void\nnetmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)\n{\n\tint s_hw = hw, s_sw = sw;\n\tint i, lim =b->bdg_active_ports;\n\tuint8_t tmp[NM_BDG_MAXPORTS];\n\n\t/*\n\tNew algorithm:\n\tmake a copy of bdg_port_index;\n\tlookup NA(ifp)->bdg_port and SWNA(ifp)->bdg_port\n\tin the array of bdg_port_index, replacing them with\n\tentries from the bottom of the array;\n\tdecrement bdg_active_ports;\n\tacquire BDG_WLOCK() and copy back the array.\n\t */\n\n\tif (netmap_verbose)\n\t\tD(\"detach %d and %d (lim %d)\", hw, sw, lim);\n\t/* make a copy of the list of active ports, update it,\n\t * and then copy back within BDG_WLOCK().\n\t */\n\tmemcpy(tmp, b->bdg_port_index, sizeof(tmp));\n\tfor (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) {\n\t\tif (hw >= 0 && tmp[i] == hw) {\n\t\t\tND(\"detach hw %d at %d\", hw, i);\n\t\t\tlim--; /* point to last active port */\n\t\t\ttmp[i] = tmp[lim]; /* swap with i 
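(the last active\n\t\t\t * port takes slot i, the detached one is parked\n\t\t\t * past the active region) 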
*/\n\t\t\ttmp[lim] = hw;\t/* now this is inactive */\n\t\t\thw = -1;\n\t\t} else if (sw >= 0 && tmp[i] == sw) {\n\t\t\tND(\"detach sw %d at %d\", sw, i);\n\t\t\tlim--;\n\t\t\ttmp[i] = tmp[lim];\n\t\t\ttmp[lim] = sw;\n\t\t\tsw = -1;\n\t\t} else {\n\t\t\ti++;\n\t\t}\n\t}\n\tif (hw >= 0 || sw >= 0) {\n\t\tD(\"XXX delete failed hw %d sw %d, should panic...\", hw, sw);\n\t}\n\n\tBDG_WLOCK(b);\n\tif (b->bdg_ops.dtor)\n\t\tb->bdg_ops.dtor(b->bdg_ports[s_hw]);\n\tb->bdg_ports[s_hw] = NULL;\n\tif (s_sw >= 0) {\n\t\tb->bdg_ports[s_sw] = NULL;\n\t}\n\tmemcpy(b->bdg_port_index, tmp, sizeof(tmp));\n\tb->bdg_active_ports = lim;\n\tBDG_WUNLOCK(b);\n\n\tND(\"now %d active ports\", lim);\n\tif (lim == 0) {\n\t\tND(\"marking bridge %s as free\", b->bdg_basename);\n\t\tbzero(&b->bdg_ops, sizeof(b->bdg_ops));\n\t\tNM_BNS_PUT(b);\n\t}\n}\n\n/* nm_bdg_ctl callback for VALE ports */\nstatic int\nnetmap_vp_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach)\n{\n\tstruct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;\n\tstruct nm_bridge *b = vpna->na_bdg;\n\n\t(void)nmr;\t// XXX merge ?\n\tif (attach)\n\t\treturn 0; /* nothing to do */\n\tif (b) {\n\t\tnetmap_set_all_rings(na, 0 /* disable */);\n\t\tnetmap_bdg_detach_common(b, vpna->bdg_port, -1);\n\t\tvpna->na_bdg = NULL;\n\t\tnetmap_set_all_rings(na, 1 /* enable */);\n\t}\n\t/* I have took reference just for attach */\n\tnetmap_adapter_put(na);\n\treturn 0;\n}\n\n/* nm_dtor callback for ephemeral VALE ports */\nstatic void\nnetmap_vp_dtor(struct netmap_adapter *na)\n{\n\tstruct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;\n\tstruct nm_bridge *b = vpna->na_bdg;\n\n\tND(\"%s has %d references\", na->name, na->na_refcount);\n\n\tif (b) {\n\t\tnetmap_bdg_detach_common(b, vpna->bdg_port, -1);\n\t}\n\n\tif (vpna->autodelete && na->ifp != NULL) {\n\t\tND(\"releasing %s\", na->ifp->if_xname);\n\t\tNMG_UNLOCK();\n\t\tnm_os_vi_detach(na->ifp);\n\t\tNMG_LOCK();\n\t}\n}\n\n/* remove a persistent VALE port from the system */\nstatic int\nnm_vi_destroy(const char *name)\n{\n\tstruct ifnet *ifp;\n\tstruct netmap_vp_adapter *vpna;\n\tint error;\n\n\tifp = ifunit_ref(name);\n\tif (!ifp)\n\t\treturn ENXIO;\n\tNMG_LOCK();\n\t/* make sure this is actually a VALE port */\n\tif (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {\n\t\terror = EINVAL;\n\t\tgoto err;\n\t}\n\n\tvpna = (struct netmap_vp_adapter *)NA(ifp);\n\n\t/* we can only destroy ports that were created via NETMAP_BDG_NEWIF */\n\tif (vpna->autodelete) {\n\t\terror = EINVAL;\n\t\tgoto err;\n\t}\n\n\t/* also make sure that nobody is using the inferface */\n\tif (NETMAP_OWNED_BY_ANY(&vpna->up) ||\n\t vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? 
*/) {\n\t\terror = EBUSY;\n\t\tgoto err;\n\t}\n\n\tNMG_UNLOCK();\n\n\tD(\"destroying a persistent vale interface %s\", ifp->if_xname);\n\t/* Linux requires all the references are released\n\t * before unregister\n\t */\n\tnetmap_detach(ifp);\n\tif_rele(ifp);\n\tnm_os_vi_detach(ifp);\n\treturn 0;\n\nerr:\n\tNMG_UNLOCK();\n\tif_rele(ifp);\n\treturn error;\n}\n\nstatic int\nnm_update_info(struct nmreq *nmr, struct netmap_adapter *na)\n{\n\tnmr->nr_rx_rings = na->num_rx_rings;\n\tnmr->nr_tx_rings = na->num_tx_rings;\n\tnmr->nr_rx_slots = na->num_rx_desc;\n\tnmr->nr_tx_slots = na->num_tx_desc;\n\treturn netmap_mem_get_info(na->nm_mem, &nmr->nr_memsize, NULL, &nmr->nr_arg2);\n}\n\n/*\n * Create a virtual interface registered to the system.\n * The interface will be attached to a bridge later.\n */\nint\nnetmap_vi_create(struct nmreq *nmr, int autodelete)\n{\n\tstruct ifnet *ifp;\n\tstruct netmap_vp_adapter *vpna;\n\tstruct netmap_mem_d *nmd = NULL;\n\tint error;\n\n\t/* don't include VALE prefix */\n\tif (!strncmp(nmr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME)))\n\t\treturn EINVAL;\n\tifp = ifunit_ref(nmr->nr_name);\n\tif (ifp) { /* already exist, cannot create new one */\n\t\terror = EEXIST;\n\t\tNMG_LOCK();\n\t\tif (NM_NA_VALID(ifp)) {\n\t\t\tint update_err = nm_update_info(nmr, NA(ifp));\n\t\t\tif (update_err)\n\t\t\t\terror = update_err;\n\t\t}\n\t\tNMG_UNLOCK();\n\t\tif_rele(ifp);\n\t\treturn error;\n\t}\n\terror = nm_os_vi_persist(nmr->nr_name, &ifp);\n\tif (error)\n\t\treturn error;\n\n\tNMG_LOCK();\n\tif (nmr->nr_arg2) {\n\t\tnmd = netmap_mem_find(nmr->nr_arg2);\n\t\tif (nmd == NULL) {\n\t\t\terror = EINVAL;\n\t\t\tgoto err_1;\n\t\t}\n\t}\n\t/* netmap_vp_create creates a struct netmap_vp_adapter */\n\terror = netmap_vp_create(nmr, ifp, nmd, &vpna);\n\tif (error) {\n\t\tD(\"error %d\", error);\n\t\tgoto err_1;\n\t}\n\t/* persist-specific routines */\n\tvpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl;\n\tif (!autodelete) {\n\t\tnetmap_adapter_get(&vpna->up);\n\t} else {\n\t\tvpna->autodelete = 1;\n\t}\n\tNM_ATTACH_NA(ifp, &vpna->up);\n\t/* return the updated info */\n\terror = nm_update_info(nmr, &vpna->up);\n\tif (error) {\n\t\tgoto err_2;\n\t}\n\tD(\"returning nr_arg2 %d\", nmr->nr_arg2);\n\tif (nmd)\n\t\tnetmap_mem_put(nmd);\n\tNMG_UNLOCK();\n\tD(\"created %s\", ifp->if_xname);\n\treturn 0;\n\nerr_2:\n\tnetmap_detach(ifp);\nerr_1:\n\tif (nmd)\n\t\tnetmap_mem_put(nmd);\n\tNMG_UNLOCK();\n\tnm_os_vi_detach(ifp);\n\n\treturn error;\n}\n\n/* Try to get a reference to a netmap adapter attached to a VALE switch.\n * If the adapter is found (or is created), this function returns 0, a\n * non NULL pointer is returned into *na, and the caller holds a\n * reference to the adapter.\n * If an adapter is not found, then no reference is grabbed and the\n * function returns an error code, or 0 if there is just a VALE prefix\n * mismatch. Therefore the caller holds a reference when\n * (*na != NULL && return == 0).\n */\nint\nnetmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na,\n\t\tstruct netmap_mem_d *nmd, int create)\n{\n\tchar *nr_name = nmr->nr_name;\n\tconst char *ifname;\n\tstruct ifnet *ifp = NULL;\n\tint error = 0;\n\tstruct netmap_vp_adapter *vpna, *hostna = NULL;\n\tstruct nm_bridge *b;\n\tint i, j, cand = -1, cand2 = -1;\n\tint needed;\n\n\t*na = NULL; /* default return value */\n\n\t/* first try to see if this is a bridge port. 
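* Bridge port names look like valeXX:YY; a name without the\n\t * NM_BDG_NAME prefix is simply not ours, and we return 0 with\n\t * *na left NULL so other attach routines can try. 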
*/\n\tNMG_LOCK_ASSERT();\n\tif (strncmp(nr_name, NM_BDG_NAME, sizeof(NM_BDG_NAME) - 1)) {\n\t\treturn 0; /* no error, but no VALE prefix */\n\t}\n\n\tb = nm_find_bridge(nr_name, create);\n\tif (b == NULL) {\n\t\tD(\"no bridges available for '%s'\", nr_name);\n\t\treturn (create ? ENOMEM : ENXIO);\n\t}\n\tif (strlen(nr_name) < b->bdg_namelen) /* impossible */\n\t\tpanic(\"x\");\n\n\t/* Now we are sure that name starts with the bridge's name,\n\t * lookup the port in the bridge. We need to scan the entire\n\t * list. It is not important to hold a WLOCK on the bridge\n\t * during the search because NMG_LOCK already guarantees\n\t * that there are no other possible writers.\n\t */\n\n\t/* lookup in the local list of ports */\n\tfor (j = 0; j < b->bdg_active_ports; j++) {\n\t\ti = b->bdg_port_index[j];\n\t\tvpna = b->bdg_ports[i];\n\t\t// KASSERT(na != NULL);\n\t\tND(\"checking %s\", vpna->up.name);\n\t\tif (!strcmp(vpna->up.name, nr_name)) {\n\t\t\tnetmap_adapter_get(&vpna->up);\n\t\t\tND(\"found existing if %s refs %d\", nr_name)\n\t\t\t*na = &vpna->up;\n\t\t\treturn 0;\n\t\t}\n\t}\n\t/* not found, should we create it? */\n\tif (!create)\n\t\treturn ENXIO;\n\t/* yes we should, see if we have space to attach entries */\n\tneeded = 2; /* in some cases we only need 1 */\n\tif (b->bdg_active_ports + needed >= NM_BDG_MAXPORTS) {\n\t\tD(\"bridge full %d, cannot create new port\", b->bdg_active_ports);\n\t\treturn ENOMEM;\n\t}\n\t/* record the next two ports available, but do not allocate yet */\n\tcand = b->bdg_port_index[b->bdg_active_ports];\n\tcand2 = b->bdg_port_index[b->bdg_active_ports + 1];\n\tND(\"+++ bridge %s port %s used %d avail %d %d\",\n\t\tb->bdg_basename, ifname, b->bdg_active_ports, cand, cand2);\n\n\t/*\n\t * try see if there is a matching NIC with this name\n\t * (after the bridge's name)\n\t */\n\tifname = nr_name + b->bdg_namelen + 1;\n\tifp = ifunit_ref(ifname);\n\tif (!ifp) {\n\t\t/* Create an ephemeral virtual port\n\t\t * This block contains all the ephemeral-specific logics\n\t\t */\n\t\tif (nmr->nr_cmd) {\n\t\t\t/* nr_cmd must be 0 for a virtual port */\n\t\t\terror = EINVAL;\n\t\t\tgoto out;\n\t\t}\n\n\t\t/* bdg_netmap_attach creates a struct netmap_adapter */\n\t\terror = netmap_vp_create(nmr, NULL, nmd, &vpna);\n\t\tif (error) {\n\t\t\tD(\"error %d\", error);\n\t\t\tgoto out;\n\t\t}\n\t\t/* shortcut - we can skip get_hw_na(),\n\t\t * ownership check and nm_bdg_attach()\n\t\t */\n\t} else {\n\t\tstruct netmap_adapter *hw;\n\n\t\terror = netmap_get_hw_na(ifp, nmd, &hw);\n\t\tif (error || hw == NULL)\n\t\t\tgoto out;\n\n\t\t/* host adapter might not be created */\n\t\terror = hw->nm_bdg_attach(nr_name, hw);\n\t\tif (error)\n\t\t\tgoto out;\n\t\tvpna = hw->na_vp;\n\t\thostna = hw->na_hostvp;\n\t\tif (nmr->nr_arg1 != NETMAP_BDG_HOST)\n\t\t\thostna = NULL;\n\t}\n\n\tBDG_WLOCK(b);\n\tvpna->bdg_port = cand;\n\tND(\"NIC %p to bridge port %d\", vpna, cand);\n\t/* bind the port to the bridge (virtual ports are not active) */\n\tb->bdg_ports[cand] = vpna;\n\tvpna->na_bdg = b;\n\tb->bdg_active_ports++;\n\tif (hostna != NULL) {\n\t\t/* also bind the host stack to the bridge */\n\t\tb->bdg_ports[cand2] = hostna;\n\t\thostna->bdg_port = cand2;\n\t\thostna->na_bdg = b;\n\t\tb->bdg_active_ports++;\n\t\tND(\"host %p to bridge port %d\", hostna, cand2);\n\t}\n\tND(\"if %s refs %d\", ifname, vpna->up.na_refcount);\n\tBDG_WUNLOCK(b);\n\t*na = &vpna->up;\n\tnetmap_adapter_get(*na);\n\nout:\n\tif (ifp)\n\t\tif_rele(ifp);\n\n\treturn error;\n}\n\n\n/* Process NETMAP_BDG_ATTACH */\nstatic 
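/*\n * User space reaches this path through a NIOCREGIF ioctl, as vale-ctl\n * does. A minimal sketch (names are examples, error handling omitted,\n * fd is an open /dev/netmap descriptor):\n *\n *\tstruct nmreq nmr;\n *\n *\tbzero(&nmr, sizeof(nmr));\n *\tnmr.nr_version = NETMAP_API;\n *\tstrncpy(nmr.nr_name, "vale0:em1", sizeof(nmr.nr_name) - 1);\n *\tnmr.nr_cmd = NETMAP_BDG_ATTACH;\n *\tioctl(fd, NIOCREGIF, &nmr);\n */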
int\nnm_bdg_ctl_attach(struct nmreq *nmr)\n{\n\tstruct netmap_adapter *na;\n\tstruct netmap_mem_d *nmd = NULL;\n\tint error;\n\n\tNMG_LOCK();\n\n\tif (nmr->nr_arg2) {\n\t\tnmd = netmap_mem_find(nmr->nr_arg2);\n\t\tif (nmd == NULL) {\n\t\t\terror = EINVAL;\n\t\t\tgoto unlock_exit;\n\t\t}\n\t}\n\n\terror = netmap_get_bdg_na(nmr, &na, nmd, 1 /* create if not exists */);\n\tif (error) /* no device */\n\t\tgoto unlock_exit;\n\n\tif (na == NULL) { /* VALE prefix missing */\n\t\terror = EINVAL;\n\t\tgoto unlock_exit;\n\t}\n\n\tif (NETMAP_OWNED_BY_ANY(na)) {\n\t\terror = EBUSY;\n\t\tgoto unref_exit;\n\t}\n\n\tif (na->nm_bdg_ctl) {\n\t\t/* nop for VALE ports. The bwrap needs to put the hwna\n\t\t * in netmap mode (see netmap_bwrap_bdg_ctl)\n\t\t */\n\t\terror = na->nm_bdg_ctl(na, nmr, 1);\n\t\tif (error)\n\t\t\tgoto unref_exit;\n\t\tND(\"registered %s to netmap-mode\", na->name);\n\t}\n\tNMG_UNLOCK();\n\treturn 0;\n\nunref_exit:\n\tnetmap_adapter_put(na);\nunlock_exit:\n\tNMG_UNLOCK();\n\treturn error;\n}\n\nstatic inline int\nnm_is_bwrap(struct netmap_adapter *na)\n{\n\treturn na->nm_register == netmap_bwrap_reg;\n}\n\n/* process NETMAP_BDG_DETACH */\nstatic int\nnm_bdg_ctl_detach(struct nmreq *nmr)\n{\n\tstruct netmap_adapter *na;\n\tint error;\n\n\tNMG_LOCK();\n\terror = netmap_get_bdg_na(nmr, &na, NULL, 0 /* don't create */);\n\tif (error) { /* no device, or another bridge or user owns the device */\n\t\tgoto unlock_exit;\n\t}\n\n\tif (na == NULL) { /* VALE prefix missing */\n\t\terror = EINVAL;\n\t\tgoto unlock_exit;\n\t} else if (nm_is_bwrap(na) &&\n\t\t ((struct netmap_bwrap_adapter *)na)->na_polling_state) {\n\t\t/* Don't detach a NIC with polling */\n\t\terror = EBUSY;\n\t\tnetmap_adapter_put(na);\n\t\tgoto unlock_exit;\n\t}\n\tif (na->nm_bdg_ctl) {\n\t\t/* remove the port from bridge. The bwrap\n\t\t * also needs to put the hwna in normal mode\n\t\t */\n\t\terror = na->nm_bdg_ctl(na, nmr, 0);\n\t}\n\n\tnetmap_adapter_put(na);\nunlock_exit:\n\tNMG_UNLOCK();\n\treturn error;\n\n}\n\nstruct nm_bdg_polling_state;\nstruct\nnm_bdg_kthread {\n\tstruct nm_kthread *nmk;\n\tu_int qfirst;\n\tu_int qlast;\n\tstruct nm_bdg_polling_state *bps;\n};\n\nstruct nm_bdg_polling_state {\n\tbool configured;\n\tbool stopped;\n\tstruct netmap_bwrap_adapter *bna;\n\tu_int reg;\n\tu_int qfirst;\n\tu_int qlast;\n\tu_int cpu_from;\n\tu_int ncpus;\n\tstruct nm_bdg_kthread *kthreads;\n};\n\nstatic void\nnetmap_bwrap_polling(void *data)\n{\n\tstruct nm_bdg_kthread *nbk = data;\n\tstruct netmap_bwrap_adapter *bna;\n\tu_int qfirst, qlast, i;\n\tstruct netmap_kring *kring0, *kring;\n\n\tif (!nbk)\n\t\treturn;\n\tqfirst = nbk->qfirst;\n\tqlast = nbk->qlast;\n\tbna = nbk->bps->bna;\n\tkring0 = NMR(bna->hwna, NR_RX);\n\n\tfor (i = qfirst; i < qlast; i++) {\n\t\tkring = kring0 + i;\n\t\tkring->nm_notify(kring, 0);\n\t}\n}\n\nstatic int\nnm_bdg_create_kthreads(struct nm_bdg_polling_state *bps)\n{\n\tstruct nm_kthread_cfg kcfg;\n\tint i, j;\n\n\tbps->kthreads = nm_os_malloc(sizeof(struct nm_bdg_kthread) * bps->ncpus);\n\tif (bps->kthreads == NULL)\n\t\treturn ENOMEM;\n\n\tbzero(&kcfg, sizeof(kcfg));\n\tkcfg.worker_fn = netmap_bwrap_polling;\n\tfor (i = 0; i < bps->ncpus; i++) {\n\t\tstruct nm_bdg_kthread *t = bps->kthreads + i;\n\t\tint all = (bps->ncpus == 1 && bps->reg == NR_REG_ALL_NIC);\n\t\tint affinity = bps->cpu_from + i;\n\n\t\tt->bps = bps;\n\t\tt->qfirst = all ? bps->qfirst /* must be 0 */: affinity;\n\t\tt->qlast = all ? 
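/* REG_ALL_NIC: one worker covers the whole [qfirst, qlast) range;\n\t\t * otherwise each worker polls exactly one ring */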
bps->qlast : t->qfirst + 1;\n\t\tD(\"kthread %d a:%u qf:%u ql:%u\", i, affinity, t->qfirst,\n\t\t\tt->qlast);\n\n\t\tkcfg.type = i;\n\t\tkcfg.worker_private = t;\n\t\tt->nmk = nm_os_kthread_create(&kcfg, 0, NULL);\n\t\tif (t->nmk == NULL) {\n\t\t\tgoto cleanup;\n\t\t}\n\t\tnm_os_kthread_set_affinity(t->nmk, affinity);\n\t}\n\treturn 0;\n\ncleanup:\n\tfor (j = 0; j < i; j++) {\n\t\tstruct nm_bdg_kthread *t = bps->kthreads + j;\n\t\tnm_os_kthread_delete(t->nmk);\n\t}\n\tnm_os_free(bps->kthreads);\n\treturn EFAULT;\n}\n\n/* a version of ptnetmap_start_kthreads() */\nstatic int\nnm_bdg_polling_start_kthreads(struct nm_bdg_polling_state *bps)\n{\n\tint error, i, j;\n\n\tif (!bps) {\n\t\tD(\"polling is not configured\");\n\t\treturn EFAULT;\n\t}\n\tbps->stopped = false;\n\n\tfor (i = 0; i < bps->ncpus; i++) {\n\t\tstruct nm_bdg_kthread *t = bps->kthreads + i;\n\t\terror = nm_os_kthread_start(t->nmk);\n\t\tif (error) {\n\t\t\tD(\"error in nm_kthread_start()\");\n\t\t\tgoto cleanup;\n\t\t}\n\t}\n\treturn 0;\n\ncleanup:\n\tfor (j = 0; j < i; j++) {\n\t\tstruct nm_bdg_kthread *t = bps->kthreads + j;\n\t\tnm_os_kthread_stop(t->nmk);\n\t}\n\tbps->stopped = true;\n\treturn error;\n}\n\nstatic void\nnm_bdg_polling_stop_delete_kthreads(struct nm_bdg_polling_state *bps)\n{\n\tint i;\n\n\tif (!bps)\n\t\treturn;\n\n\tfor (i = 0; i < bps->ncpus; i++) {\n\t\tstruct nm_bdg_kthread *t = bps->kthreads + i;\n\t\tnm_os_kthread_stop(t->nmk);\n\t\tnm_os_kthread_delete(t->nmk);\n\t}\n\tbps->stopped = true;\n}\n\nstatic int\nget_polling_cfg(struct nmreq *nmr, struct netmap_adapter *na,\n\t\t\tstruct nm_bdg_polling_state *bps)\n{\n\tint req_cpus, avail_cpus, core_from;\n\tu_int reg, i, qfirst, qlast;\n\n\tavail_cpus = nm_os_ncpus();\n\treq_cpus = nmr->nr_arg1;\n\n\tif (req_cpus == 0) {\n\t\tD(\"req_cpus must be > 0\");\n\t\treturn EINVAL;\n\t} else if (req_cpus >= avail_cpus) {\n\t\tD(\"for safety, we need at least one core left in the system\");\n\t\treturn EINVAL;\n\t}\n\treg = nmr->nr_flags & NR_REG_MASK;\n\ti = nmr->nr_ringid & NETMAP_RING_MASK;\n\t/*\n\t * ONE_NIC: dedicate one core to one ring. If multiple cores\n\t * are specified, consecutive rings are also polled.\n\t * For example, if ringid=2 and 2 cores are given,\n\t * ring 2 and 3 are polled by core 2 and 3, respectively.\n\t * ALL_NIC: poll all the rings using a core specified by ringid.\n\t * the number of cores must be 1.\n\t */\n\tif (reg == NR_REG_ONE_NIC) {\n\t\tif (i + req_cpus > nma_get_nrings(na, NR_RX)) {\n\t\t\tD(\"only %d rings exist (ring %u-%u is given)\",\n\t\t\t\tnma_get_nrings(na, NR_RX), i, i+req_cpus);\n\t\t\treturn EINVAL;\n\t\t}\n\t\tqfirst = i;\n\t\tqlast = qfirst + req_cpus;\n\t\tcore_from = qfirst;\n\t} else if (reg == NR_REG_ALL_NIC) {\n\t\tif (req_cpus != 1) {\n\t\t\tD(\"ncpus must be 1 not %d for REG_ALL_NIC\", req_cpus);\n\t\t\treturn EINVAL;\n\t\t}\n\t\tqfirst = 0;\n\t\tqlast = nma_get_nrings(na, NR_RX);\n\t\tcore_from = i;\n\t} else {\n\t\tD(\"reg must be ALL_NIC or ONE_NIC\");\n\t\treturn EINVAL;\n\t}\n\n\tbps->reg = reg;\n\tbps->qfirst = qfirst;\n\tbps->qlast = qlast;\n\tbps->cpu_from = core_from;\n\tbps->ncpus = req_cpus;\n\tD(\"%s qfirst %u qlast %u cpu_from %u ncpus %u\",\n\t\treg == NR_REG_ALL_NIC ? 
\"REG_ALL_NIC\" : \"REG_ONE_NIC\",\n\t\tqfirst, qlast, core_from, req_cpus);\n\treturn 0;\n}\n\nstatic int\nnm_bdg_ctl_polling_start(struct nmreq *nmr, struct netmap_adapter *na)\n{\n\tstruct nm_bdg_polling_state *bps;\n\tstruct netmap_bwrap_adapter *bna;\n\tint error;\n\n\tbna = (struct netmap_bwrap_adapter *)na;\n\tif (bna->na_polling_state) {\n\t\tD(\"ERROR adapter already in polling mode\");\n\t\treturn EFAULT;\n\t}\n\n\tbps = nm_os_malloc(sizeof(*bps));\n\tif (!bps)\n\t\treturn ENOMEM;\n\tbps->configured = false;\n\tbps->stopped = true;\n\n\tif (get_polling_cfg(nmr, na, bps)) {\n\t\tnm_os_free(bps);\n\t\treturn EINVAL;\n\t}\n\n\tif (nm_bdg_create_kthreads(bps)) {\n\t\tnm_os_free(bps);\n\t\treturn EFAULT;\n\t}\n\n\tbps->configured = true;\n\tbna->na_polling_state = bps;\n\tbps->bna = bna;\n\n\t/* disable interrupt if possible */\n\tif (bna->hwna->nm_intr)\n\t\tbna->hwna->nm_intr(bna->hwna, 0);\n\t/* start kthread now */\n\terror = nm_bdg_polling_start_kthreads(bps);\n\tif (error) {\n\t\tD(\"ERROR nm_bdg_polling_start_kthread()\");\n\t\tnm_os_free(bps->kthreads);\n\t\tnm_os_free(bps);\n\t\tbna->na_polling_state = NULL;\n\t\tif (bna->hwna->nm_intr)\n\t\t\tbna->hwna->nm_intr(bna->hwna, 1);\n\t}\n\treturn error;\n}\n\nstatic int\nnm_bdg_ctl_polling_stop(struct nmreq *nmr, struct netmap_adapter *na)\n{\n\tstruct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na;\n\tstruct nm_bdg_polling_state *bps;\n\n\tif (!bna->na_polling_state) {\n\t\tD(\"ERROR adapter is not in polling mode\");\n\t\treturn EFAULT;\n\t}\n\tbps = bna->na_polling_state;\n\tnm_bdg_polling_stop_delete_kthreads(bna->na_polling_state);\n\tbps->configured = false;\n\tnm_os_free(bps);\n\tbna->na_polling_state = NULL;\n\t/* reenable interrupt */\n\tif (bna->hwna->nm_intr)\n\t\tbna->hwna->nm_intr(bna->hwna, 1);\n\treturn 0;\n}\n\n/* Called by either user's context (netmap_ioctl())\n * or external kernel modules (e.g., Openvswitch).\n * Operation is indicated in nmr->nr_cmd.\n * NETMAP_BDG_OPS that sets configure/lookup/dtor functions to the bridge\n * requires bdg_ops argument; the other commands ignore this argument.\n *\n * Called without NMG_LOCK.\n */\nint\nnetmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops)\n{\n\tstruct nm_bridge *b, *bridges;\n\tstruct netmap_adapter *na;\n\tstruct netmap_vp_adapter *vpna;\n\tchar *name = nmr->nr_name;\n\tint cmd = nmr->nr_cmd, namelen = strlen(name);\n\tint error = 0, i, j;\n\tu_int num_bridges;\n\n\tnetmap_bns_getbridges(&bridges, &num_bridges);\n\n\tswitch (cmd) {\n\tcase NETMAP_BDG_NEWIF:\n\t\terror = netmap_vi_create(nmr, 0 /* no autodelete */);\n\t\tbreak;\n\n\tcase NETMAP_BDG_DELIF:\n\t\terror = nm_vi_destroy(nmr->nr_name);\n\t\tbreak;\n\n\tcase NETMAP_BDG_ATTACH:\n\t\terror = nm_bdg_ctl_attach(nmr);\n\t\tbreak;\n\n\tcase NETMAP_BDG_DETACH:\n\t\terror = nm_bdg_ctl_detach(nmr);\n\t\tbreak;\n\n\tcase NETMAP_BDG_LIST:\n\t\t/* this is used to enumerate bridges and ports */\n\t\tif (namelen) { /* look up indexes of bridge and port */\n\t\t\tif (strncmp(name, NM_BDG_NAME, strlen(NM_BDG_NAME))) {\n\t\t\t\terror = EINVAL;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tNMG_LOCK();\n\t\t\tb = nm_find_bridge(name, 0 /* don't create */);\n\t\t\tif (!b) {\n\t\t\t\terror = ENOENT;\n\t\t\t\tNMG_UNLOCK();\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\terror = 0;\n\t\t\tnmr->nr_arg1 = b - bridges; /* bridge index */\n\t\t\tnmr->nr_arg2 = NM_BDG_NOPORT;\n\t\t\tfor (j = 0; j < b->bdg_active_ports; j++) {\n\t\t\t\ti = b->bdg_port_index[j];\n\t\t\t\tvpna = b->bdg_ports[i];\n\t\t\t\tif (vpna == NULL) 
{\n\t\t\t\t\tD(\"---AAAAAAAAARGH-------\");\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t/* the former and the latter identify a\n\t\t\t\t * virtual port and a NIC, respectively\n\t\t\t\t */\n\t\t\t\tif (!strcmp(vpna->up.name, name)) {\n\t\t\t\t\tnmr->nr_arg2 = i; /* port index */\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tNMG_UNLOCK();\n\t\t} else {\n\t\t\t/* return the first non-empty entry starting from\n\t\t\t * bridge nr_arg1 and port nr_arg2.\n\t\t\t *\n\t\t\t * Users can detect the end of the same bridge by\n\t\t\t * seeing the new and old value of nr_arg1, and can\n\t\t\t * detect the end of all the bridge by error != 0\n\t\t\t */\n\t\t\ti = nmr->nr_arg1;\n\t\t\tj = nmr->nr_arg2;\n\n\t\t\tNMG_LOCK();\n\t\t\tfor (error = ENOENT; i < NM_BRIDGES; i++) {\n\t\t\t\tb = bridges + i;\n\t\t\t\tfor ( ; j < NM_BDG_MAXPORTS; j++) {\n\t\t\t\t\tif (b->bdg_ports[j] == NULL)\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\tvpna = b->bdg_ports[j];\n\t\t\t\t\tstrncpy(name, vpna->up.name, (size_t)IFNAMSIZ);\n\t\t\t\t\terror = 0;\n\t\t\t\t\tgoto out;\n\t\t\t\t}\n\t\t\t\tj = 0; /* following bridges scan from 0 */\n\t\t\t}\n\t\tout:\n\t\t\tnmr->nr_arg1 = i;\n\t\t\tnmr->nr_arg2 = j;\n\t\t\tNMG_UNLOCK();\n\t\t}\n\t\tbreak;\n\n\tcase NETMAP_BDG_REGOPS: /* XXX this should not be available from userspace */\n\t\t/* register callbacks to the given bridge.\n\t\t * nmr->nr_name may be just bridge's name (including ':'\n\t\t * if it is not just NM_NAME).\n\t\t */\n\t\tif (!bdg_ops) {\n\t\t\terror = EINVAL;\n\t\t\tbreak;\n\t\t}\n\t\tNMG_LOCK();\n\t\tb = nm_find_bridge(name, 0 /* don't create */);\n\t\tif (!b) {\n\t\t\terror = EINVAL;\n\t\t} else {\n\t\t\tb->bdg_ops = *bdg_ops;\n\t\t}\n\t\tNMG_UNLOCK();\n\t\tbreak;\n\n\tcase NETMAP_BDG_VNET_HDR:\n\t\t/* Valid lengths for the virtio-net header are 0 (no header),\n\t\t 10 and 12. 
*/\n\t\tif (nmr->nr_arg1 != 0 &&\n\t\t\tnmr->nr_arg1 != sizeof(struct nm_vnet_hdr) &&\n\t\t\t\tnmr->nr_arg1 != 12) {\n\t\t\terror = EINVAL;\n\t\t\tbreak;\n\t\t}\n\t\tNMG_LOCK();\n\t\terror = netmap_get_bdg_na(nmr, &na, NULL, 0);\n\t\tif (na && !error) {\n\t\t\tvpna = (struct netmap_vp_adapter *)na;\n\t\t\tna->virt_hdr_len = nmr->nr_arg1;\n\t\t\tif (na->virt_hdr_len) {\n\t\t\t\tvpna->mfs = NETMAP_BUF_SIZE(na);\n\t\t\t}\n\t\t\tD(\"Using vnet_hdr_len %d for %p\", na->virt_hdr_len, na);\n\t\t\tnetmap_adapter_put(na);\n\t\t} else if (!na) {\n\t\t\terror = ENXIO;\n\t\t}\n\t\tNMG_UNLOCK();\n\t\tbreak;\n\n\tcase NETMAP_BDG_POLLING_ON:\n\tcase NETMAP_BDG_POLLING_OFF:\n\t\tNMG_LOCK();\n\t\terror = netmap_get_bdg_na(nmr, &na, NULL, 0);\n\t\tif (na && !error) {\n\t\t\tif (!nm_is_bwrap(na)) {\n\t\t\t\terror = EOPNOTSUPP;\n\t\t\t} else if (cmd == NETMAP_BDG_POLLING_ON) {\n\t\t\t\terror = nm_bdg_ctl_polling_start(nmr, na);\n\t\t\t\tif (!error)\n\t\t\t\t\tnetmap_adapter_get(na);\n\t\t\t} else {\n\t\t\t\terror = nm_bdg_ctl_polling_stop(nmr, na);\n\t\t\t\tif (!error)\n\t\t\t\t\tnetmap_adapter_put(na);\n\t\t\t}\n\t\t\tnetmap_adapter_put(na);\n\t\t}\n\t\tNMG_UNLOCK();\n\t\tbreak;\n\n\tdefault:\n\t\tD(\"invalid cmd (nmr->nr_cmd) (0x%x)\", cmd);\n\t\terror = EINVAL;\n\t\tbreak;\n\t}\n\treturn error;\n}\n\nint\nnetmap_bdg_config(struct nmreq *nmr)\n{\n\tstruct nm_bridge *b;\n\tint error = EINVAL;\n\n\tNMG_LOCK();\n\tb = nm_find_bridge(nmr->nr_name, 0);\n\tif (!b) {\n\t\tNMG_UNLOCK();\n\t\treturn error;\n\t}\n\tNMG_UNLOCK();\n\t/* Don't call config() with NMG_LOCK() held */\n\tBDG_RLOCK(b);\n\tif (b->bdg_ops.config != NULL)\n\t\terror = b->bdg_ops.config((struct nm_ifreq *)nmr);\n\tBDG_RUNLOCK(b);\n\treturn error;\n}\n\n\n/* nm_krings_create callback for VALE ports.\n * Calls the standard netmap_krings_create, then adds leases on rx\n * rings and bdgfwd on tx rings.\n */\nstatic int\nnetmap_vp_krings_create(struct netmap_adapter *na)\n{\n\tu_int tailroom;\n\tint error, i;\n\tuint32_t *leases;\n\tu_int nrx = netmap_real_rings(na, NR_RX);\n\n\t/*\n\t * Leases are attached to RX rings on vale ports\n\t */\n\ttailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;\n\n\terror = netmap_krings_create(na, tailroom);\n\tif (error)\n\t\treturn error;\n\n\tleases = na->tailroom;\n\n\tfor (i = 0; i < nrx; i++) { /* Receive rings */\n\t\tna->rx_rings[i].nkr_leases = leases;\n\t\tleases += na->num_rx_desc;\n\t}\n\n\terror = nm_alloc_bdgfwd(na);\n\tif (error) {\n\t\tnetmap_krings_delete(na);\n\t\treturn error;\n\t}\n\n\treturn 0;\n}\n\n\n/* nm_krings_delete callback for VALE ports. */\nstatic void\nnetmap_vp_krings_delete(struct netmap_adapter *na)\n{\n\tnm_free_bdgfwd(na);\n\tnetmap_krings_delete(na);\n}\n\n\nstatic int\nnm_bdg_flush(struct nm_bdg_fwd *ft, u_int n,\n\tstruct netmap_vp_adapter *na, u_int ring_nr);\n\n\n/*\n * main dispatch routine for the bridge.\n * Grab packets from a kring, move them into the ft structure\n * associated to the tx (input) port. Max one instance per port,\n * filtered on input (ioctl, poll or XXX).\n * Returns the next position in the ring.\n */\nstatic int\nnm_bdg_preflush(struct netmap_kring *kring, u_int end)\n{\n\tstruct netmap_vp_adapter *na =\n\t\t(struct netmap_vp_adapter*)kring->na;\n\tstruct netmap_ring *ring = kring->ring;\n\tstruct nm_bdg_fwd *ft;\n\tu_int ring_nr = kring->ring_id;\n\tu_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;\n\tu_int ft_i = 0;\t/* start from 0 */\n\tu_int frags = 1; /* how many frags ? 
*/\n\tstruct nm_bridge *b = na->na_bdg;\n\n\t/* To protect against modifications to the bridge we acquire a\n\t * shared lock, waiting if we can sleep (if the source port is\n\t * attached to a user process) or with a trylock otherwise (NICs).\n\t */\n\tND(\"wait rlock for %d packets\", ((j > end ? lim+1 : 0) + end) - j);\n\tif (na->up.na_flags & NAF_BDG_MAYSLEEP)\n\t\tBDG_RLOCK(b);\n\telse if (!BDG_RTRYLOCK(b))\n\t\treturn 0;\n\tND(5, \"rlock acquired for %d packets\", ((j > end ? lim+1 : 0) + end) - j);\n\tft = kring->nkr_ft;\n\n\tfor (; likely(j != end); j = nm_next(j, lim)) {\n\t\tstruct netmap_slot *slot = &ring->slot[j];\n\t\tchar *buf;\n\n\t\tft[ft_i].ft_len = slot->len;\n\t\tft[ft_i].ft_flags = slot->flags;\n\n\t\tND(\"flags is 0x%x\", slot->flags);\n\t\t/* we do not use the buf changed flag, but we still need to reset it */\n\t\tslot->flags &= ~NS_BUF_CHANGED;\n\n\t\t/* this slot goes into a list so initialize the link field */\n\t\tft[ft_i].ft_next = NM_FT_NULL;\n\t\tbuf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?\n\t\t\t(void *)(uintptr_t)slot->ptr : NMB(&na->up, slot);\n\t\tif (unlikely(buf == NULL)) {\n\t\t\tRD(5, \"NULL %s buffer pointer from %s slot %d len %d\",\n\t\t\t\t(slot->flags & NS_INDIRECT) ? \"INDIRECT\" : \"DIRECT\",\n\t\t\t\tkring->name, j, ft[ft_i].ft_len);\n\t\t\tbuf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);\n\t\t\tft[ft_i].ft_len = 0;\n\t\t\tft[ft_i].ft_flags = 0;\n\t\t}\n\t\t__builtin_prefetch(buf);\n\t\t++ft_i;\n\t\tif (slot->flags & NS_MOREFRAG) {\n\t\t\tfrags++;\n\t\t\tcontinue;\n\t\t}\n\t\tif (unlikely(netmap_verbose && frags > 1))\n\t\t\tRD(5, \"%d frags at %d\", frags, ft_i - frags);\n\t\tft[ft_i - frags].ft_frags = frags;\n\t\tfrags = 1;\n\t\tif (unlikely((int)ft_i >= bridge_batch))\n\t\t\tft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);\n\t}\n\tif (frags > 1) {\n\t\t/* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we\n\t\t * have to fix frags count. */\n\t\tfrags--;\n\t\tft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;\n\t\tft[ft_i - frags].ft_frags = frags;\n\t\tD(\"Truncate incomplete fragment at %d (%d frags)\", ft_i, frags);\n\t}\n\tif (ft_i)\n\t\tft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);\n\tBDG_RUNLOCK(b);\n\treturn j;\n}\n\n\n/* ----- FreeBSD if_bridge hash function ------- */\n\n/*\n * The following hash function is adapted from \"Hash Functions\" by Bob Jenkins\n * (\"Algorithm Alley\", Dr. 
Dobbs Journal, September 1997).\n *\n * http://www.burtleburtle.net/bob/hash/spooky.html\n */\n#define mix(a, b, c) \\\ndo { \\\n a -= b; a -= c; a ^= (c >> 13); \\\n b -= c; b -= a; b ^= (a << 8); \\\n c -= a; c -= b; c ^= (b >> 13); \\\n a -= b; a -= c; a ^= (c >> 12); \\\n b -= c; b -= a; b ^= (a << 16); \\\n c -= a; c -= b; c ^= (b >> 5); \\\n a -= b; a -= c; a ^= (c >> 3); \\\n b -= c; b -= a; b ^= (a << 10); \\\n c -= a; c -= b; c ^= (b >> 15); \\\n} while (/*CONSTCOND*/0)\n\n\nstatic __inline uint32_t\nnm_bridge_rthash(const uint8_t *addr)\n{\n uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key\n\n b += addr[5] << 8;\n b += addr[4];\n a += addr[3] << 24;\n a += addr[2] << 16;\n a += addr[1] << 8;\n a += addr[0];\n\n mix(a, b, c);\n#define BRIDGE_RTHASH_MASK\t(NM_BDG_HASH-1)\n return (c & BRIDGE_RTHASH_MASK);\n}\n\n#undef mix\n\n\n/* nm_register callback for VALE ports */\nstatic int\nnetmap_vp_reg(struct netmap_adapter *na, int onoff)\n{\n\tstruct netmap_vp_adapter *vpna =\n\t\t(struct netmap_vp_adapter*)na;\n\tenum txrx t;\n\tint i;\n\n\t/* persistent ports may be put in netmap mode\n\t * before being attached to a bridge\n\t */\n\tif (vpna->na_bdg)\n\t\tBDG_WLOCK(vpna->na_bdg);\n\tif (onoff) {\n\t\tfor_rx_tx(t) {\n\t\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++) {\n\t\t\t\tstruct netmap_kring *kring = &NMR(na, t)[i];\n\n\t\t\t\tif (nm_kring_pending_on(kring))\n\t\t\t\t\tkring->nr_mode = NKR_NETMAP_ON;\n\t\t\t}\n\t\t}\n\t\tif (na->active_fds == 0)\n\t\t\tna->na_flags |= NAF_NETMAP_ON;\n\t\t /* XXX on FreeBSD, persistent VALE ports should also\n\t\t * toggle IFCAP_NETMAP in na->ifp (2014-03-16)\n\t\t */\n\t} else {\n\t\tif (na->active_fds == 0)\n\t\t\tna->na_flags &= ~NAF_NETMAP_ON;\n\t\tfor_rx_tx(t) {\n\t\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++) {\n\t\t\t\tstruct netmap_kring *kring = &NMR(na, t)[i];\n\n\t\t\t\tif (nm_kring_pending_off(kring))\n\t\t\t\t\tkring->nr_mode = NKR_NETMAP_OFF;\n\t\t\t}\n\t\t}\n\t}\n\tif (vpna->na_bdg)\n\t\tBDG_WUNLOCK(vpna->na_bdg);\n\treturn 0;\n}\n\n\n/*\n * Lookup function for a learning bridge.\n * Update the hash table with the source address,\n * and then returns the destination port index, and the\n * ring in *dst_ring (at the moment, always use ring 0)\n */\nu_int\nnetmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,\n\t\tstruct netmap_vp_adapter *na)\n{\n\tuint8_t *buf = ft->ft_buf;\n\tu_int buf_len = ft->ft_len;\n\tstruct nm_hash_ent *ht = na->na_bdg->ht;\n\tuint32_t sh, dh;\n\tu_int dst, mysrc = na->bdg_port;\n\tuint64_t smac, dmac;\n\tuint8_t indbuf[12];\n\n\t/* safety check, unfortunately we have many cases */\n\tif (buf_len >= 14 + na->up.virt_hdr_len) {\n\t\t/* virthdr + mac_hdr in the same slot */\n\t\tbuf += na->up.virt_hdr_len;\n\t\tbuf_len -= na->up.virt_hdr_len;\n\t} else if (buf_len == na->up.virt_hdr_len && ft->ft_flags & NS_MOREFRAG) {\n\t\t/* only header in first fragment */\n\t\tft++;\n\t\tbuf = ft->ft_buf;\n\t\tbuf_len = ft->ft_len;\n\t} else {\n\t\tRD(5, \"invalid buf format, length %d\", buf_len);\n\t\treturn NM_BDG_NOPORT;\n\t}\n\n\tif (ft->ft_flags & NS_INDIRECT) {\n\t\tif (copyin(buf, indbuf, sizeof(indbuf))) {\n\t\t\treturn NM_BDG_NOPORT;\n\t\t}\n\t\tbuf = indbuf;\n\t}\n\n\tdmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;\n\tsmac = le64toh(*(uint64_t *)(buf + 4));\n\tsmac >>= 16;\n\n\t/*\n\t * The hash is somewhat expensive, there might be some\n\t * worthwhile optimizations here.\n\t */\n\tif (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */\n\t\tuint8_t *s = 
buf+6;\n\t\tsh = nm_bridge_rthash(s); // XXX hash of source\n\t\t/* update source port forwarding entry */\n\t\tna->last_smac = ht[sh].mac = smac;\t/* XXX expire ? */\n\t\tht[sh].ports = mysrc;\n\t\tif (netmap_verbose)\n\t\t D(\"src %02x:%02x:%02x:%02x:%02x:%02x on port %d\",\n\t\t\ts[0], s[1], s[2], s[3], s[4], s[5], mysrc);\n\t}\n\tdst = NM_BDG_BROADCAST;\n\tif ((buf[0] & 1) == 0) { /* unicast */\n\t\tdh = nm_bridge_rthash(buf); // XXX hash of dst\n\t\tif (ht[dh].mac == dmac) {\t/* found dst */\n\t\t\tdst = ht[dh].ports;\n\t\t}\n\t\t/* XXX otherwise return NM_BDG_UNKNOWN ? */\n\t}\n\treturn dst;\n}\n\n\n/*\n * Available space in the ring. Only used in VALE code\n * and only with is_rx = 1\n */\nstatic inline uint32_t\nnm_kr_space(struct netmap_kring *k, int is_rx)\n{\n\tint space;\n\n\tif (is_rx) {\n\t\tint busy = k->nkr_hwlease - k->nr_hwcur;\n\t\tif (busy < 0)\n\t\t\tbusy += k->nkr_num_slots;\n\t\tspace = k->nkr_num_slots - 1 - busy;\n\t} else {\n\t\t/* XXX never used in this branch */\n\t\tspace = k->nr_hwtail - k->nkr_hwlease;\n\t\tif (space < 0)\n\t\t\tspace += k->nkr_num_slots;\n\t}\n#if 0\n\t// sanity check\n\tif (k->nkr_hwlease >= k->nkr_num_slots ||\n\t\tk->nr_hwcur >= k->nkr_num_slots ||\n\t\tk->nr_tail >= k->nkr_num_slots ||\n\t\tbusy < 0 ||\n\t\tbusy >= k->nkr_num_slots) {\n\t\tD(\"invalid kring, cur %d tail %d lease %d lease_idx %d lim %d\",\t\t\tk->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,\n\t\t\tk->nkr_lease_idx, k->nkr_num_slots);\n\t}\n#endif\n\treturn space;\n}\n\n\n\n\n/* make a lease on the kring for N positions. return the\n * lease index\n * XXX only used in VALE code and with is_rx = 1\n */\nstatic inline uint32_t\nnm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)\n{\n\tuint32_t lim = k->nkr_num_slots - 1;\n\tuint32_t lease_idx = k->nkr_lease_idx;\n\n\tk->nkr_leases[lease_idx] = NR_NOSLOT;\n\tk->nkr_lease_idx = nm_next(lease_idx, lim);\n\n\tif (n > nm_kr_space(k, is_rx)) {\n\t\tD(\"invalid request for %d slots\", n);\n\t\tpanic(\"x\");\n\t}\n\t/* XXX verify that there are n slots */\n\tk->nkr_hwlease += n;\n\tif (k->nkr_hwlease > lim)\n\t\tk->nkr_hwlease -= lim + 1;\n\n\tif (k->nkr_hwlease >= k->nkr_num_slots ||\n\t\tk->nr_hwcur >= k->nkr_num_slots ||\n\t\tk->nr_hwtail >= k->nkr_num_slots ||\n\t\tk->nkr_lease_idx >= k->nkr_num_slots) {\n\t\tD(\"invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d\",\n\t\t\tk->na->name,\n\t\t\tk->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,\n\t\t\tk->nkr_lease_idx, k->nkr_num_slots);\n\t}\n\treturn lease_idx;\n}\n\n/*\n *\n * This flush routine supports only unicast and broadcast but a large\n * number of ports, and lets us replace the learn and dispatch functions.\n */\nint\nnm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,\n\t\tu_int ring_nr)\n{\n\tstruct nm_bdg_q *dst_ents, *brddst;\n\tuint16_t num_dsts = 0, *dsts;\n\tstruct nm_bridge *b = na->na_bdg;\n\tu_int i, me = na->bdg_port;\n\n\t/*\n\t * The work area (pointed by ft) is followed by an array of\n\t * pointers to queues , dst_ents; there are NM_BDG_MAXRINGS\n\t * queues per port plus one for the broadcast traffic.\n\t * Then we have an array of destination indexes.\n\t */\n\tdst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);\n\tdsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);\n\n\t/* first pass: find a destination for each packet in the batch */\n\tfor (i = 0; likely(i < n); i += ft[i].ft_frags) {\n\t\tuint8_t dst_ring = ring_nr; /* default, same ring as origin */\n\t\tuint16_t dst_port, d_i;\n\t\tstruct nm_bdg_q 
*d;\n\n\t\tND(\"slot %d frags %d\", i, ft[i].ft_frags);\n\t\t/* Drop the packet if the virtio-net header is not into the first\n\t\t fragment nor at the very beginning of the second. */\n\t\tif (unlikely(na->up.virt_hdr_len > ft[i].ft_len))\n\t\t\tcontinue;\n\t\tdst_port = b->bdg_ops.lookup(&ft[i], &dst_ring, na);\n\t\tif (netmap_verbose > 255)\n\t\t\tRD(5, \"slot %d port %d -> %d\", i, me, dst_port);\n\t\tif (dst_port == NM_BDG_NOPORT)\n\t\t\tcontinue; /* this packet is identified to be dropped */\n\t\telse if (unlikely(dst_port > NM_BDG_MAXPORTS))\n\t\t\tcontinue;\n\t\telse if (dst_port == NM_BDG_BROADCAST)\n\t\t\tdst_ring = 0; /* broadcasts always go to ring 0 */\n\t\telse if (unlikely(dst_port == me ||\n\t\t !b->bdg_ports[dst_port]))\n\t\t\tcontinue;\n\n\t\t/* get a position in the scratch pad */\n\t\td_i = dst_port * NM_BDG_MAXRINGS + dst_ring;\n\t\td = dst_ents + d_i;\n\n\t\t/* append the first fragment to the list */\n\t\tif (d->bq_head == NM_FT_NULL) { /* new destination */\n\t\t\td->bq_head = d->bq_tail = i;\n\t\t\t/* remember this position to be scanned later */\n\t\t\tif (dst_port != NM_BDG_BROADCAST)\n\t\t\t\tdsts[num_dsts++] = d_i;\n\t\t} else {\n\t\t\tft[d->bq_tail].ft_next = i;\n\t\t\td->bq_tail = i;\n\t\t}\n\t\td->bq_len += ft[i].ft_frags;\n\t}\n\n\t/*\n\t * Broadcast traffic goes to ring 0 on all destinations.\n\t * So we need to add these rings to the list of ports to scan.\n\t * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is\n\t * expensive. We should keep a compact list of active destinations\n\t * so we could shorten this loop.\n\t */\n\tbrddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;\n\tif (brddst->bq_head != NM_FT_NULL) {\n\t\tu_int j;\n\t\tfor (j = 0; likely(j < b->bdg_active_ports); j++) {\n\t\t\tuint16_t d_i;\n\t\t\ti = b->bdg_port_index[j];\n\t\t\tif (unlikely(i == me))\n\t\t\t\tcontinue;\n\t\t\td_i = i * NM_BDG_MAXRINGS;\n\t\t\tif (dst_ents[d_i].bq_head == NM_FT_NULL)\n\t\t\t\tdsts[num_dsts++] = d_i;\n\t\t}\n\t}\n\n\tND(5, \"pass 1 done %d pkts %d dsts\", n, num_dsts);\n\t/* second pass: scan destinations */\n\tfor (i = 0; i < num_dsts; i++) {\n\t\tstruct netmap_vp_adapter *dst_na;\n\t\tstruct netmap_kring *kring;\n\t\tstruct netmap_ring *ring;\n\t\tu_int dst_nr, lim, j, d_i, next, brd_next;\n\t\tu_int needed, howmany;\n\t\tint retry = netmap_txsync_retry;\n\t\tstruct nm_bdg_q *d;\n\t\tuint32_t my_start = 0, lease_idx = 0;\n\t\tint nrings;\n\t\tint virt_hdr_mismatch = 0;\n\n\t\td_i = dsts[i];\n\t\tND(\"second pass %d port %d\", i, d_i);\n\t\td = dst_ents + d_i;\n\t\t// XXX fix the division\n\t\tdst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];\n\t\t/* protect from the lookup function returning an inactive\n\t\t * destination port\n\t\t */\n\t\tif (unlikely(dst_na == NULL))\n\t\t\tgoto cleanup;\n\t\tif (dst_na->up.na_flags & NAF_SW_ONLY)\n\t\t\tgoto cleanup;\n\t\t/*\n\t\t * The interface may be in !netmap mode in two cases:\n\t\t * - when na is attached but not activated yet;\n\t\t * - when na is being deactivated but is still attached.\n\t\t */\n\t\tif (unlikely(!nm_netmap_on(&dst_na->up))) {\n\t\t\tND(\"not in netmap mode!\");\n\t\t\tgoto cleanup;\n\t\t}\n\n\t\t/* there is at least one either unicast or broadcast packet */\n\t\tbrd_next = brddst->bq_head;\n\t\tnext = d->bq_head;\n\t\t/* we need to reserve this many slots. 
If fewer are\n\t\t * available, some packets will be dropped.\n\t\t * Packets may have multiple fragments, so there is a chance\n\t\t * that we may not use all of the slots we have claimed, and\n\t\t * we will need to handle the leftover ones when we regain\n\t\t * the lock.\n\t\t */\n\t\tneeded = d->bq_len + brddst->bq_len;\n\n\t\tif (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {\n            if (netmap_verbose) {\n                RD(3, \"virt_hdr_mismatch, src %d dst %d\", na->up.virt_hdr_len,\n                      dst_na->up.virt_hdr_len);\n            }\n\t\t\t/* There is a virtio-net header/offloadings mismatch between\n\t\t\t * source and destination. The slower mismatch datapath will\n\t\t\t * be used to cope with all the mismatches.\n\t\t\t */\n\t\t\tvirt_hdr_mismatch = 1;\n\t\t\tif (dst_na->mfs < na->mfs) {\n\t\t\t\t/* We may need to do segmentation offloadings, and so\n\t\t\t\t * we may need a number of destination slots greater\n\t\t\t\t * than the number of input slots ('needed').\n\t\t\t\t * We look for the smallest integer 'x' which satisfies:\n\t\t\t\t *\tneeded * na->mfs + x * H <= x * dst_na->mfs\n\t\t\t\t * where 'H' is the length of the longest header that may\n\t\t\t\t * be replicated in the segmentation process (e.g. for\n\t\t\t\t * TCPv4 we must account for ethernet header, IP header\n\t\t\t\t * and TCPv4 header).\n\t\t\t\t */\n\t\t\t\tneeded = (needed * na->mfs) /\n\t\t\t\t\t\t(dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;\n\t\t\t\tND(3, \"srcmtu=%u, dstmtu=%u, x=%u\", na->mfs, dst_na->mfs, needed);\n\t\t\t}\n\t\t}\n\n\t\tND(5, \"pass 2 dst %d is %x %s\",\n\t\t\ti, d_i, is_vp ? \"virtual\" : \"nic/host\");\n\t\tdst_nr = d_i & (NM_BDG_MAXRINGS-1);\n\t\tnrings = dst_na->up.num_rx_rings;\n\t\tif (dst_nr >= nrings)\n\t\t\tdst_nr = dst_nr % nrings;\n\t\tkring = &dst_na->up.rx_rings[dst_nr];\n\t\tring = kring->ring;\n\t\tlim = kring->nkr_num_slots - 1;\n\nretry:\n\n\t\tif (dst_na->retry && retry) {\n\t\t\t/* try to get some free slot from the previous run */\n\t\t\tkring->nm_notify(kring, 0);\n\t\t\t/* actually useful only for bwraps, since there\n\t\t\t * the notify will trigger a txsync on the hwna. 
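The slot estimate in the segmentation branch above is just the comment's inequality solved for x: from needed * mfs_src <= x * (mfs_dst - H) we get x >= needed * mfs_src / (mfs_dst - H), and the trailing + 1 turns the integer division into a ceiling. A small check with made-up sizes (the H value of 94, eth 14 + ip 20 + tcp with options 60, is purely illustrative, not netmap's WORST_CASE_GSO_HEADER):

#include <stdio.h>

/* Solve needed*mfs_src + x*H <= x*mfs_dst for the smallest integer x.
 * H is the worst-case header replicated on every output segment. */
static unsigned worst_case_slots(unsigned needed, unsigned mfs_src,
				 unsigned mfs_dst, unsigned H)
{
	return (needed * mfs_src) / (mfs_dst - H) + 1;
}

int main(void)
{
	/* e.g. 8 input slots of 4096 bytes going to a 1514-byte port */
	unsigned x = worst_case_slots(8, 4096, 1514, 94);

	printf("8 x 4096B input -> reserve %u x 1514B output slots\n", x);
	/* sanity: the inequality really holds for this x */
	printf("check: %u <= %u\n", 8 * 4096 + x * 94, x * 1514);
	return 0;
}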
VALE ports\n\t\t\t * have dst_na->retry == 0\n\t\t\t */\n\t\t}\n\t\t/* reserve the buffers in the queue and an entry\n\t\t * to report completion, and drop lock.\n\t\t * XXX this might become a helper function.\n\t\t */\n\t\tmtx_lock(&kring->q_lock);\n\t\tif (kring->nkr_stopped) {\n\t\t\tmtx_unlock(&kring->q_lock);\n\t\t\tgoto cleanup;\n\t\t}\n\t\tmy_start = j = kring->nkr_hwlease;\n\t\thowmany = nm_kr_space(kring, 1);\n\t\tif (needed < howmany)\n\t\t\thowmany = needed;\n\t\tlease_idx = nm_kr_lease(kring, howmany, 1);\n\t\tmtx_unlock(&kring->q_lock);\n\n\t\t/* only retry if we need more than available slots */\n\t\tif (retry && needed <= howmany)\n\t\t\tretry = 0;\n\n\t\t/* copy to the destination queue */\n\t\twhile (howmany > 0) {\n\t\t\tstruct netmap_slot *slot;\n\t\t\tstruct nm_bdg_fwd *ft_p, *ft_end;\n\t\t\tu_int cnt;\n\n\t\t\t/* find the queue from which we pick next packet.\n\t\t\t * NM_FT_NULL is always higher than valid indexes\n\t\t\t * so we never dereference it if the other list\n\t\t\t * has packets (and if both are empty we never\n\t\t\t * get here).\n\t\t\t */\n\t\t\tif (next < brd_next) {\n\t\t\t\tft_p = ft + next;\n\t\t\t\tnext = ft_p->ft_next;\n\t\t\t} else { /* insert broadcast */\n\t\t\t\tft_p = ft + brd_next;\n\t\t\t\tbrd_next = ft_p->ft_next;\n\t\t\t}\n\t\t\tcnt = ft_p->ft_frags; // cnt > 0\n\t\t\tif (unlikely(cnt > howmany))\n\t\t\t break; /* no more space */\n\t\t\tif (netmap_verbose && cnt > 1)\n\t\t\t\tRD(5, \"rx %d frags to %d\", cnt, j);\n\t\t\tft_end = ft_p + cnt;\n\t\t\tif (unlikely(virt_hdr_mismatch)) {\n\t\t\t\tbdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);\n\t\t\t} else {\n\t\t\t\thowmany -= cnt;\n\t\t\t\tdo {\n\t\t\t\t\tchar *dst, *src = ft_p->ft_buf;\n\t\t\t\t\tsize_t copy_len = ft_p->ft_len, dst_len = copy_len;\n\n\t\t\t\t\tslot = &ring->slot[j];\n\t\t\t\t\tdst = NMB(&dst_na->up, slot);\n\n\t\t\t\t\tND(\"send [%d] %d(%d) bytes at %s:%d\",\n\t\t\t\t\t\t\ti, (int)copy_len, (int)dst_len,\n\t\t\t\t\t\t\tNM_IFPNAME(dst_ifp), j);\n\t\t\t\t\t/* round to a multiple of 64 */\n\t\t\t\t\tcopy_len = (copy_len + 63) & ~63;\n\n\t\t\t\t\tif (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||\n\t\t\t\t\t\t copy_len > NETMAP_BUF_SIZE(&na->up))) {\n\t\t\t\t\t\tRD(5, \"invalid len %d, down to 64\", (int)copy_len);\n\t\t\t\t\t\tcopy_len = dst_len = 64; // XXX\n\t\t\t\t\t}\n\t\t\t\t\tif (ft_p->ft_flags & NS_INDIRECT) {\n\t\t\t\t\t\tif (copyin(src, dst, copy_len)) {\n\t\t\t\t\t\t\t// invalid user pointer, pretend len is 0\n\t\t\t\t\t\t\tdst_len = 0;\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t//memcpy(dst, src, copy_len);\n\t\t\t\t\t\tpkt_copy(src, dst, (int)copy_len);\n\t\t\t\t\t}\n\t\t\t\t\tslot->len = dst_len;\n\t\t\t\t\tslot->flags = (cnt << 8)| NS_MOREFRAG;\n\t\t\t\t\tj = nm_next(j, lim);\n\t\t\t\t\tneeded--;\n\t\t\t\t\tft_p++;\n\t\t\t\t} while (ft_p != ft_end);\n\t\t\t\tslot->flags = (cnt << 8); /* clear flag on last entry */\n\t\t\t}\n\t\t\t/* are we done ? */\n\t\t\tif (next == NM_FT_NULL && brd_next == NM_FT_NULL)\n\t\t\t\tbreak;\n\t\t}\n\t\t{\n\t\t /* current position */\n\t\t uint32_t *p = kring->nkr_leases; /* shorthand */\n\t\t uint32_t update_pos;\n\t\t int still_locked = 1;\n\n\t\t mtx_lock(&kring->q_lock);\n\t\t if (unlikely(howmany > 0)) {\n\t\t\t/* not used all bufs. 
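One detail worth isolating from the copy loop above is the length rounding: (len + 63) & ~63 rounds a copy length up to the next multiple of 64, a cache-line-friendly size for pkt_copy. A quick standalone check of the bit trick:

#include <stdio.h>
#include <stddef.h>

static size_t round_up_64(size_t len)
{
	return (len + 63) & ~(size_t)63;	/* same trick as the copy loop */
}

int main(void)
{
	size_t tests[] = { 0, 1, 60, 64, 65, 1514 };
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%4zu -> %4zu\n", tests[i], round_up_64(tests[i]));
	return 0;
}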
If i am the last one\n\t\t\t * i can recover the slots, otherwise must\n\t\t\t * fill them with 0 to mark empty packets.\n\t\t\t */\n\t\t\tND(\"leftover %d bufs\", howmany);\n\t\t\tif (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {\n\t\t\t /* yes i am the last one */\n\t\t\t ND(\"roll back nkr_hwlease to %d\", j);\n\t\t\t kring->nkr_hwlease = j;\n\t\t\t} else {\n\t\t\t while (howmany-- > 0) {\n\t\t\t\tring->slot[j].len = 0;\n\t\t\t\tring->slot[j].flags = 0;\n\t\t\t\tj = nm_next(j, lim);\n\t\t\t }\n\t\t\t}\n\t\t }\n\t\t p[lease_idx] = j; /* report I am done */\n\n\t\t update_pos = kring->nr_hwtail;\n\n\t\t if (my_start == update_pos) {\n\t\t\t/* all slots before my_start have been reported,\n\t\t\t * so scan subsequent leases to see if other ranges\n\t\t\t * have been completed, and to a selwakeup or txsync.\n\t\t */\n\t\t\twhile (lease_idx != kring->nkr_lease_idx &&\n\t\t\t\tp[lease_idx] != NR_NOSLOT) {\n\t\t\t j = p[lease_idx];\n\t\t\t p[lease_idx] = NR_NOSLOT;\n\t\t\t lease_idx = nm_next(lease_idx, lim);\n\t\t\t}\n\t\t\t/* j is the new 'write' position. j != my_start\n\t\t\t * means there are new buffers to report\n\t\t\t */\n\t\t\tif (likely(j != my_start)) {\n\t\t\t\tkring->nr_hwtail = j;\n\t\t\t\tstill_locked = 0;\n\t\t\t\tmtx_unlock(&kring->q_lock);\n\t\t\t\tkring->nm_notify(kring, 0);\n\t\t\t\t/* this is netmap_notify for VALE ports and\n\t\t\t\t * netmap_bwrap_notify for bwrap. The latter will\n\t\t\t\t * trigger a txsync on the underlying hwna\n\t\t\t\t */\n\t\t\t\tif (dst_na->retry && retry--) {\n\t\t\t\t\t/* XXX this is going to call nm_notify again.\n\t\t\t\t\t * Only useful for bwrap in virtual machines\n\t\t\t\t\t */\n\t\t\t\t\tgoto retry;\n\t\t\t\t}\n\t\t\t}\n\t\t }\n\t\t if (still_locked)\n\t\t\tmtx_unlock(&kring->q_lock);\n\t\t}\ncleanup:\n\t\td->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */\n\t\td->bq_len = 0;\n\t}\n\tbrddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */\n\tbrddst->bq_len = 0;\n\treturn 0;\n}\n\n/* nm_txsync callback for VALE ports */\nstatic int\nnetmap_vp_txsync(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_vp_adapter *na =\n\t\t(struct netmap_vp_adapter *)kring->na;\n\tu_int done;\n\tu_int const lim = kring->nkr_num_slots - 1;\n\tu_int const head = kring->rhead;\n\n\tif (bridge_batch <= 0) { /* testing only */\n\t\tdone = head; // used all\n\t\tgoto done;\n\t}\n\tif (!na->na_bdg) {\n\t\tdone = head;\n\t\tgoto done;\n\t}\n\tif (bridge_batch > NM_BDG_BATCH)\n\t\tbridge_batch = NM_BDG_BATCH;\n\n\tdone = nm_bdg_preflush(kring, head);\ndone:\n\tif (done != head)\n\t\tD(\"early break at %d/ %d, tail %d\", done, head, kring->nr_hwtail);\n\t/*\n\t * packets between 'done' and 'cur' are left unsent.\n\t */\n\tkring->nr_hwcur = done;\n\tkring->nr_hwtail = nm_prev(done, lim);\n\tif (netmap_verbose)\n\t\tD(\"%s ring %d flags %d\", na->up.name, kring->ring_id, flags);\n\treturn 0;\n}\n\n\n/* rxsync code used by VALE ports nm_rxsync callback and also\n * internally by the brwap\n */\nstatic int\nnetmap_vp_rxsync_locked(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_adapter *na = kring->na;\n\tstruct netmap_ring *ring = kring->ring;\n\tu_int nm_i, lim = kring->nkr_num_slots - 1;\n\tu_int head = kring->rhead;\n\tint n;\n\n\tif (head > lim) {\n\t\tD(\"ouch dangerous reset!!!\");\n\t\tn = netmap_ring_reinit(kring);\n\t\tgoto done;\n\t}\n\n\t/* First part, import newly received packets. */\n\t/* actually nothing to do here, they are already in the kring */\n\n\t/* Second part, skip past packets that userspace has released. 
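The completion scan above, like the release walk in the rxsync code that follows, relies on nm_next-style wraparound over a ring of nkr_num_slots entries. A minimal sketch of that traversal (my own nm_next lookalike, not the kernel macro):

#include <stdio.h>

static unsigned nm_next_sketch(unsigned i, unsigned lim)
{
	return i == lim ? 0 : i + 1;	/* wrap after the last slot */
}

int main(void)
{
	unsigned lim = 7;		/* 8-slot ring: indexes 0..7 */
	unsigned hwcur = 6, head = 2;	/* released range wraps past the end */
	unsigned i;

	for (i = hwcur; i != head; i = nm_next_sketch(i, lim))
		printf("reclaim slot %u\n", i);
	return 0;
}

Walking with a successor function instead of modular arithmetic avoids a division per slot and works for any ring size, not just powers of two.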
*/\n\tnm_i = kring->nr_hwcur;\n\tif (nm_i != head) {\n\t\t/* consistency check, but nothing really important here */\n\t\tfor (n = 0; likely(nm_i != head); n++) {\n\t\t\tstruct netmap_slot *slot = &ring->slot[nm_i];\n\t\t\tvoid *addr = NMB(na, slot);\n\n\t\t\tif (addr == NETMAP_BUF_BASE(kring->na)) { /* bad buf */\n\t\t\t\tD(\"bad buffer index %d, ignore ?\",\n\t\t\t\t\tslot->buf_idx);\n\t\t\t}\n\t\t\tslot->flags &= ~NS_BUF_CHANGED;\n\t\t\tnm_i = nm_next(nm_i, lim);\n\t\t}\n\t\tkring->nr_hwcur = head;\n\t}\n\n\tn = 0;\ndone:\n\treturn n;\n}\n\n/*\n * nm_rxsync callback for VALE ports\n * user process reading from a VALE switch.\n * Already protected against concurrent calls from userspace,\n * but we must acquire the queue's lock to protect against\n * writers on the same queue.\n */\nstatic int\nnetmap_vp_rxsync(struct netmap_kring *kring, int flags)\n{\n\tint n;\n\n\tmtx_lock(&kring->q_lock);\n\tn = netmap_vp_rxsync_locked(kring, flags);\n\tmtx_unlock(&kring->q_lock);\n\treturn n;\n}\n\n\n/* nm_bdg_attach callback for VALE ports\n * The na_vp port is this same netmap_adapter. There is no host port.\n */\nstatic int\nnetmap_vp_bdg_attach(const char *name, struct netmap_adapter *na)\n{\n\tstruct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;\n\n\tif (vpna->na_bdg)\n\t\treturn EBUSY;\n\tna->na_vp = vpna;\n\tstrncpy(na->name, name, sizeof(na->name));\n\tna->na_hostvp = NULL;\n\treturn 0;\n}\n\n/* create a netmap_vp_adapter that describes a VALE port.\n * Only persistent VALE ports have a non-null ifp.\n */\nstatic int\nnetmap_vp_create(struct nmreq *nmr, struct ifnet *ifp,\n\t\tstruct netmap_mem_d *nmd,\n\t\tstruct netmap_vp_adapter **ret)\n{\n\tstruct netmap_vp_adapter *vpna;\n\tstruct netmap_adapter *na;\n\tint error = 0;\n\tu_int npipes = 0;\n\n\tvpna = nm_os_malloc(sizeof(*vpna));\n\tif (vpna == NULL)\n\t\treturn ENOMEM;\n\n \tna = &vpna->up;\n\n\tna->ifp = ifp;\n\tstrncpy(na->name, nmr->nr_name, sizeof(na->name));\n\n\t/* bound checking */\n\tna->num_tx_rings = nmr->nr_tx_rings;\n\tnm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);\n\tnmr->nr_tx_rings = na->num_tx_rings; // write back\n\tna->num_rx_rings = nmr->nr_rx_rings;\n\tnm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);\n\tnmr->nr_rx_rings = na->num_rx_rings; // write back\n\tnm_bound_var(&nmr->nr_tx_slots, NM_BRIDGE_RINGSIZE,\n\t\t\t1, NM_BDG_MAXSLOTS, NULL);\n\tna->num_tx_desc = nmr->nr_tx_slots;\n\tnm_bound_var(&nmr->nr_rx_slots, NM_BRIDGE_RINGSIZE,\n\t\t\t1, NM_BDG_MAXSLOTS, NULL);\n\t/* validate number of pipes. 
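Each user-supplied size above goes through nm_bound_var. As I read it, that helper substitutes the (clamped) default whenever the request falls outside [min, max] and passes in-range values through; a hedged lookalike, with an illustrative bound of 64 standing in for NM_BDG_MAXRINGS, to make the ring/slot bounding concrete:

#include <stdio.h>

/* Assumption about nm_bound_var's semantics: out-of-range values are
 * replaced by the default (itself clamped into [lo, hi]). */
static unsigned bound_var_sketch(unsigned v, unsigned dflt,
				 unsigned lo, unsigned hi)
{
	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	return (v < lo || v > hi) ? dflt : v;
}

int main(void)
{
	/* e.g. tx rings bounded to [1, 64] with default 1 */
	printf("%u %u %u\n",
	       bound_var_sketch(0, 1, 1, 64),	/* 0 -> default 1 */
	       bound_var_sketch(4, 1, 1, 64),	/* in range -> kept */
	       bound_var_sketch(999, 1, 1, 64));	/* too big -> 1 */
	return 0;
}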
We want at least 1,\n\t * but probably can do with some more.\n\t * So let's use 2 as default (when 0 is supplied)\n\t */\n\tnpipes = nmr->nr_arg1;\n\tnm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);\n\tnmr->nr_arg1 = npipes;\t/* write back */\n\t/* validate extra bufs */\n\tnm_bound_var(&nmr->nr_arg3, 0, 0,\n\t\t\t128*NM_BDG_MAXSLOTS, NULL);\n\tna->num_rx_desc = nmr->nr_rx_slots;\n\tvpna->mfs = 1514;\n\tvpna->last_smac = ~0llu;\n\t/*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero??\n\t\tvpna->mfs = netmap_buf_size; */\n if (netmap_verbose)\n\t\tD(\"max frame size %u\", vpna->mfs);\n\n\tna->na_flags |= NAF_BDG_MAYSLEEP;\n\t/* persistent VALE ports look like hw devices\n\t * with a native netmap adapter\n\t */\n\tif (ifp)\n\t\tna->na_flags |= NAF_NATIVE;\n\tna->nm_txsync = netmap_vp_txsync;\n\tna->nm_rxsync = netmap_vp_rxsync;\n\tna->nm_register = netmap_vp_reg;\n\tna->nm_krings_create = netmap_vp_krings_create;\n\tna->nm_krings_delete = netmap_vp_krings_delete;\n\tna->nm_dtor = netmap_vp_dtor;\n\tD(\"nr_arg2 %d\", nmr->nr_arg2);\n\tna->nm_mem = nmd ?\n\t\tnetmap_mem_get(nmd):\n\t\tnetmap_mem_private_new(\n\t\t\tna->num_tx_rings, na->num_tx_desc,\n\t\t\tna->num_rx_rings, na->num_rx_desc,\n\t\t\tnmr->nr_arg3, npipes, &error);\n\tif (na->nm_mem == NULL)\n\t\tgoto err;\n\tna->nm_bdg_attach = netmap_vp_bdg_attach;\n\t/* other nmd fields are set in the common routine */\n\terror = netmap_attach_common(na);\n\tif (error)\n\t\tgoto err;\n\t*ret = vpna;\n\treturn 0;\n\nerr:\n\tif (na->nm_mem != NULL)\n\t\tnetmap_mem_put(na->nm_mem);\n\tnm_os_free(vpna);\n\treturn error;\n}\n\n/* Bridge wrapper code (bwrap).\n * This is used to connect a non-VALE-port netmap_adapter (hwna) to a\n * VALE switch.\n * The main task is to swap the meaning of tx and rx rings to match the\n * expectations of the VALE switch code (see nm_bdg_flush).\n *\n * The bwrap works by interposing a netmap_bwrap_adapter between the\n * rest of the system and the hwna. The netmap_bwrap_adapter looks like\n * a netmap_vp_adapter to the rest the system, but, internally, it\n * translates all callbacks to what the hwna expects.\n *\n * Note that we have to intercept callbacks coming from two sides:\n *\n * - callbacks coming from the netmap module are intercepted by\n * passing around the netmap_bwrap_adapter instead of the hwna\n *\n * - callbacks coming from outside of the netmap module only know\n * about the hwna. This, however, only happens in interrupt\n * handlers, where only the hwna->nm_notify callback is called.\n * What the bwrap does is to overwrite the hwna->nm_notify callback\n * with its own netmap_bwrap_intr_notify.\n * XXX This assumes that the hwna->nm_notify callback was the\n * standard netmap_notify(), as it is the case for nic adapters.\n * Any additional action performed by hwna->nm_notify will not be\n * performed by netmap_bwrap_intr_notify.\n *\n * Additionally, the bwrap can optionally attach the host rings pair\n * of the wrapped adapter to a different port of the switch.\n */\n\n\nstatic void\nnetmap_bwrap_dtor(struct netmap_adapter *na)\n{\n\tstruct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na;\n\tstruct netmap_adapter *hwna = bna->hwna;\n\tstruct nm_bridge *b = bna->up.na_bdg,\n\t\t*bh = bna->host.na_bdg;\n\n\tnetmap_mem_put(bna->host.up.nm_mem);\n\n\tif (b) {\n\t\tnetmap_bdg_detach_common(b, bna->up.bdg_port,\n\t\t\t (bh ? 
bna->host.bdg_port : -1));\n\t}\n\n\tND(\"na %p\", na);\n\tna->ifp = NULL;\n\tbna->host.up.ifp = NULL;\n\thwna->na_private = NULL;\n\thwna->na_vp = hwna->na_hostvp = NULL;\n\thwna->na_flags &= ~NAF_BUSY;\n\tnetmap_adapter_put(hwna);\n\n}\n\n\n/*\n * Intr callback for NICs connected to a bridge.\n * Simply ignore tx interrupts (maybe we could try to recover space ?)\n * and pass received packets from nic to the bridge.\n *\n * XXX TODO check locking: this is called from the interrupt\n * handler so we should make sure that the interface is not\n * disconnected while passing down an interrupt.\n *\n * Note, no user process can access this NIC or the host stack.\n * The only part of the ring that is significant are the slots,\n * and head/cur/tail are set from the kring as needed\n * (part as a receive ring, part as a transmit ring).\n *\n * callback that overwrites the hwna notify callback.\n * Packets come from the outside or from the host stack and are put on an\n * hwna rx ring.\n * The bridge wrapper then sends the packets through the bridge.\n */\nstatic int\nnetmap_bwrap_intr_notify(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_adapter *na = kring->na;\n\tstruct netmap_bwrap_adapter *bna = na->na_private;\n\tstruct netmap_kring *bkring;\n\tstruct netmap_vp_adapter *vpna = &bna->up;\n\tu_int ring_nr = kring->ring_id;\n\tint ret = NM_IRQ_COMPLETED;\n\tint error;\n\n\tif (netmap_verbose)\n\t D(\"%s %s 0x%x\", na->name, kring->name, flags);\n\n\tbkring = &vpna->up.tx_rings[ring_nr];\n\n\t/* make sure the ring is not disabled */\n\tif (nm_kr_tryget(kring, 0 /* can't sleep */, NULL)) {\n\t\treturn EIO;\n\t}\n\n\tif (netmap_verbose)\n\t D(\"%s head %d cur %d tail %d\", na->name,\n\t\tkring->rhead, kring->rcur, kring->rtail);\n\n\t/* simulate a user wakeup on the rx ring\n\t * fetch packets that have arrived.\n\t */\n\terror = kring->nm_sync(kring, 0);\n\tif (error)\n\t\tgoto put_out;\n\tif (kring->nr_hwcur == kring->nr_hwtail) {\n\t\tif (netmap_verbose)\n\t\t\tD(\"how strange, interrupt with no packets on %s\",\n\t\t\t na->name);\n\t\tgoto put_out;\n\t}\n\n\t/* new packets are kring->rcur to kring->nr_hwtail, and the bkring\n\t * had hwcur == bkring->rhead. So advance bkring->rhead to kring->nr_hwtail\n\t * to push all packets out.\n\t */\n\tbkring->rhead = bkring->rcur = kring->nr_hwtail;\n\n\tnetmap_vp_txsync(bkring, flags);\n\n\t/* mark all buffers as released on this ring */\n\tkring->rhead = kring->rcur = kring->rtail = kring->nr_hwtail;\n\t/* another call to actually release the buffers */\n\terror = kring->nm_sync(kring, 0);\n\n\t/* The second rxsync may have further advanced hwtail. If this happens,\n\t * return NM_IRQ_RESCHED, otherwise just return NM_IRQ_COMPLETED. */\n\tif (kring->rcur != kring->nr_hwtail) {\n\t\tret = NM_IRQ_RESCHED;\n\t}\nput_out:\n\tnm_kr_put(kring);\n\n\treturn error ? error : ret;\n}\n\n\n/* nm_register callback for bwrap */\nstatic int\nnetmap_bwrap_reg(struct netmap_adapter *na, int onoff)\n{\n\tstruct netmap_bwrap_adapter *bna =\n\t\t(struct netmap_bwrap_adapter *)na;\n\tstruct netmap_adapter *hwna = bna->hwna;\n\tstruct netmap_vp_adapter *hostna = &bna->host;\n\tint error, i;\n\tenum txrx t;\n\n\tND(\"%s %s\", na->name, onoff ? 
\"on\" : \"off\");\n\n\tif (onoff) {\n\t\t/* netmap_do_regif has been called on the bwrap na.\n\t\t * We need to pass the information about the\n\t\t * memory allocator down to the hwna before\n\t\t * putting it in netmap mode\n\t\t */\n\t\thwna->na_lut = na->na_lut;\n\n\t\tif (hostna->na_bdg) {\n\t\t\t/* if the host rings have been attached to switch,\n\t\t\t * we need to copy the memory allocator information\n\t\t\t * in the hostna also\n\t\t\t */\n\t\t\thostna->up.na_lut = na->na_lut;\n\t\t}\n\n\t\t/* cross-link the netmap rings\n\t\t * The original number of rings comes from hwna,\n\t\t * rx rings on one side equals tx rings on the other.\n\t\t */\n\t\tfor_rx_tx(t) {\n\t\t\tenum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */\n\t\t\tfor (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) {\n\t\t\t\tNMR(hwna, r)[i].ring = NMR(na, t)[i].ring;\n\t\t\t}\n\t\t}\n\n\t\tif (na->na_flags & NAF_HOST_RINGS) {\n\t\t\tstruct netmap_adapter *hna = &hostna->up;\n\t\t\t/* the hostna rings are the host rings of the bwrap.\n\t\t\t * The corresponding krings must point back to the\n\t\t\t * hostna\n\t\t\t */\n\t\t\thna->tx_rings = &na->tx_rings[na->num_tx_rings];\n\t\t\thna->tx_rings[0].na = hna;\n\t\t\thna->rx_rings = &na->rx_rings[na->num_rx_rings];\n\t\t\thna->rx_rings[0].na = hna;\n\t\t}\n\t}\n\n\t/* pass down the pending ring state information */\n\tfor_rx_tx(t) {\n\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++)\n\t\t\tNMR(hwna, t)[i].nr_pending_mode =\n\t\t\t\tNMR(na, t)[i].nr_pending_mode;\n\t}\n\n\t/* forward the request to the hwna */\n\terror = hwna->nm_register(hwna, onoff);\n\tif (error)\n\t\treturn error;\n\n\t/* copy up the current ring state information */\n\tfor_rx_tx(t) {\n\t\tfor (i = 0; i < nma_get_nrings(na, t) + 1; i++)\n\t\t\tNMR(na, t)[i].nr_mode =\n\t\t\t\tNMR(hwna, t)[i].nr_mode;\n\t}\n\n\t/* impersonate a netmap_vp_adapter */\n\tnetmap_vp_reg(na, onoff);\n\tif (hostna->na_bdg)\n\t\tnetmap_vp_reg(&hostna->up, onoff);\n\n\tif (onoff) {\n\t\tu_int i;\n\t\t/* intercept the hwna nm_nofify callback on the hw rings */\n\t\tfor (i = 0; i < hwna->num_rx_rings; i++) {\n\t\t\thwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify;\n\t\t\thwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify;\n\t\t}\n\t\ti = hwna->num_rx_rings; /* for safety */\n\t\t/* save the host ring notify unconditionally */\n\t\thwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify;\n\t\tif (hostna->na_bdg) {\n\t\t\t/* also intercept the host ring notify */\n\t\t\thwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify;\n\t\t}\n\t\tif (na->active_fds == 0)\n\t\t\tna->na_flags |= NAF_NETMAP_ON;\n\t} else {\n\t\tu_int i;\n\n\t\tif (na->active_fds == 0)\n\t\t\tna->na_flags &= ~NAF_NETMAP_ON;\n\n\t\t/* reset all notify callbacks (including host ring) */\n\t\tfor (i = 0; i <= hwna->num_rx_rings; i++) {\n\t\t\thwna->rx_rings[i].nm_notify = hwna->rx_rings[i].save_notify;\n\t\t\thwna->rx_rings[i].save_notify = NULL;\n\t\t}\n\t\thwna->na_lut.lut = NULL;\n\t\thwna->na_lut.objtotal = 0;\n\t\thwna->na_lut.objsize = 0;\n\t}\n\n\treturn 0;\n}\n\n/* nm_config callback for bwrap */\nstatic int\nnetmap_bwrap_config(struct netmap_adapter *na, u_int *txr, u_int *txd,\n\t\t\t\t u_int *rxr, u_int *rxd)\n{\n\tstruct netmap_bwrap_adapter *bna =\n\t\t(struct netmap_bwrap_adapter *)na;\n\tstruct netmap_adapter *hwna = bna->hwna;\n\n\t/* forward the request */\n\tnetmap_update_config(hwna);\n\t/* swap the results */\n\t*txr = hwna->num_rx_rings;\n\t*txd = hwna->num_rx_desc;\n\t*rxr = hwna->num_tx_rings;\n\t*rxd = 
hwna->num_tx_desc;\n\n\treturn 0;\n}\n\n\n/* nm_krings_create callback for bwrap */\nstatic int\nnetmap_bwrap_krings_create(struct netmap_adapter *na)\n{\n\tstruct netmap_bwrap_adapter *bna =\n\t\t(struct netmap_bwrap_adapter *)na;\n\tstruct netmap_adapter *hwna = bna->hwna;\n\tint i, error = 0;\n\tenum txrx t;\n\n\tND(\"%s\", na->name);\n\n\t/* impersonate a netmap_vp_adapter */\n\terror = netmap_vp_krings_create(na);\n\tif (error)\n\t\treturn error;\n\n\t/* also create the hwna krings */\n\terror = hwna->nm_krings_create(hwna);\n\tif (error) {\n\t\tgoto err_del_vp_rings;\n\t}\n\n\t/* get each ring slot number from the corresponding hwna ring */\n\tfor_rx_tx(t) {\n\t\tenum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */\n\t\tfor (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) {\n\t\t\tNMR(na, t)[i].nkr_num_slots = NMR(hwna, r)[i].nkr_num_slots;\n\t\t}\n\t}\n\n\treturn 0;\n\nerr_del_vp_rings:\n\tnetmap_vp_krings_delete(na);\n\n\treturn error;\n}\n\n\nstatic void\nnetmap_bwrap_krings_delete(struct netmap_adapter *na)\n{\n\tstruct netmap_bwrap_adapter *bna =\n\t\t(struct netmap_bwrap_adapter *)na;\n\tstruct netmap_adapter *hwna = bna->hwna;\n\n\tND(\"%s\", na->name);\n\n\thwna->nm_krings_delete(hwna);\n\tnetmap_vp_krings_delete(na);\n}\n\n\n/* notify method for the bridge-->hwna direction */\nstatic int\nnetmap_bwrap_notify(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_adapter *na = kring->na;\n\tstruct netmap_bwrap_adapter *bna = na->na_private;\n\tstruct netmap_adapter *hwna = bna->hwna;\n\tu_int ring_n = kring->ring_id;\n\tu_int lim = kring->nkr_num_slots - 1;\n\tstruct netmap_kring *hw_kring;\n\tint error;\n\n\tND(\"%s: na %s hwna %s\",\n\t\t\t(kring ? kring->name : \"NULL!\"),\n\t\t\t(na ? na->name : \"NULL!\"),\n\t\t\t(hwna ? hwna->name : \"NULL!\"));\n\thw_kring = &hwna->tx_rings[ring_n];\n\n\tif (nm_kr_tryget(hw_kring, 0, NULL)) {\n\t\treturn ENXIO;\n\t}\n\n\t/* first step: simulate a user wakeup on the rx ring */\n\tnetmap_vp_rxsync(kring, flags);\n\tND(\"%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)\",\n\t\tna->name, ring_n,\n\t\tkring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,\n\t\tring->head, ring->cur, ring->tail,\n\t\thw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail);\n\t/* second step: the new packets are sent on the tx ring\n\t * (which is actually the same ring)\n\t */\n\thw_kring->rhead = hw_kring->rcur = kring->nr_hwtail;\n\terror = hw_kring->nm_sync(hw_kring, flags);\n\tif (error)\n\t\tgoto put_out;\n\n\t/* third step: now we are back on the rx ring */\n\t/* claim ownership on all hw owned bufs */\n\tkring->rhead = kring->rcur = nm_next(hw_kring->nr_hwtail, lim); /* skip past reserved slot */\n\n\t/* fourth step: the user goes to sleep again, causing another rxsync */\n\tnetmap_vp_rxsync(kring, flags);\n\tND(\"%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)\",\n\t\tna->name, ring_n,\n\t\tkring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,\n\t\tring->head, ring->cur, ring->tail,\n\t\thw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail);\nput_out:\n\tnm_kr_put(hw_kring);\n\n\treturn error ? error : NM_IRQ_COMPLETED;\n}\n\n\n/* nm_bdg_ctl callback for the bwrap.\n * Called on bridge-attach and detach, as an effect of vale-ctl -[ahd].\n * On attach, it needs to provide a fake netmap_priv_d structure and\n * perform a netmap_do_regif() on the bwrap. This will put both the\n * bwrap and the hwna in netmap mode, with the netmap rings shared\n * and cross linked. 
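A recurring trick in the bwrap callbacks above is the direction swap: every query on the bwrap is answered from the opposite direction of the wrapped hwna, so the bwrap's tx side mirrors the hwna's rx side and vice versa. A toy version of that mapping (the enum values and ring counts here are illustrative, not netmap's):

#include <stdio.h>

enum txrx { TXRX_TX = 0, TXRX_RX = 1 };		/* illustrative, not netmap's */

static enum txrx txrx_swap(enum txrx t)
{
	return t == TXRX_TX ? TXRX_RX : TXRX_TX;
}

int main(void)
{
	unsigned hw_rings[2];
	int t;

	hw_rings[TXRX_TX] = 4;			/* hwna has 4 tx rings */
	hw_rings[TXRX_RX] = 2;			/* ... and 2 rx rings  */

	for (t = TXRX_TX; t <= TXRX_RX; t++)
		printf("bwrap %s rings = hwna %s rings = %u\n",
		       t == TXRX_TX ? "tx" : "rx",
		       txrx_swap((enum txrx)t) == TXRX_TX ? "tx" : "rx",
		       hw_rings[txrx_swap((enum txrx)t)]);
	return 0;
}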
Moreover, it will start intercepting interrupts\n * directed to hwna.\n */\nstatic int\nnetmap_bwrap_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach)\n{\n\tstruct netmap_priv_d *npriv;\n\tstruct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na;\n\tint error = 0;\n\n\tif (attach) {\n\t\tif (NETMAP_OWNED_BY_ANY(na)) {\n\t\t\treturn EBUSY;\n\t\t}\n\t\tif (bna->na_kpriv) {\n\t\t\t/* nothing to do */\n\t\t\treturn 0;\n\t\t}\n\t\tnpriv = netmap_priv_new();\n\t\tif (npriv == NULL)\n\t\t\treturn ENOMEM;\n\t\tnpriv->np_ifp = na->ifp; /* let the priv destructor release the ref */\n\t\terror = netmap_do_regif(npriv, na, 0, NR_REG_NIC_SW);\n\t\tif (error) {\n\t\t\tnetmap_priv_delete(npriv);\n\t\t\treturn error;\n\t\t}\n\t\tbna->na_kpriv = npriv;\n\t\tna->na_flags |= NAF_BUSY;\n\t} else {\n\t\tif (na->active_fds == 0) /* not registered */\n\t\t\treturn EINVAL;\n\t\tnetmap_priv_delete(bna->na_kpriv);\n\t\tbna->na_kpriv = NULL;\n\t\tna->na_flags &= ~NAF_BUSY;\n\t}\n\treturn error;\n\n}\n\n/* attach a bridge wrapper to the 'real' device */\nint\nnetmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)\n{\n\tstruct netmap_bwrap_adapter *bna;\n\tstruct netmap_adapter *na = NULL;\n\tstruct netmap_adapter *hostna = NULL;\n\tint error = 0;\n\tenum txrx t;\n\n\t/* make sure the NIC is not already in use */\n\tif (NETMAP_OWNED_BY_ANY(hwna)) {\n\t\tD(\"NIC %s busy, cannot attach to bridge\", hwna->name);\n\t\treturn EBUSY;\n\t}\n\n\tbna = nm_os_malloc(sizeof(*bna));\n\tif (bna == NULL) {\n\t\treturn ENOMEM;\n\t}\n\n\tna = &bna->up.up;\n\t/* make bwrap ifp point to the real ifp */\n\tna->ifp = hwna->ifp;\n\tif_ref(na->ifp);\n\tna->na_private = bna;\n\tstrncpy(na->name, nr_name, sizeof(na->name));\n\t/* fill the ring data for the bwrap adapter with rx/tx meanings\n\t * swapped. 
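netmap_bwrap_attach takes references (if_ref, netmap_adapter_get) that netmap_bwrap_dtor later releases; getting these pairs wrong is the classic source of leaks and use-after-free. A bare-bones refcount sketch of that ownership pattern, with hypothetical types standing in for the adapter structures:

#include <stdio.h>
#include <stdlib.h>

struct adapter { int refcount; const char *name; };	/* hypothetical */

static void adapter_get(struct adapter *a)
{
	a->refcount++;
}

static void adapter_put(struct adapter *a)
{
	if (--a->refcount == 0) {	/* last reference destroys */
		printf("%s destroyed\n", a->name);
		free(a);
	}
}

int main(void)
{
	struct adapter *hwna = malloc(sizeof(*hwna));

	hwna->refcount = 1;		/* creator's reference */
	hwna->name = "em0";
	adapter_get(hwna);		/* wrapper attach: keep hwna alive */
	adapter_put(hwna);		/* creator goes away: still alive */
	adapter_put(hwna);		/* wrapper dtor: last ref, destroy */
	return 0;
}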
The real cross-linking will be done during register,\n\t * when all the krings will have been created.\n\t */\n\tfor_rx_tx(t) {\n\t\tenum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */\n\t\tnma_set_nrings(na, t, nma_get_nrings(hwna, r));\n\t\tnma_set_ndesc(na, t, nma_get_ndesc(hwna, r));\n\t}\n\tna->nm_dtor = netmap_bwrap_dtor;\n\tna->nm_register = netmap_bwrap_reg;\n\t// na->nm_txsync = netmap_bwrap_txsync;\n\t// na->nm_rxsync = netmap_bwrap_rxsync;\n\tna->nm_config = netmap_bwrap_config;\n\tna->nm_krings_create = netmap_bwrap_krings_create;\n\tna->nm_krings_delete = netmap_bwrap_krings_delete;\n\tna->nm_notify = netmap_bwrap_notify;\n\tna->nm_bdg_ctl = netmap_bwrap_bdg_ctl;\n\tna->pdev = hwna->pdev;\n\tna->nm_mem = netmap_mem_get(hwna->nm_mem);\n\tna->virt_hdr_len = hwna->virt_hdr_len;\n\tbna->up.retry = 1; /* XXX maybe this should depend on the hwna */\n\n\tbna->hwna = hwna;\n\tnetmap_adapter_get(hwna);\n\thwna->na_private = bna; /* weak reference */\n\thwna->na_vp = &bna->up;\n\n\tif (hwna->na_flags & NAF_HOST_RINGS) {\n\t\tif (hwna->na_flags & NAF_SW_ONLY)\n\t\t\tna->na_flags |= NAF_SW_ONLY;\n\t\tna->na_flags |= NAF_HOST_RINGS;\n\t\thostna = &bna->host.up;\n\t\tsnprintf(hostna->name, sizeof(hostna->name), \"%s^\", nr_name);\n\t\thostna->ifp = hwna->ifp;\n\t\tfor_rx_tx(t) {\n\t\t\tenum txrx r = nm_txrx_swap(t);\n\t\t\tnma_set_nrings(hostna, t, 1);\n\t\t\tnma_set_ndesc(hostna, t, nma_get_ndesc(hwna, r));\n\t\t}\n\t\t// hostna->nm_txsync = netmap_bwrap_host_txsync;\n\t\t// hostna->nm_rxsync = netmap_bwrap_host_rxsync;\n\t\thostna->nm_notify = netmap_bwrap_notify;\n\t\thostna->nm_mem = netmap_mem_get(na->nm_mem);\n\t\thostna->na_private = bna;\n\t\thostna->na_vp = &bna->up;\n\t\tna->na_hostvp = hwna->na_hostvp =\n\t\t\thostna->na_hostvp = &bna->host;\n\t\thostna->na_flags = NAF_BUSY; /* prevent NIOCREGIF */\n\t}\n\n\tND(\"%s<->%s txr %d txd %d rxr %d rxd %d\",\n\t\tna->name, ifp->if_xname,\n\t\tna->num_tx_rings, na->num_tx_desc,\n\t\tna->num_rx_rings, na->num_rx_desc);\n\n\terror = netmap_attach_common(na);\n\tif (error) {\n\t\tgoto err_free;\n\t}\n\thwna->na_flags |= NAF_BUSY;\n\treturn 0;\n\nerr_free:\n\thwna->na_vp = hwna->na_hostvp = NULL;\n\tnetmap_adapter_put(hwna);\n\tnm_os_free(bna);\n\treturn error;\n\n}\n\nstruct nm_bridge *\nnetmap_init_bridges2(u_int n)\n{\n\tint i;\n\tstruct nm_bridge *b;\n\n\tb = nm_os_malloc(sizeof(struct nm_bridge) * n);\n\tif (b == NULL)\n\t\treturn NULL;\n\tfor (i = 0; i < n; i++)\n\t\tBDG_RWINIT(&b[i]);\n\treturn b;\n}\n\nvoid\nnetmap_uninit_bridges2(struct nm_bridge *b, u_int n)\n{\n\tint i;\n\n\tif (b == NULL)\n\t\treturn;\n\n\tfor (i = 0; i < n; i++)\n\t\tBDG_RWDESTROY(&b[i]);\n\tnm_os_free(b);\n}\n\nint\nnetmap_init_bridges(void)\n{\n#ifdef CONFIG_NET_NS\n\treturn netmap_bns_register();\n#else\n\tnm_bridges = netmap_init_bridges2(NM_BRIDGES);\n\tif (nm_bridges == NULL)\n\t\treturn ENOMEM;\n\treturn 0;\n#endif\n}\n\nvoid\nnetmap_uninit_bridges(void)\n{\n#ifdef CONFIG_NET_NS\n\tnetmap_bns_unregister();\n#else\n\tnetmap_uninit_bridges2(nm_bridges, NM_BRIDGES);\n#endif\n}\n#endif /* WITH_VALE */\n" }, { "alpha_fraction": 0.6030867695808411, "alphanum_fraction": 0.6104918718338013, "avg_line_length": 28.97429847717285, "blob_id": "a04a966761cb5eca5bcd62c49f0fbd1897396b15", "content_id": "4d8d9e36749d837f806dfdd712e30d5f6ea7c7d1", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 12829, "license_type": "permissive", "max_line_length": 86, "num_lines": 428, "path": 
"/sys/dev/netmap/if_vtnet_netmap.h", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2014 Vincenzo Maffione, Luigi Rizzo. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n/*\n * $FreeBSD: head/sys/dev/netmap/if_vtnet_netmap.h 270097 2014-08-17 10:25:27Z luigi $\n */\n\n#include <net/netmap.h>\n#include <sys/selinfo.h>\n#include <vm/vm.h>\n#include <vm/pmap.h> /* vtophys ? */\n#include <dev/netmap/netmap_kern.h>\n\n\n#define SOFTC_T\tvtnet_softc\n\n/* Free all the unused buffer in all the RX virtqueues.\n * This function is called when entering and exiting netmap mode.\n * - buffers queued by the virtio driver return skbuf/mbuf pointer\n * and need to be freed;\n * - buffers queued by netmap return the txq/rxq, and do not need work\n */\nstatic void\nvtnet_netmap_free_bufs(struct SOFTC_T* sc)\n{\n\tint i, nmb = 0, n = 0, last;\n\n\tfor (i = 0; i < sc->vtnet_max_vq_pairs; i++) {\n\t\tstruct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];\n\t\tstruct virtqueue *vq;\n\t\tstruct mbuf *m;\n\t\tstruct vtnet_txq *txq = &sc->vtnet_txqs[i];\n struct vtnet_tx_header *txhdr;\n\n\t\tlast = 0;\n\t\tvq = rxq->vtnrx_vq;\n\t\twhile ((m = virtqueue_drain(vq, &last)) != NULL) {\n\t\t\tn++;\n\t\t\tif (m != (void *)rxq)\n\t\t\t\tm_freem(m);\n\t\t\telse\n\t\t\t\tnmb++;\n\t\t}\n\n\t\tlast = 0;\n\t\tvq = txq->vtntx_vq;\n\t\twhile ((txhdr = virtqueue_drain(vq, &last)) != NULL) {\n\t\t\tn++;\n\t\t\tif (txhdr != (void *)txq) {\n\t\t\t\tm_freem(txhdr->vth_mbuf);\n\t\t\t\tuma_zfree(vtnet_tx_header_zone, txhdr);\n\t\t\t} else\n\t\t\t\tnmb++;\n\t\t}\n\t}\n\tD(\"freed %d mbufs, %d netmap bufs on %d queues\",\n\t\tn - nmb, nmb, i);\n}\n\n/* Register and unregister. 
*/\nstatic int\nvtnet_netmap_reg(struct netmap_adapter *na, int onoff)\n{\n struct ifnet *ifp = na->ifp;\n\tstruct SOFTC_T *sc = ifp->if_softc;\n\n\tVTNET_CORE_LOCK(sc);\n\tifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);\n\t/* enable or disable flags and callbacks in na and ifp */\n\tif (onoff) {\n\t\tnm_set_native_flags(na);\n\t} else {\n\t\tnm_clear_native_flags(na);\n\t}\n\t/* drain queues so netmap and native drivers\n\t * do not interfere with each other\n\t */\n\tvtnet_netmap_free_bufs(sc);\n vtnet_init_locked(sc); /* also enable intr */\n VTNET_CORE_UNLOCK(sc);\n return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);\n}\n\n\n/* Reconcile kernel and user view of the transmit ring. */\nstatic int\nvtnet_netmap_txsync(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_adapter *na = kring->na;\n struct ifnet *ifp = na->ifp;\n\tstruct netmap_ring *ring = kring->ring;\n\tu_int ring_nr = kring->ring_id;\n\tu_int nm_i;\t/* index into the netmap ring */\n\tu_int nic_i;\t/* index into the NIC ring */\n\tu_int n;\n\tu_int const lim = kring->nkr_num_slots - 1;\n\tu_int const head = kring->rhead;\n\n\t/* device-specific */\n\tstruct SOFTC_T *sc = ifp->if_softc;\n\tstruct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];\n\tstruct virtqueue *vq = txq->vtntx_vq;\n\n\t/*\n\t * First part: process new packets to send.\n\t */\n\trmb();\n\n\tnm_i = kring->nr_hwcur;\n\tif (nm_i != head) {\t/* we have new packets to send */\n\t\tstruct sglist *sg = txq->vtntx_sg;\n\n\t\tnic_i = netmap_idx_k2n(kring, nm_i);\n\t\tfor (n = 0; nm_i != head; n++) {\n\t\t\t/* we use an empty header here */\n\t\t\tstatic struct virtio_net_hdr_mrg_rxbuf hdr;\n\t\t\tstruct netmap_slot *slot = &ring->slot[nm_i];\n\t\t\tu_int len = slot->len;\n\t\t\tuint64_t paddr;\n\t\t\tvoid *addr = PNMB(na, slot, &paddr);\n int err;\n\n\t\t\tNM_CHECK_ADDR_LEN(na, addr, len);\n\n\t\t\tslot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);\n\t\t\t/* Initialize the scatterlist, expose it to the hypervisor,\n\t\t\t * and kick the hypervisor (if necessary).\n\t\t\t */\n\t\t\tsglist_reset(sg); // cheap\n\t\t\t// if vtnet_hdr_size > 0 ...\n\t\t\terr = sglist_append(sg, &hdr, sc->vtnet_hdr_size);\n\t\t\t// XXX later, support multi segment\n\t\t\terr = sglist_append_phys(sg, paddr, len);\n\t\t\t/* use txq as the cookie */\n err = virtqueue_enqueue(vq, txq, sg, sg->sg_nseg, 0);\n if (unlikely(err < 0)) {\n D(\"virtqueue_enqueue failed\");\n break;\n }\n\n\t\t\tnm_i = nm_next(nm_i, lim);\n\t\t\tnic_i = nm_next(nic_i, lim);\n\t\t}\n\t\t/* Update hwcur depending on where we stopped. */\n\t\tkring->nr_hwcur = nm_i; /* note we might break early */\n\n\t\t/* No more free TX slots? Ask the hypervisor for notifications,\n\t\t * possibly only when a considerable amount of work has been\n\t\t * done.\n\t\t */\n\t\tND(3,\"sent %d packets, hwcur %d\", n, nm_i);\n\t\tvirtqueue_disable_intr(vq);\n\t\tvirtqueue_notify(vq);\n\t} else {\n\t\tif (ring->head != ring->tail)\n\t\t ND(5, \"pure notify ? head %d tail %d nused %d %d\",\n\t\t\tring->head, ring->tail, virtqueue_nused(vq),\n\t\t\t(virtqueue_dump(vq), 1));\n\t\tvirtqueue_notify(vq);\n\t\tvirtqueue_enable_intr(vq); // like postpone with 0\n\t}\n\n\n /* Free used slots. 
We only consider our own used buffers, recognized\n\t * by the token we passed to virtqueue_add_outbuf.\n\t */\n n = 0;\n for (;;) {\n struct vtnet_tx_header *txhdr = virtqueue_dequeue(vq, NULL);\n if (txhdr == NULL)\n break;\n if (likely(txhdr == (void *)txq)) {\n n++;\n\t\t\tif (virtqueue_nused(vq) < 32) { // XXX slow release\n\t\t\t\tbreak;\n\t\t\t}\n\t\t} else { /* leftover from previous transmission */\n\t\t\tm_freem(txhdr->vth_mbuf);\n\t\t\tuma_zfree(vtnet_tx_header_zone, txhdr);\n\t\t}\n }\n\tif (n) {\n\t\tkring->nr_hwtail += n;\n\t\tif (kring->nr_hwtail > lim)\n\t\t\tkring->nr_hwtail -= lim + 1;\n\t}\n\tif (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0*/) {\n\t\tND(3, \"disable intr, hwcur %d\", nm_i);\n\t\tvirtqueue_disable_intr(vq);\n\t} else {\n\t\tND(3, \"enable intr, hwcur %d\", nm_i);\n\t\tvirtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT);\n\t}\n\n return 0;\n}\n\nstatic int\nvtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head)\n{\n\tstruct netmap_adapter *na = kring->na;\n struct ifnet *ifp = na->ifp;\n\tstruct netmap_ring *ring = kring->ring;\n\tu_int ring_nr = kring->ring_id;\n\tu_int const lim = kring->nkr_num_slots - 1;\n\tu_int n;\n\n\t/* device-specific */\n\tstruct SOFTC_T *sc = ifp->if_softc;\n\tstruct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];\n\tstruct virtqueue *vq = rxq->vtnrx_vq;\n\n\t/* use a local sglist, default might be short */\n\tstruct sglist_seg ss[2];\n\tstruct sglist sg = { ss, 0, 0, 2 };\n\n\tfor (n = 0; nm_i != head; n++) {\n\t\tstatic struct virtio_net_hdr_mrg_rxbuf hdr;\n\t\tstruct netmap_slot *slot = &ring->slot[nm_i];\n\t\tuint64_t paddr;\n\t\tvoid *addr = PNMB(na, slot, &paddr);\n\t\tint err = 0;\n\n\t\tif (addr == NETMAP_BUF_BASE(na)) { /* bad buf */\n\t\t\tif (netmap_ring_reinit(kring))\n\t\t\t\treturn -1;\n\t\t}\n\n\t\tslot->flags &= ~NS_BUF_CHANGED;\n\t\tsglist_reset(&sg); // cheap\n\t\terr = sglist_append(&sg, &hdr, sc->vtnet_hdr_size);\n\t\terr = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));\n\t\t/* writable for the host */\n\t\terr = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg);\n\t\tif (err < 0) {\n\t\t\tD(\"virtqueue_enqueue failed\");\n\t\t\tbreak;\n\t\t}\n\t\tnm_i = nm_next(nm_i, lim);\n\t}\n\treturn nm_i;\n}\n\n/* Reconcile kernel and user view of the receive ring. */\nstatic int\nvtnet_netmap_rxsync(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_adapter *na = kring->na;\n struct ifnet *ifp = na->ifp;\n\tstruct netmap_ring *ring = kring->ring;\n\tu_int ring_nr = kring->ring_id;\n\tu_int nm_i;\t/* index into the netmap ring */\n\t// u_int nic_i;\t/* index into the NIC ring */\n\tu_int n;\n\tu_int const lim = kring->nkr_num_slots - 1;\n\tu_int const head = kring->rhead;\n\tint force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;\n\n\t/* device-specific */\n\tstruct SOFTC_T *sc = ifp->if_softc;\n\tstruct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];\n\tstruct virtqueue *vq = rxq->vtnrx_vq;\n\n\t/* XXX netif_carrier_ok ? */\n\n\tif (head > lim)\n\t\treturn netmap_ring_reinit(kring);\n\n\trmb();\n\t/*\n\t * First part: import newly received packets.\n\t * Only accept our\n\t * own buffers (matching the token). 
We should only get\n\t * matching buffers, because of vtnet_netmap_free_rx_unused_bufs()\n\t * and vtnet_netmap_init_buffers().\n\t */\n\tif (netmap_no_pendintr || force_update) {\n\t\tuint16_t slot_flags = kring->nkr_slot_flags;\n struct netmap_adapter *token;\n\n nm_i = kring->nr_hwtail;\n n = 0;\n\t\tfor (;;) {\n\t\t\tint len;\n token = virtqueue_dequeue(vq, &len);\n if (token == NULL)\n break;\n if (likely(token == (void *)rxq)) {\n ring->slot[nm_i].len = len;\n ring->slot[nm_i].flags = slot_flags;\n nm_i = nm_next(nm_i, lim);\n n++;\n } else {\n\t\t\t D(\"This should not happen\");\n }\n\t\t}\n\t\tkring->nr_hwtail = nm_i;\n\t\tkring->nr_kflags &= ~NKR_PENDINTR;\n\t}\n ND(\"[B] h %d c %d hwcur %d hwtail %d\",\n\t\tring->head, ring->cur, kring->nr_hwcur,\n\t\t\t kring->nr_hwtail);\n\n\t/*\n\t * Second part: skip past packets that userspace has released.\n\t */\n\tnm_i = kring->nr_hwcur; /* netmap ring index */\n\tif (nm_i != head) {\n\t\tint err = vtnet_refill_rxq(kring, nm_i, head);\n\t\tif (err < 0)\n\t\t\treturn 1;\n\t\tkring->nr_hwcur = err;\n\t\tvirtqueue_notify(vq);\n\t\t/* After draining the queue we may need an intr from the hypervisor */\n \tvtnet_rxq_enable_intr(rxq);\n\t}\n\n ND(\"[C] h %d c %d t %d hwcur %d hwtail %d\",\n\t\tring->head, ring->cur, ring->tail,\n\t\tkring->nr_hwcur, kring->nr_hwtail);\n\n\treturn 0;\n}\n\n\n/* Make RX virtqueue buffers point to netmap buffers. */\nstatic int\nvtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)\n{\n\tstruct ifnet *ifp = sc->vtnet_ifp;\n\tstruct netmap_adapter* na = NA(ifp);\n\tunsigned int r;\n\n\tif (!nm_native_on(na))\n\t\treturn 0;\n\tfor (r = 0; r < na->num_rx_rings; r++) {\n struct netmap_kring *kring = &na->rx_rings[r];\n\t\tstruct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];\n\t\tstruct virtqueue *vq = rxq->vtnrx_vq;\n\t struct netmap_slot* slot;\n\t\tint err = 0;\n\n\t\tslot = netmap_reset(na, NR_RX, r, 0);\n\t\tif (!slot) {\n\t\t\tD(\"strange, null netmap ring %d\", r);\n\t\t\treturn 0;\n\t\t}\n\t\t/* Add up to na->num_rx_desc-1 buffers to this RX virtqueue.\n\t\t * It's important to leave one virtqueue slot free, otherwise\n\t\t * we can run into ring->cur/ring->tail wraparounds.\n\t\t */\n\t\terr = vtnet_refill_rxq(kring, 0, na->num_rx_desc-1);\n\t\tif (err < 0)\n\t\t\treturn 0;\n\t\tvirtqueue_notify(vq);\n\t}\n\n\treturn 1;\n}\n\n/* Update the virtio-net device configurations. 
Number of queues can\n * change dynamically, by 'ethtool --set-channels $IFNAME combined $N'.\n * This is actually the only way virtio-net can currently enable\n * the multiqueue mode.\n * XXX note that we seem to lose packets if the netmap ring has more\n * slots than the queue\n */\nstatic int\nvtnet_netmap_config(struct netmap_adapter *na, u_int *txr, u_int *txd,\n\t\t\t\t\t\tu_int *rxr, u_int *rxd)\n{\n\tstruct ifnet *ifp = na->ifp;\n\tstruct SOFTC_T *sc = ifp->if_softc;\n\n\t*txr = *rxr = sc->vtnet_max_vq_pairs;\n\t*rxd = 512; // sc->vtnet_rx_nmbufs;\n\t*txd = *rxd; // XXX\n D(\"vtnet config txq=%d, txd=%d rxq=%d, rxd=%d\",\n\t\t\t\t\t*txr, *txd, *rxr, *rxd);\n\n\treturn 0;\n}\n\nstatic void\nvtnet_netmap_attach(struct SOFTC_T *sc)\n{\n\tstruct netmap_adapter na;\n\n\tbzero(&na, sizeof(na));\n\n\tna.ifp = sc->vtnet_ifp;\n\tna.num_tx_desc = 1024;// sc->vtnet_rx_nmbufs;\n\tna.num_rx_desc = 1024; // sc->vtnet_rx_nmbufs;\n\tna.nm_register = vtnet_netmap_reg;\n\tna.nm_txsync = vtnet_netmap_txsync;\n\tna.nm_rxsync = vtnet_netmap_rxsync;\n\tna.nm_config = vtnet_netmap_config;\n\tna.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;\n\tD(\"max rings %d\", sc->vtnet_max_vq_pairs);\n\tnetmap_attach(&na);\n\n D(\"virtio attached txq=%d, txd=%d rxq=%d, rxd=%d\",\n\t\t\tna.num_tx_rings, na.num_tx_desc,\n\t\t\tna.num_rx_rings, na.num_rx_desc);\n}\n/* end of file */\n" }, { "alpha_fraction": 0.55559903383255, "alphanum_fraction": 0.5644087791442871, "avg_line_length": 33.0533332824707, "blob_id": "77b0c033c3991448014f6f87e52fef308aa0a89d", "content_id": "dce7189c338086a37f77c89ed427fc5611fb5e34", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10216, "license_type": "permissive", "max_line_length": 112, "num_lines": 300, "path": "/extra/python/pktman.py", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport netmap # our module\nimport time # time measurements\nimport select # poll()\nimport argparse # program argument parsing\nimport multiprocessing # thread management\nimport re\n\n# import scapy suppressing the initial WARNING message\nimport logging\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\nfrom scapy.all import Ether, IP, UDP # packet forgery\n\n\ndef help_quit(parser):\n print(\"\")\n parser.print_help()\n quit()\n\n\ndef build_packet(args, parser):\n src = args.src.split(':')\n dst = args.dst.split(':')\n\n # create the payload\n base = \"Hello from Python\"\n header_len = 14 + 20 + 8\n data = base * ((args.length-header_len)/len(base) + 1)\n data = data[0:args.length-header_len]\n\n scap = Ether(src = args.srcmac, dst = args.dstmac)\n scap = scap / IP(src = src[0], dst = dst[0])\n scap = scap / UDP(sport = int(src[1]), dport = int(dst[1]))\n scap = scap / data\n\n try:\n # checksum is computed when calling str(scap), e.g. 
when the packet is\n # assembled\n ret = str(scap)\n except:\n print(\"Packet parameters are invalid\\n\")\n help_quit(parser)\n\n if args.dump:\n scap.show2()\n\n return ret\n\n\ndef transmit(idx, suffix, args, parser, queue):\n # use nm_open() to open the netmap device and register an interface\n # using an extended interface name\n nmd = netmap.NetmapDesc(args.interface + suffix)\n time.sleep(args.wait_link)\n\n # build the packet that will be transmitted\n pkt = build_packet(args, parser)\n\n # fill in the netmap slots and netmap buffers for tx ring 0\n txr = nmd.transmit_rings[idx]\n num_slots = txr.num_slots\n for i in range(num_slots):\n txr.slots[i].buf[0:len(pkt)] = pkt\n txr.slots[i].len = len(pkt)\n\n # transmit at maximum speed until Ctrl-C is pressed\n cnt = 0 # packet counter\n batch = args.batch\n poller = select.poll()\n poller.register(nmd.getfd(), select.POLLOUT)\n t_start = time.time()\n try:\n cur = txr.cur\n while 1:\n ready_list = poller.poll(2)\n if len(ready_list) == 0:\n print(\"Timeout occurred\")\n break;\n n = txr.tail - cur # avail\n if n < 0:\n n += num_slots\n if n > batch:\n n = batch\n cur += n\n if cur >= num_slots:\n cur -= num_slots\n txr.cur = txr.head = cur # lazy update txr.cur and txr.head\n nmd.txsync()\n cnt += n\n except KeyboardInterrupt:\n # report the result to the main process\n queue.put([cnt, time.time() - t_start])\n pass\n\n\ndef receive(idx, suffix, args, parser, queue):\n # use nm_open() to open the netmap device and register an interface\n # using an extended interface name\n nmd = netmap.NetmapDesc(args.interface + suffix)\n time.sleep(args.wait_link)\n\n # select the right ring\n rxr = nmd.receive_rings[idx]\n num_slots = rxr.num_slots\n\n cnt = 0 # packet counter\n poller = select.poll()\n poller.register(nmd.getfd(), select.POLLIN)\n\n # wait for the first packet\n try:\n poller.poll()\n except KeyboardInterrupt:\n # report the result to the main process\n queue.put([cnt, None])\n return\n\n # receive (throwing away everything) until Ctrl-C is pressed\n t_start = time.time()\n try:\n cur = rxr.cur\n while 1:\n ready_list = poller.poll()\n if len(ready_list) == 0:\n print(\"Timeout occurred\")\n break;\n n = rxr.tail - cur # avail\n if n < 0:\n n += num_slots\n cur += n\n if cur >= num_slots:\n cur -= num_slots\n rxr.cur = rxr.head = cur # lazy update rxr.cur and rxr.head\n cnt += n\n except KeyboardInterrupt:\n # report the result to the main process\n queue.put([cnt, time.time() - t_start])\n pass\n\n\n# How many netmap ring couples does 'ifname' have?\ndef netmap_max_rings(ifname):\n if ifname.startswith('netmap:'):\n ifname = ifname[7:]\n\n nm = netmap.Netmap()\n nm.open()\n nm.if_name = ifname\n nm.getinfo()\n\n return nm.tx_rings\n\n# extract the (nr_ringid, nr_flags) specified by the extended\n# interface name (nm_open() ifname)\ndef netmap_get_ringid(ifname):\n if ifname.startswith('netmap:'):\n ifname = ifname[7:]\n\n nm = netmap.Netmap()\n nm.open()\n nm.if_name = ifname\n nm.getinfo()\n\n return nm.ringid, nm.flags\n\ndef netmap_remove_ifname_suffix(ifname_ext):\n m = re.match(r'\\w+:\\w+', ifname_ext)\n if m == None:\n return None\n\n return m.group(0)\n\n\n############################## MAIN ###########################\n\nif __name__ == '__main__':\n\n # functions implemented by this program\n handler = dict();\n handler['tx'] = transmit\n handler['rx'] = receive\n\n # program arguments\n parser = argparse.ArgumentParser(description = 'Send or receive packets using the netmap API')\n parser.add_argument('-i', '--interface', help 
= 'the interface to register with netmap; '\n 'can be in the form netmap:<OSNAME>[<EXT>] or <VALENAME>[<EXT>], where '\n 'OSNAME is the O.S. name for a network interface (e.g. \"eth0\"), '\n '<VALENAME> is a valid VALE port name (e.g. \"vale18:2\") and <EXT> is an '\n 'optional extension suffix, specified using the nm_open() syntax '\n '(e.g. \"^\", \"-5\", \"{44\", ...)',\n required = True)\n parser.add_argument('-f', '--function', help = 'the function to perform',\n choices = ['tx', 'rx'], default = 'rx')\n parser.add_argument('-b', '--batchsize', help = 'number of packets to send with each TXSYNC '\n 'operation', type=int, default = 512, dest = 'batch')\n parser.add_argument('-l', '--length', help = 'length of the ethernet frame sent',\n type = int, default = 60)\n parser.add_argument('-D', '--dstmac', help = 'destination MAC of tx packets',\n default = 'ff:ff:ff:ff:ff:ff')\n parser.add_argument('-S', '--srcmac', help = 'source MAC of tx packets',\n default = '00:00:00:00:00:00')\n parser.add_argument('-d', '--dst', help = 'destination IP address and UDP port of tx packets',\n default = '10.0.0.2:54322', metavar = 'IP:PORT')\n parser.add_argument('-s', '--src', help = 'source IP address and UDP port of tx packets',\n default = '10.0.0.1:54321', metavar = 'IP:PORT')\n parser.add_argument('-w', '--wait-link', help = 'time to wait for the link before starting '\n 'transmit/receive operations (in seconds)', type = int, default = 1)\n parser.add_argument('-X', '--dump', help = 'dump the packet', action = 'store_true')\n parser.add_argument('-p', '--threads', help = 'number of threads to use for tx/rx '\n 'operations', type = int, default = 1)\n # parse the input\n args = parser.parse_args()\n # print args\n\n # bound checking\n if args.length < 60:\n print('Invalid packet length\\n')\n help_quit(parser)\n\n if args.threads < 1:\n print('Invalid number of threads\\n')\n help_quit(parser)\n\n try:\n # compute 'ifname' removing the suffix from the extended name\n # specified by the user\n ifname = netmap_remove_ifname_suffix(args.interface)\n if ifname == None:\n print('Invalid ifname \"%s\"' % (args.interface, ))\n help_quit(parser)\n\n # compute 'max_couples', which is the number of tx/rx ring couples to be registered\n # according to 'args.interface'\n nr_ringid, nr_flags = netmap_get_ringid(args.interface)\n if nr_flags in [netmap.RegAllNic, netmap.RegNicSw]:\n # ask netmap for the number of available couples\n max_couples = netmap_max_rings(args.interface)\n suffix_required = True\n ringid_offset = 0\n else:\n # all the other netmap.Reg* modes specify just one couple of rings\n max_couples = 1\n suffix_required = False\n ringid_offset = nr_ringid\n if args.threads > max_couples:\n print('You cannot use more than %s (tx,rx) ring couples with \"%s\"' % (max_couples, args.interface))\n help_quit(parser)\n except netmap.error as e:\n print(e)\n quit()\n\n jobs = [] # array of worker processes\n queues = [] # array of queues for IPC\n for i in range(args.threads):\n queue = multiprocessing.Queue()\n queues.append(queue)\n\n # 'i_off' contains the ring idx on which the process below will operate\n i_off = i + ringid_offset\n # it may also be necessary to add an extension suffix to the interface\n # name specified by the user\n if suffix_required:\n suffix = '-' + str(i_off)\n else:\n suffix = ''\n\n # create a new process that will execute the user-selected handler function,\n # with the arguments specified by the 'args' tuple\n job = multiprocessing.Process(name = 'worker-' + str(i),\n target = 
handler[args.function],\n args = (i_off, suffix, args, parser, queue))\n job.daemon = True # ensure work termination\n jobs.append(job)\n\n # start all the workers\n for i in range(len(jobs)):\n jobs[i].start()\n\n # Wait for the user pressing Ctrl-C\n try:\n while 1:\n time.sleep(1000)\n except KeyboardInterrupt:\n pass\n\n # collect and print the result returned by the workers\n tot_rate = 0.0\n for i in range(len(jobs)):\n result = queues[i].get()\n jobs[i].join()\n delta = result[1]\n cnt = result[0]\n if delta == None:\n rate = None\n else:\n rate = 0.001 * cnt / delta\n tot_rate += rate\n print('[%d] Packets processed: %s, Avg rate %s Kpps' % (i, cnt, rate))\n print('Total rate: %s' % (tot_rate, ))\n" }, { "alpha_fraction": 0.6441220045089722, "alphanum_fraction": 0.6574028730392456, "avg_line_length": 36.30275344848633, "blob_id": "ade04f94f662a2a79b96c3b0c3a813d45cc5bcf6", "content_id": "7694796df6a7f1f68d725f36dc75451b2a3bfe40", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4066, "license_type": "permissive", "max_line_length": 107, "num_lines": 109, "path": "/LINUX/archlinux/PKGBUILD", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "# See http://wiki.archlinux.org/index.php/VCS_PKGBUILD_Guidelines\n# for more information on packaging from GIT sources.\n\n# Maintainer: Vincenzo Maffione <[email protected]>\npkgname=netmap\npkgver=r1324.519c07f\npkgrel=1\npkgdesc=\"Netmap is a framework for high speed network packet I/O.\"\narch=('any')\nurl=\"http://info.iet.unipi.it/~luigi/netmap\"\nlicense=('BSD')\ngroups=()\ndepends=('linux' 'glibc')\nmakedepends=('git' 'sed' 'gzip' 'linux-headers' 'abs' 'pacman' 'xmlto' 'docbook-xsl')\nprovides=()\nconflicts=()\nreplaces=()\nbackup=()\noptions=()\ninstall=\"netmap.install\"\nsource=(\"netmap.install\" \"git+https://github.com/luigirizzo/netmap\")\nnoextract=()\nmd5sums=(\"9f936e9fdd86c8a18babdc5848812f92\" \"SKIP\")\n\npkgver() {\n cd \"$srcdir/${pkgname%-git}\"\n printf \"r%s.%s\" \"$(git rev-list --count HEAD)\" \"$(git rev-parse --short HEAD)\"\n}\n\nbuild() {\n msg \"Downloading kernel sources...\"\n # Download kernel sources using ABS, checking that the version is the\n # same as the running kernel\n mkdir -p $srcdir/abs\n cd $srcdir/abs\n ABSROOT=. abs core/linux\n NESTEDDIR=\"$srcdir/abs/core/linux\"\n cd $NESTEDDIR\n grep \"pkgver[ ]*=\" PKGBUILD > .ksver\n KSVER=$(sed 's|pkgver[ ]*=[ ]*||g' .ksver)\n rm .ksver\n RKVER=$(uname -r | sed 's|-.*||g')\n if [ \"$KSVER\" != \"$RKVER\" ]; then\n msg \"Kernel sources version ($KSVER) differs from running kernel version ($RKVER): Cannot continue\"\n return 1\n fi\n KMAJVER=$(echo \"$KSVER\" | sed 's|\\.[0-9]\\+$||g')\n\n echo \"SRCDEST=$SRCDEST\"\n echo \"SRCPKGDEST=$SRCPKGDEST\"\n echo \"PKGDEST=$PKGDEST\"\n echo \"BUILDDIR=$BUILDDIR\"\n # We force some makepkg variables, trying to override yaourt default behaviour,\n # which is to download sources in $srcdir/../linux instead of the place where\n # makepkg is invoked\n SRCDEST=$NESTEDDIR SRCPKGDEST=$NESTEDDIR PKGDEST=$NESTEDDIR BUILDDIR=$NESTEDDIR \\\n makepkg --nobuild --skippgpcheck\n msg \"Kernel sources are ready\"\n\n # Build the netmap kernel module and all modified drivers, using the\n # kernel sources downloaded in the previous steps to copy the NIC\n # drivers. 
Note however that the kernel modules are built against the\n # running kernel, and not against the downloaded sources.\n msg \"Starting to build netmap\"\n cd \"$srcdir/netmap/LINUX\"\n ./configure --kernel-sources=$NESTEDDIR/src/linux-$KMAJVER\n make || return 1\n # Build pkt-gen and vale-ctl\n cd \"$srcdir/netmap/examples\"\n make clean # amend for existing .o\n make pkt-gen vale-ctl || return 1\n msg \"Build complete\"\n}\n\npackage() {\n # Compute the version numbers of the running kernel\n KVER1=$(uname -r)\n KVER2=$(uname -r | sed 's/\\.[0-9]\\+-[0-9]\\+//')\n\n # Install the netmap module into the extramodules-VERSION directory\n mkdir -p \"$pkgdir/usr/lib/modules/extramodules-${KVER2}\"\n cp \"$srcdir/netmap/LINUX/netmap.ko\" \"$pkgdir/usr/lib/modules/extramodules-${KVER2}\"\n\n # Install pkt-gen and vale-ctl into /usr/bin\n mkdir -p \"$pkgdir/usr/bin\"\n cp \"$srcdir/netmap/examples/pkt-gen\" \"$pkgdir/usr/bin\"\n cp \"$srcdir/netmap/examples/vale-ctl\" \"$pkgdir/usr/bin\"\n\n # Install the netmap public headers\n mkdir -p \"$pkgdir/usr/include/net\"\n cp \"$srcdir/netmap/sys/net/netmap.h\" \"$srcdir/netmap/sys/net/netmap_user.h\" \"$pkgdir/usr/include/net\"\n\n # Install the netmap man page\n mkdir -p \"$pkgdir/usr/share/man/man4\"\n cp \"$srcdir/netmap/share/man/man4/netmap.4\" \"$pkgdir/usr/share/man/man4\"\n gzip \"$pkgdir/usr/share/man/man4/netmap.4\"\n\n # Find and install the modified NIC drivers\n cd \"$srcdir/netmap/LINUX\"\n DRIVERS=$(find . -name \"*.ko\" -and ! -name \"netmap.ko\")\n if [ -n \"$DRIVERS\" ]; then\n mkdir -p \"$pkgdir/usr/lib/modules/extramodules-${KVER2}/netmap-drivers\"\n cp --parent $DRIVERS \"$pkgdir/usr/lib/modules/extramodules-${KVER2}/netmap-drivers\"\n cd \"$pkgdir/usr/lib/modules/extramodules-${KVER2}/netmap-drivers\"\n find . 
-name \"*.ko\" -exec sh -c \"mv {} \\$(echo {} | sed 's|\\.ko|_netmap\\.ko|g')\" \\;\n fi\n}\n\n# vim:set ts=2 sw=2 et:\n" }, { "alpha_fraction": 0.5483114719390869, "alphanum_fraction": 0.5601096749305725, "avg_line_length": 18.504573822021484, "blob_id": "b07ae7b7b1ce5f72525caf7aa1ac80818a9a1bf0", "content_id": "484724c140547979cbd346290e121d7d62897cf2", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 27716, "license_type": "permissive", "max_line_length": 84, "num_lines": 1421, "path": "/utils/testmmap.c", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "#define TEST_NETMAP\n\n#include <inttypes.h>\n#include <sys/param.h>\t/* ULONG_MAX */\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n#include <errno.h>\n#include <sys/poll.h>\n#include <sys/wait.h>\n#include <sys/mman.h>\t/* PROT_* */\n#include <fcntl.h>\t/* O_RDWR */\n#include <pthread.h>\n#include <signal.h>\n#include <ctype.h>\n\n\n#define MAX_VARS 100\n\nchar *variables[MAX_VARS];\nint curr_var;\n\n#define VAR_FAILED ((void*)1)\n\nchar *firstarg(char *buf)\n{\n\tint v;\n\tchar *arg = strtok(buf, \" \\t\\n\");\n\tchar *ret;\n\tif (!arg)\n\t\treturn NULL;\n\tif (arg[0] != '$' && arg[0] != '?')\n\t\treturn arg;\n\tv = atoi(arg+1);\n\tif (v < 0 || v >= MAX_VARS)\n\t\treturn \"\";\n\tret = variables[v];\n\tif (ret == NULL)\n\t\treturn \"NULL\";\n\tif (ret == VAR_FAILED) {\n\t\tprintf(\"reading failed var, exit\\n\");\n\t\texit(1);\n\t}\n\tif (arg[0] == '?')\n\t\treturn ret;\n\tret = rindex(ret, '=') + 1;\n\treturn ret;\n}\n\nchar *nextarg()\n{\n\treturn firstarg(NULL);\n}\n\nchar *restofline()\n{\n\treturn strtok(NULL, \"\\n\");\n}\n\nvoid resetvar(int v, char *b)\n{\n\tif (variables[v] != VAR_FAILED)\n\t\tfree(variables[v]);\n\tvariables[v] = b;\n}\n\n#define outecho(format, args...) \\\n\tdo {\\\n\t\tprintf(\"%u:%lu: \" format \"\\n\", getpid(), (unsigned long) pthread_self(), ##args);\\\n\t\tfflush(stdout);\\\n\t} while (0)\n\n#define output(format, args...) \\\n\tdo {\\\n\t\tresetvar(curr_var, (char*)malloc(1024));\\\n\t\tsnprintf(variables[curr_var], 1024, format, ##args);\\\n\t\toutecho(format, ##args);\\\n\t} while (0)\n\n#define output_err(ret, format, args...)\\\n\tdo {\\\n\t\tif (ret < 0) {\\\n\t\t\tresetvar(curr_var, VAR_FAILED);\\\n\t\t\toutecho(format, ##args);\\\n\t\t\toutecho(\"error: %s\", strerror(errno));\\\n\t\t} else {\\\n\t\t\toutput(format, ##args);\\\n\t\t}\\\n\t} while (0)\n\nstruct chan {\n\tFILE *out;\n\tpid_t pid;\n\tpthread_t tid;\n};\n\nint chan_search_free(struct chan* c[], int max)\n{\n\tint i;\n\n\tfor (i = 0; i < max && c[i]; i++)\n\t\t;\n\n\treturn i;\n}\n\nvoid chan_clear_all(struct chan *c[], int max)\n{\n\tint i;\n\n\tfor (i = 0; i < max; i++) {\n\t\tif (c[i]) {\n\t\t\tfclose(c[i]->out);\n\t\t\tfree(c[i]);\n\t\t\tc[i] = NULL;\n\t\t}\n\t}\n}\n\nint last_fd = -1;\nsize_t last_memsize = 0;\nvoid* last_mmap_addr = NULL;\nchar* last_access_addr = NULL;\n\n\nvoid do_open()\n{\n\tlast_fd = open(\"/dev/netmap\", O_RDWR);\n\toutput_err(last_fd, \"open(\\\"/dev/netmap\\\", O_RDWR)=%d\", last_fd);\n}\n\nvoid do_close()\n{\n\tint ret, fd;\n\tchar *arg = nextarg();\n\tfd = arg ? 
atoi(arg) : last_fd;\n\tret = close(fd);\n\toutput_err(ret, \"close(%d)=%d\", fd, ret);\n}\n\n#ifdef TEST_NETMAP\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n#include <ifaddrs.h>\n#include <net/netmap_user.h>\n\nstruct nmreq curr_nmr = { .nr_version = NETMAP_API, .nr_flags = NR_REG_ALL_NIC, };\nchar nmr_name[64];\n\nvoid parse_nmr_config(char* w, struct nmreq *nmr)\n{\n\tchar *tok;\n\tint i, v;\n\n\tnmr->nr_tx_rings = nmr->nr_rx_rings = 0;\n\tnmr->nr_tx_slots = nmr->nr_rx_slots = 0;\n\tif (w == NULL || ! *w)\n\t\treturn;\n\tfor (i = 0, tok = strtok(w, \",\"); tok; i++, tok = strtok(NULL, \",\")) {\n\t\tv = atoi(tok);\n\t\tswitch (i) {\n\t\tcase 0:\n\t\t\tnmr->nr_tx_slots = nmr->nr_rx_slots = v;\n\t\t\tbreak;\n\t\tcase 1:\n\t\t\tnmr->nr_rx_slots = v;\n\t\t\tbreak;\n\t\tcase 2:\n\t\t\tnmr->nr_tx_rings = nmr->nr_rx_rings = v;\n\t\t\tbreak;\n\t\tcase 3:\n\t\t\tnmr->nr_rx_rings = v;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tbreak;\n\t\t}\n\t}\n}\n\nvoid do_getinfo()\n{\n\tint ret;\n\tchar *arg, *name;\n\tint fd;\n\n\tbzero(&curr_nmr, sizeof(curr_nmr));\n\tcurr_nmr.nr_version = NETMAP_API;\n\n\tname = nextarg();\n\tif (name) {\n\t\tstrncpy(curr_nmr.nr_name, name, sizeof(curr_nmr.nr_name));\n\t} else {\n\t\tname = \"any\";\n\t}\n\n\targ = nextarg();\n\tif (!arg) {\n\t\tfd = last_fd;\n\t\tgoto doit;\n\t}\n\tfd = atoi(arg);\n\n\targ = nextarg();\n\tparse_nmr_config(arg, &curr_nmr);\n\ndoit:\n\tret = ioctl(fd, NIOCGINFO, &curr_nmr);\n\tlast_memsize = curr_nmr.nr_memsize;\n\toutput_err(ret, \"ioctl(%d, NIOCGINFO) for %s: region %d memsize=%zu\",\n\t\tfd, name, curr_nmr.nr_arg2, last_memsize);\n}\n\n\nvoid do_regif()\n{\n\tint ret;\n\tchar *arg, *name;\n\tint fd = last_fd;\n\n\tname = nextarg();\n\tif (!name) {\n\t\tname = nmr_name;\n\t\tgoto doit;\n\t}\n\n\tbzero(&curr_nmr, sizeof(curr_nmr));\n\tcurr_nmr.nr_version = NETMAP_API;\n\tcurr_nmr.nr_flags = NR_REG_ALL_NIC;\n\tstrncpy(curr_nmr.nr_name, name, sizeof(curr_nmr.nr_name));\n\n\targ = nextarg();\n\tif (!arg) {\n\t\tgoto doit;\n\t}\n\tfd = atoi(arg);\n\n\targ = nextarg();\n\tparse_nmr_config(arg, &curr_nmr);\n\ndoit:\n\tret = ioctl(fd, NIOCREGIF, &curr_nmr);\n\tlast_memsize = curr_nmr.nr_memsize;\n\toutput_err(ret, \"ioctl(%d, NIOCREGIF) for %s: region %d memsize=%zu\",\n\t\tfd, name, curr_nmr.nr_arg2, last_memsize);\n}\n\nvoid\ndo_txsync()\n{\n\tchar *arg = nextarg();\n\tint fd = arg ? atoi(arg) : last_fd;\n\tint ret = ioctl(fd, NIOCTXSYNC, NULL);\n\toutput_err(ret, \"ioctl(%d, NIOCTXSYNC)=%d\", fd, ret);\n}\n\nvoid\ndo_rxsync()\n{\n\tchar *arg = nextarg();\n\tint fd = arg ? 
atoi(arg) : last_fd;\n\tint ret = ioctl(fd, NIOCRXSYNC, NULL);\n\toutput_err(ret, \"ioctl(%d, NIOCRXSYNC)=%d\", fd, ret);\n}\n#endif /* TEST_NETMAP */\n\n\nvolatile char tmp1;\nvoid do_access()\n{\n\tchar *arg = nextarg();\n\tchar *p;\n\tif (!arg) {\n\t\tif (!last_access_addr) {\n\t\t\toutput(\"missing address\");\n\t\t\treturn;\n\t\t}\n\t\tp = last_access_addr;\n\t} else {\n\t\tp = (char *)strtoul((void *)arg, NULL, 0);\n\t}\n\tlast_access_addr = p + 4096;\n\ttmp1 = *p;\n}\n\nvoid do_dup()\n{\n\tchar *arg = nextarg();\n\tint fd = last_fd;\n\tint ret;\n\n\tif (arg) {\n\t\tfd = atoi(arg);\n\t}\n\tret = dup(fd);\n\toutput_err(ret, \"dup(%d)=%d\", fd, ret);\n\n}\n\nvoid do_mmap()\n{\n\tsize_t memsize;\n\toff_t off = 0;\n\tint fd;\n\tchar *arg;\n\n\targ = nextarg();\n\tif (!arg) {\n\t\tmemsize = last_memsize;\n\t\tfd = last_fd;\n\t\tgoto doit;\n\t}\n\tmemsize = atoi(arg);\n\targ = nextarg();\n\tif (!arg) {\n\t\tfd = last_fd;\n\t\tgoto doit;\n\t}\n\tfd = atoi(arg);\n\targ = nextarg();\n\tif (arg) {\n\t\toff = (off_t)atol(arg);\n\t}\ndoit:\n\tlast_mmap_addr = mmap(0, memsize,\n\t\t\tPROT_WRITE | PROT_READ,\n\t\t\tMAP_SHARED, fd, off);\n\tif (last_access_addr == NULL)\n\t\tlast_access_addr = last_mmap_addr;\n\toutput_err(last_mmap_addr == MAP_FAILED ? -1 : 0,\n\t\t\"mmap(0, %zu, PROT_WRITE|PROT_READ, MAP_SHARED, %d, %jd)=%p\",\n\t\tmemsize, fd, (intmax_t)off, last_mmap_addr);\n\n}\n\nvoid do_munmap()\n{\n\tvoid *mmap_addr;\n\tsize_t memsize;\n\tchar *arg;\n\tint ret;\n\n\targ = nextarg();\n\tif (!arg) {\n\t\tmmap_addr = last_mmap_addr;\n\t\tmemsize = last_memsize;\n\t\tgoto doit;\n\t}\n\tmmap_addr = (void*)strtoul(arg, NULL, 0);\n\targ = nextarg();\n\tif (!arg) {\n\t\tmemsize = last_memsize;\n\t\tgoto doit;\n\t}\n\tmemsize = (size_t)strtoul(arg, NULL, 0);\ndoit:\n\tret = munmap(mmap_addr, memsize);\n\toutput_err(ret, \"munmap(%p, %zu)=%d\", mmap_addr, memsize, ret);\n}\n\nvoid do_poll()\n{\n\t/* timeout fd fd... */\n\tnfds_t nfds = 0, allocated_fds = 10, i;\n\tstruct pollfd *fds;\n\tint timeout = 500; /* 1/2 second */\n\tchar *arg;\n\tint ret;\n\n\targ = nextarg();\n\tif (arg)\n\t\ttimeout = atoi(arg);\n\tfds = malloc(allocated_fds * sizeof(struct pollfd));\n\tif (fds == NULL) {\n\t\toutput_err(-1, \"out of memory\");\n\t\treturn;\n\t}\n\twhile ( (arg = nextarg()) ) {\n\t\tif (nfds >= allocated_fds) {\n\t\t\tstruct pollfd *new_fds;\n\t\t\tallocated_fds *= 2;\n\t\t\tnew_fds = realloc(fds, allocated_fds * sizeof(struct pollfd));\n\t\t\tif (new_fds == NULL) {\n\t\t\t\tfree(fds);\n\t\t\t\toutput_err(-1, \"out of memory\");\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tfds = new_fds;\n\t\t}\n\t\tfds[nfds].fd = atoi(arg);\n\t\tfds[nfds].events = POLLIN;\n\t\tnfds++;\n\t}\n\tret = poll(fds, nfds, timeout);\n\tfor (i = 0; i < nfds; i++) {\n\t\toutput(\"poll(%d)=%s%s%s%s%s\", fds[i].fd,\n\t\t\t(fds[i].revents & POLLIN) ? \"IN \" : \"- \",\n\t\t\t(fds[i].revents & POLLOUT)? \"OUT \" : \"- \",\n\t\t\t(fds[i].revents & POLLERR)? \"ERR \" : \"- \",\n\t\t\t(fds[i].revents & POLLHUP)? 
\"HUP \" : \"- \",\n\t\t\t(fds[i].revents & POLLNVAL)?\"NVAL\" : \"-\");\n\n\t}\n\toutput_err(ret, \"poll(...)=%d\", ret);\n\tfree(fds);\n}\n\n\nvoid\ndo_expr()\n{\n\tunsigned long stack[11];\n\tint top = 10;\n\tchar *arg;\n\tint err = 0;\n\n\tstack[10] = ULONG_MAX;\n\twhile ( (arg = nextarg()) ) {\n\t\terrno = 0;\n\t\tchar *rest;\n\t\tunsigned long n = strtoul(arg, &rest, 0);\n\t\tif (!errno && rest != arg) {\n\t\t\tif (top <= 0) {\n\t\t\t\terr = -1;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tstack[--top] = n;\n\t\t\tcontinue;\n\t\t}\n\t\tif (top <= 8) {\n\t\t\tunsigned long n1 = stack[top++];\n\t\t\tunsigned long n2 = stack[top++];\n\t\t\tunsigned long r = 0;\n\t\t\tswitch (arg[0]) {\n\t\t\tcase '+':\n\t\t\t\tr = n1 + n2;\n\t\t\t\tbreak;\n\t\t\tcase '-':\n\t\t\t\tr = n1 - n2;\n\t\t\t\tbreak;\n\t\t\tcase '*':\n\t\t\t\tr = n1 * n2;\n\t\t\t\tbreak;\n\t\t\tcase '/':\n\t\t\t\tif (n2)\n\t\t\t\t\tr = n1 / n2;\n\t\t\t\telse {\n\t\t\t\t\terrno = EDOM;\n\t\t\t\t\terr = -1;\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\terr = -1;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tstack[--top] = r;\n\t\t\tcontinue;\n\t\t}\n\t\terr = -1;\n\t\tbreak;\n\t}\n\toutput_err(err, \"expr=%lu\", stack[top]);\n}\n\n\n\nvoid\ndo_echo()\n{\n\tchar *arg;\n\tfor (arg = nextarg(); arg; arg = nextarg()) {\n\t\tprintf(\"%s\\n\", arg);\n\t}\n}\n\nvoid\ndo_vars()\n{\n\tint i;\n\tfor (i = 0; i < MAX_VARS; i++) {\n\t\tconst char *v = variables[i];\n\t\tif (v == NULL)\n\t\t\tcontinue;\n\t\tprintf(\"?%d\\t%s\\n\", i, v == VAR_FAILED ? \"FAILED\" : v);\n\t}\n}\n\nstruct netmap_if *\nget_if()\n{\n\tvoid *mmap_addr;\n\tuint32_t off;\n\tchar *arg;\n\n\t/* defaults */\n\toff = curr_nmr.nr_offset;\n\tmmap_addr = last_mmap_addr;\n\n\t/* first arg: if offset */\n\targ = nextarg();\n\tif (!arg) {\n\t\tgoto doit;\n\t}\n\toff = strtoul(arg, NULL, 0);\n\t/* second arg: mmap address */\n\targ = nextarg();\n\tif (!arg) {\n\t\tgoto doit;\n\t}\n\tmmap_addr = (void*)strtoul(arg, NULL, 0);\ndoit:\n\treturn NETMAP_IF(mmap_addr, off);\n}\n\nvoid\ndo_if()\n{\n\tstruct netmap_if *nifp;\n\tunsigned int i;\n\n\tnifp = get_if();\n\n\tprintf(\"name %s\\n\", nifp->ni_name);\n\tprintf(\"version %u\\n\", nifp->ni_version);\n\tprintf(\"flags %x\", nifp->ni_flags);\n\tif (nifp->ni_flags) {\n\t\tprintf(\" [\");\n\t\tif (nifp->ni_flags & NI_PRIV_MEM) {\n\t\t\tprintf(\" PRIV_MEM\");\n\t\t}\n\t\tprintf(\" ]\");\n\t}\n\tprintf(\"\\n\");\n\tprintf(\"tx_rings %u\\n\", nifp->ni_tx_rings);\n\tprintf(\"rx_rings %u\\n\", nifp->ni_rx_rings);\n\tprintf(\"bufs_head %u\\n\", nifp->ni_bufs_head);\n\tfor (i = 0; i < 5; i++)\n\t\tprintf(\"spare1[%d] %u\\n\", i, nifp->ni_spare1[i]);\n\tfor (i = 0; i < (nifp->ni_tx_rings + nifp->ni_rx_rings + 2); i++)\n\t\tprintf(\"ring_ofs[%d] %zd\\n\", i, nifp->ring_ofs[i]);\n}\n\nstruct netmap_ring *\nget_ring()\n{\n\tstruct netmap_if *nifp;\n\tchar *arg;\n\tunsigned int ringid;\n\n\t/* defaults */\n\tringid = 0;\n\n\t/* first arg: ring number */\n\targ = nextarg();\n\tif (!arg)\n\t\tgoto doit;\n\tringid = strtoul(arg, NULL, 0);\ndoit:\n\tnifp = get_if();\n\treturn NETMAP_TXRING(nifp, ringid);\n}\n\n\nvoid\ndo_ring()\n{\n\tstruct netmap_ring *ring;\n\n\tring = get_ring();\n\n\tprintf(\"buf_ofs %\"PRId64\"\\n\", ring->buf_ofs);\n\tprintf(\"num_slots %u\\n\", ring->num_slots);\n\tprintf(\"nr_buf_size %u\\n\", ring->nr_buf_size);\n\tprintf(\"ringid %d\\n\", ring->ringid);\n\tprintf(\"dir %d [\", ring->dir);\n\tswitch (ring->dir) {\n\tcase 1:\n\t\tprintf(\"rx\");\n\t\tbreak;\n\tcase 
0:\n\t\tprintf(\"tx\");\n\t\tbreak;\n\tdefault:\n\t\tprintf(\"??\");\n\t\tbreak;\n\t}\n\tprintf(\"]\\n\");\n\tprintf(\"head %u\\n\", ring->head);\n\tprintf(\"cur %u\\n\", ring->cur);\n\tprintf(\"tail %u\\n\", ring->tail);\n\tprintf(\"flags %x\", ring->flags);\n\tif (ring->flags) {\n\t\tprintf(\" [\");\n\t\tif (ring->flags & NR_TIMESTAMP) {\n\t\t\tprintf(\" TIMESTAMP\");\n\t\t}\n\t\tif (ring->flags & NR_FORWARD) {\n\t\t\tprintf(\" FORWARD\");\n\t\t}\n\t\tprintf(\" ]\");\n\t}\n\tprintf(\"\\n\");\n\tprintf(\"ts %ld:%ld\\n\",\n\t\t\t(long int)ring->ts.tv_sec, (long int)ring->ts.tv_usec);\n}\n\nvoid\ndo_slot()\n{\n\tstruct netmap_ring *ring;\n\tstruct netmap_slot *slot;\n\tlong int index;\n\tchar *arg;\n\n\t/* defaults */\n\tindex = 0;\n\n\targ = nextarg();\n\tif (!arg)\n\t\tgoto doit;\n\tindex = strtoll(arg, NULL, 0);\ndoit:\n\tring = get_ring();\n\tslot = ring->slot + index;\n\tprintf(\"buf_idx %u\\n\", slot->buf_idx);\n\tprintf(\"len %u\\n\", slot->len);\n\tprintf(\"flags %x\", slot->flags);\n\tif (slot->flags) {\n\t\tprintf(\" [\");\n\t\tif (slot->flags & NS_BUF_CHANGED) {\n\t\t\tprintf(\" BUF_CHANGED\");\n\t\t}\n\t\tif (slot->flags & NS_REPORT) {\n\t\t\tprintf(\" REPORT\");\n\t\t}\n\t\tif (slot->flags & NS_FORWARD) {\n\t\t\tprintf(\" FORWARD\");\n\t\t}\n\t\tif (slot->flags & NS_NO_LEARN) {\n\t\t\tprintf(\" NO_LEARN\");\n\t\t}\n\t\tif (slot->flags & NS_INDIRECT) {\n\t\t\tprintf(\" INDIRECT\");\n\t\t}\n\t\tif (slot->flags & NS_MOREFRAG) {\n\t\t\tprintf(\" MOREFRAG\");\n\t\t}\n\t\tprintf(\" ]\");\n\t}\n\tprintf(\"\\n\");\n\tprintf(\"ptr %lx\\n\", (long)slot->ptr);\n}\n\nstatic void\ndump_payload(char *p, int len)\n{\n\tchar buf[128];\n\tint i, j, i0;\n\n\t/* hexdump routine */\n\tfor (i = 0; i < len; ) {\n\t\tmemset(buf, ' ', sizeof(buf));\n\t\tsprintf(buf, \"%5d: \", i);\n\t\ti0 = i;\n\t\tfor (j=0; j < 16 && i < len; i++, j++)\n\t\t\tsprintf(buf+7+j*3, \"%02x \", (uint8_t)(p[i]));\n\t\ti = i0;\n\t\tfor (j=0; j < 16 && i < len; i++, j++)\n\t\t\tsprintf(buf+7+j + 48, \"%c\",\n\t\t\t\tisprint(p[i]) ? p[i] : '.');\n\t\tprintf(\"%s\\n\", buf);\n\t}\n}\n\nvoid\ndo_buf()\n{\n\tstruct netmap_ring *ring;\n\tlong int buf_idx, len;\n\tchar *buf, *arg;\n\n\t/* defaults */\n\tbuf_idx = 2;\n\tlen = 64;\n\n\targ = nextarg();\n\tif (!arg)\n\t\tgoto doit;\n\tbuf_idx = strtoll(arg, NULL, 0);\n\n\targ = nextarg();\n\tif (!arg)\n\t\tgoto doit;\n\tlen = strtoll(arg, NULL, 0);\ndoit:\n\tring = get_ring();\n\tbuf = NETMAP_BUF(ring, buf_idx);\n\tdump_payload(buf, len);\n}\n\nstruct cmd_def {\n\tconst char *name;\n\tvoid (*f)(void);\n};\n\nint _find_command(const struct cmd_def *cmds, int ncmds, const char* cmd)\n{\n\tint i;\n\tfor (i = 0; i < ncmds; i++) {\n\t\tif (strcmp(cmds[i].name, cmd) == 0)\n\t\t\tbreak;\n\t}\n\treturn i;\n}\n\ntypedef void (*nmr_arg_interp_fun)();\n\n#define nmr_arg_unexpected(n) \\\n\tprintf(\"arg%d: %d%s\\n\", n, curr_nmr.nr_arg ## n, \\\n\t\t(curr_nmr.nr_arg ## n ? 
\"???\" : \"\"))\n\nvoid\nnmr_arg_bdg_attach()\n{\n\tuint16_t v = curr_nmr.nr_arg1;\n\tprintf(\"arg1: %d [\", v);\n\tif (v == 0) {\n\t\tprintf(\"no host rings\");\n\t} else if (v == NETMAP_BDG_HOST) {\n\t\tprintf(\"BDG_HOST\");\n\t} else {\n\t\tprintf(\"???\");\n\t}\n\tprintf(\"]\\n\");\n\tnmr_arg_unexpected(2);\n\tnmr_arg_unexpected(3);\n}\n\nvoid\nnmr_arg_bdg_detach()\n{\n\tnmr_arg_unexpected(1);\n\tnmr_arg_unexpected(2);\n\tnmr_arg_unexpected(3);\n}\n\nvoid\nnmr_arg_bdg_list()\n{\n\tif (!strlen(curr_nmr.nr_name)) {\n\t\tnmr_arg_unexpected(1);\n\t\tnmr_arg_unexpected(2);\n\t} else {\n\t\tprintf(\"arg1: %d [bridge]\\n\", curr_nmr.nr_arg1);\n\t\tprintf(\"arg2: %d [port]\\n\", curr_nmr.nr_arg2);\n\t}\n\tnmr_arg_unexpected(3);\n}\n\nvoid\nnmr_arg_bdg_regops()\n{\n}\n\nvoid\nnmr_arg_vnet_hdr()\n{\n\tprintf(\"arg1: %d [vnet hdr len]\\n\", curr_nmr.nr_arg1);\n\tnmr_arg_unexpected(2);\n\tnmr_arg_unexpected(3);\n}\n\nvoid\nnmr_pt_host_create()\n{\n}\n\nvoid\nnmr_pt_host_delete()\n{\n}\n\nvoid\nnmr_bdg_polling_on()\n{\n\tprintf(\"arg1: %d [nr cpus]\\n\", curr_nmr.nr_arg1);\n\tnmr_arg_unexpected(2);\n\tnmr_arg_unexpected(3);\n}\n\nvoid\nnmr_arg_error()\n{\n\tnmr_arg_unexpected(1);\n\tnmr_arg_unexpected(2);\n\tnmr_arg_unexpected(3);\n}\n\nvoid\nnmr_arg_extra()\n{\n\tprintf(\"arg1: %d [%sextra rings]\\n\", curr_nmr.nr_arg1,\n\t\t(curr_nmr.nr_arg1 ? \"\" : \"no \"));\n\tprintf(\"arg2: %d [%s memory allocator]\\n\", curr_nmr.nr_arg2,\n\t\t(curr_nmr.nr_arg2 == 0 ? \"default\" :\n\t\t curr_nmr.nr_arg2 == 1 ? \"global\" : \"private\"));\n\tprintf(\"arg3: %d [%sextra buffers]\\n\", curr_nmr.nr_arg3,\n\t\t(curr_nmr.nr_arg3 ? \"\" : \"no \"));\n}\n\nvoid\ndo_nmr_dump()\n{\n\tu_int ringid = curr_nmr.nr_ringid & NETMAP_RING_MASK;\n\tnmr_arg_interp_fun arg_interp;\n\n\tsnprintf(nmr_name, IFNAMSIZ + 1, \"%s\", curr_nmr.nr_name);\n\tnmr_name[IFNAMSIZ] = '\\0';\n\tprintf(\"name: %s\\n\", nmr_name);\n\tprintf(\"version: %d\\n\", curr_nmr.nr_version);\n\tprintf(\"offset: %d\\n\", curr_nmr.nr_offset);\n\tprintf(\"memsize: %d [\", curr_nmr.nr_memsize);\n\tif (curr_nmr.nr_memsize < (1<<20)) {\n\t\tprintf(\"%d KiB\", curr_nmr.nr_memsize >> 10);\n\t} else {\n\t\tprintf(\"%d MiB\", curr_nmr.nr_memsize >> 20);\n\t}\n\tprintf(\"]\\n\");\n\tprintf(\"tx_slots: %d\\n\", curr_nmr.nr_tx_slots);\n\tprintf(\"rx_slots: %d\\n\", curr_nmr.nr_rx_slots);\n\tprintf(\"tx_rings: %d\\n\", curr_nmr.nr_tx_rings);\n\tprintf(\"rx_rings: %d\\n\", curr_nmr.nr_rx_rings);\n\tprintf(\"ringid: %x [\", curr_nmr.nr_ringid);\n\tif (curr_nmr.nr_ringid & NETMAP_SW_RING) {\n\t\tprintf(\"host rings\");\n\t} else if (curr_nmr.nr_ringid & NETMAP_HW_RING) {\n\t\tprintf(\"hw ring %d\", ringid);\n\t} else {\n\t\tprintf(\"hw rings\");\n\t}\n\tif (curr_nmr.nr_ringid & NETMAP_NO_TX_POLL) {\n\t\tprintf(\", no tx poll\");\n\t}\n\tif (curr_nmr.nr_ringid & NETMAP_DO_RX_POLL) {\n\t\tprintf(\", do rx poll\");\n\t}\n\tprintf(\", region %d\", curr_nmr.nr_arg2);\n\tprintf(\"]\\n\");\n\tprintf(\"cmd: %d\", curr_nmr.nr_cmd);\n\tif (curr_nmr.nr_cmd) {\n\t\tprintf(\" [\");\n\t\tswitch (curr_nmr.nr_cmd) {\n\t\tcase NETMAP_BDG_ATTACH:\n\t\t\tprintf(\"BDG_ATTACH\");\n\t\t\targ_interp = nmr_arg_bdg_attach;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_DETACH:\n\t\t\tprintf(\"BDG_DETACH\");\n\t\t\targ_interp = nmr_arg_bdg_detach;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_REGOPS:\n\t\t\tprintf(\"BDG_REGOPS\");\n\t\t\targ_interp = nmr_arg_bdg_regops;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_LIST:\n\t\t\tprintf(\"BDG_LIST\");\n\t\t\targ_interp = nmr_arg_bdg_list;\n\t\t\tbreak;\n\t\tcase 
NETMAP_BDG_VNET_HDR:\n\t\t\tprintf(\"BDG_VNET_HDR\");\n\t\t\targ_interp = nmr_arg_vnet_hdr;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_NEWIF:\n\t\t\tprintf(\"BDG_NEWIF\");\n\t\t\targ_interp = nmr_arg_error;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_DELIF:\n\t\t\tprintf(\"BDG_DELIF\");\n\t\t\targ_interp = nmr_arg_error;\n\t\t\tbreak;\n\t\tcase NETMAP_PT_HOST_CREATE:\n\t\t\tprintf(\"PT_HOST_CREATE\");\n\t\t\targ_interp = nmr_pt_host_create;\n\t\t\tbreak;\n\t\tcase NETMAP_PT_HOST_DELETE:\n\t\t\tprintf(\"PT_HOST_DELETE\");\n\t\t\targ_interp = nmr_pt_host_delete;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_POLLING_ON:\n\t\t\tprintf(\"BDG_POLLING_ON\");\n\t\t\targ_interp = nmr_bdg_polling_on;\n\t\t\tbreak;\n\t\tcase NETMAP_BDG_POLLING_OFF:\n\t\t\tprintf(\"BDG_POLLING_OFF\");\n\t\t\targ_interp = nmr_arg_error;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tprintf(\"???\");\n\t\t\targ_interp = nmr_arg_error;\n\t\t\tbreak;\n\t\t}\n\t\tprintf(\"]\");\n\t} else {\n\t\targ_interp = nmr_arg_extra;\n\t}\n\tprintf(\"\\n\");\n\targ_interp();\n\tprintf(\"flags: %x [\", curr_nmr.nr_flags);\n\tswitch (curr_nmr.nr_flags & NR_REG_MASK) {\n\tcase NR_REG_DEFAULT:\n\t\tprintf(\"obey ringid\");\n\t\tbreak;\n\tcase NR_REG_ALL_NIC:\n\t\tprintf(\"ALL_NIC\");\n\t\tbreak;\n\tcase NR_REG_SW:\n\t\tprintf(\"SW\");\n\t\tbreak;\n\tcase NR_REG_NIC_SW:\n\t\tprintf(\"NIC_SW\");\n\t\tbreak;\n\tcase NR_REG_ONE_NIC:\n\t\tprintf(\"ONE_NIC(%d)\", ringid);\n\t\tbreak;\n\tcase NR_REG_PIPE_MASTER:\n\t\tprintf(\"PIPE_MASTER(%d)\", ringid);\n\t\tbreak;\n\tcase NR_REG_PIPE_SLAVE:\n\t\tprintf(\"PIPE_SLAVE(%d)\", ringid);\n\t\tbreak;\n\tdefault:\n\t\tprintf(\"???\");\n\t\tbreak;\n\t}\n\tif (curr_nmr.nr_flags & NR_MONITOR_TX) {\n\t\tprintf(\", MONITOR_TX\");\n\t}\n\tif (curr_nmr.nr_flags & NR_MONITOR_RX) {\n\t\tprintf(\", MONITOR_RX\");\n\t}\n\tif (curr_nmr.nr_flags & NR_ZCOPY_MON) {\n\t\tprintf(\", ZCOPY_MON\");\n\t}\n\tif (curr_nmr.nr_flags & NR_EXCLUSIVE) {\n\t\tprintf(\", EXCLUSIVE\");\n\t}\n\tif (curr_nmr.nr_flags & NR_PTNETMAP_HOST) {\n\t\tprintf(\", PTNETMAP_HOST\");\n\t}\n\tprintf(\"]\\n\");\n\tprintf(\"spare2[0]: %x\\n\", curr_nmr.spare2[0]);\n}\n\nvoid\ndo_nmr_reset()\n{\n\tbzero(&curr_nmr, sizeof(curr_nmr));\n}\n\nvoid\ndo_nmr_name()\n{\n\tchar *name = nextarg();\n\tif (name) {\n\t\tstrncpy(curr_nmr.nr_name, name, IFNAMSIZ);\n\t}\n\tstrncpy(nmr_name, curr_nmr.nr_name, IFNAMSIZ);\n\tnmr_name[IFNAMSIZ] = '\\0';\n\toutput(\"name=%s\", nmr_name);\n}\n\nvoid\ndo_nmr_ringid()\n{\n\tchar *arg;\n\tuint16_t ringid = curr_nmr.nr_ringid;\n\tint n;\n\tfor (n = 0, arg = nextarg(); arg; arg = nextarg(), n++) {\n\t\tif (strcmp(arg, \"hw-ring\") == 0) {\n\t\t\tringid |= NETMAP_HW_RING;\n\t\t} else if (strcmp(arg, \"sw-ring\") == 0) {\n\t\t\tringid |= NETMAP_SW_RING;\n\t\t} else if (strcmp(arg, \"no-tx-poll\") == 0) {\n\t\t\tringid |= NETMAP_NO_TX_POLL;\n\t\t} else if (strcmp(arg, \"default\") == 0) {\n\t\t\tringid = 0;\n\t\t} else {\n\t\t\tringid &= ~NETMAP_RING_MASK;\n\t\t\tringid |= (atoi(arg) & NETMAP_RING_MASK);\n\t\t}\n\t}\n\tif (n)\n\t\tcurr_nmr.nr_ringid = ringid;\n\toutput(\"ringid=%x\", curr_nmr.nr_ringid);\n}\n\nvoid\ndo_nmr_cmd()\n{\n\tchar *arg = nextarg();\n\tif (arg == NULL)\n\t\tgoto out;\n\n\tif (strcmp(arg, \"bdg-attach\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_ATTACH;\n\t} else if (strcmp(arg, \"bdg-detach\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_DETACH;\n\t} else if (strcmp(arg, \"bdg-list\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_LIST;\n\t} else if (strcmp(arg, \"bdg-host\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_HOST;\n\t} else if (strcmp(arg, 
\"bdg-vnet-hdr\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_VNET_HDR;\n\t} else if (strcmp(arg, \"bdg-newif\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_NEWIF;\n\t} else if (strcmp(arg, \"bdg-delif\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_DELIF;\n\t} else if (strcmp(arg, \"bdg-polling-on\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_POLLING_ON;\n\t} else if (strcmp(arg, \"bdg-polling-off\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_BDG_POLLING_OFF;\n\t} else if (strcmp(arg, \"pt-host-create\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_PT_HOST_CREATE;\n\t} else if (strcmp(arg, \"pt-host-delete\") == 0) {\n\t\tcurr_nmr.nr_cmd = NETMAP_PT_HOST_DELETE;\n\t}\nout:\n\toutput(\"cmd=%x\", curr_nmr.nr_cmd);\n}\n\nvoid\ndo_nmr_flags()\n{\n\tchar *arg;\n\tuint32_t flags = curr_nmr.nr_flags;\n\tint n;\n\tfor (n = 0, arg = nextarg(); arg; arg = nextarg(), n++) {\n\t\tif (strcmp(arg, \"all-nic\") == 0) {\n\t\t\tflags &= ~NR_REG_MASK;\n\t\t\tflags |= NR_REG_ALL_NIC;\n\t\t} else if (strcmp(arg, \"sw\") == 0) {\n\t\t\tflags &= ~NR_REG_MASK;\n\t\t\tflags |= NR_REG_SW;\n\t\t} else if (strcmp(arg, \"nic-sw\") == 0) {\n\t\t\tflags &= ~NR_REG_MASK;\n\t\t\tflags |= NR_REG_NIC_SW;\n\t\t} else if (strcmp(arg, \"one-nic\") == 0) {\n\t\t\tflags &= ~NR_REG_MASK;\n\t\t\tflags |= NR_REG_ONE_NIC;\n\t\t} else if (strcmp(arg, \"pipe-master\") == 0) {\n\t\t\tflags &= ~NR_REG_MASK;\n\t\t\tflags |= NR_REG_PIPE_MASTER;\n\t\t} else if (strcmp(arg, \"pipe-slave\") == 0) {\n\t\t\tflags &= ~NR_REG_MASK;\n\t\t\tflags |= NR_REG_PIPE_SLAVE;\n\t\t} else if (strcmp(arg, \"monitor-tx\") == 0) {\n\t\t\tflags |= NR_MONITOR_TX;\n\t\t} else if (strcmp(arg, \"monitor-rx\") == 0) {\n\t\t\tflags |= NR_MONITOR_RX;\n\t\t} else if (strcmp(arg, \"zcopy-mon\") == 0) {\n\t\t\tflags |= NR_ZCOPY_MON;\n\t\t} else if (strcmp(arg, \"exclusive\") == 0) {\n\t\t\tflags |= NR_EXCLUSIVE;\n\t\t} else if (strcmp(arg, \"ptnetmap-host\") == 0) {\n\t\t\tflags |= NR_PTNETMAP_HOST;\n\t\t} else if (strcmp(arg, \"default\") == 0) {\n\t\t\tflags = 0;\n\t\t}\n\t}\n\tif (n)\n\t\tcurr_nmr.nr_flags = flags;\n\toutput(\"flags=%x\", curr_nmr.nr_flags);\n}\n\nstruct cmd_def nmr_commands[] = {\n\t{ \"dump\",\tdo_nmr_dump },\n\t{ \"reset\",\tdo_nmr_reset },\n\t{ \"name\",\tdo_nmr_name },\n\t{ \"ringid\",\tdo_nmr_ringid },\n\t{ \"cmd\",\tdo_nmr_cmd },\n\t{ \"flags\",\tdo_nmr_flags },\n};\n\nconst int N_NMR_CMDS = sizeof(nmr_commands) / sizeof(struct cmd_def);\n\nint\nfind_nmr_command(const char *cmd)\n{\n\treturn _find_command(nmr_commands, N_NMR_CMDS, cmd);\n}\n\n#define nmr_arg_update(f) \t\t\t\t\\\n\t({\t\t\t\t\t\t\\\n\t\tint __ret = 0;\t\t\t\t\\\n\t\tif (strcmp(cmd, #f) == 0) {\t\t\\\n\t\t\tchar *arg = nextarg();\t\t\\\n\t\t\tif (arg) {\t\t\t\\\n\t\t\t\tcurr_nmr.nr_##f = strtol(arg, NULL, 0); \\\n\t\t\t}\t\t\t\t\\\n\t\t\toutput(#f \"=%d\", curr_nmr.nr_##f);\t\\\n\t\t\t__ret = 1;\t\t\t\\\n\t\t} \t\t\t\t\t\\\n\t\t__ret;\t\t\t\t\t\\\n\t})\n\n/* prepare the curr_nmr */\nvoid\ndo_nmr()\n{\n\tchar *cmd = nextarg();\n\tint i;\n\n\tif (cmd == NULL) {\n\t\tdo_nmr_dump();\n\t\treturn;\n\t}\n\tif (cmd[0] == '.') {\n\t\tcmd++;\n\t} else {\n\t\ti = find_nmr_command(cmd);\n\t\tif (i < N_NMR_CMDS) {\n\t\t\tnmr_commands[i].f();\n\t\t\treturn;\n\t\t}\n\t}\n\tif (nmr_arg_update(version) ||\n\t nmr_arg_update(offset) ||\n\t nmr_arg_update(memsize) ||\n\t nmr_arg_update(tx_slots) ||\n\t nmr_arg_update(rx_slots) ||\n\t nmr_arg_update(tx_rings) ||\n\t nmr_arg_update(rx_rings) ||\n\t nmr_arg_update(ringid) ||\n\t nmr_arg_update(cmd) ||\n\t nmr_arg_update(arg1) ||\n\t nmr_arg_update(arg2) ||\n\t 
nmr_arg_update(arg3) ||\n\t nmr_arg_update(flags))\n\t\treturn;\n\toutput(\"unknown field: %s\", cmd);\n}\n\n\n\nstruct cmd_def commands[] = {\n\t{ \"open\",\tdo_open,\t},\n\t{ \"close\", \tdo_close,\t},\n#ifdef TEST_NETMAP\n\t{ \"getinfo\",\tdo_getinfo,\t},\n\t{ \"regif\",\tdo_regif,\t},\n\t{ \"txsync\",\tdo_txsync,\t},\n\t{ \"rxsync\",\tdo_rxsync,\t},\n#endif /* TEST_NETMAP */\n\t{ \"dup\",\tdo_dup,\t\t},\n\t{ \"mmap\",\tdo_mmap,\t},\n\t{ \"access\",\tdo_access,\t},\n\t{ \"munmap\",\tdo_munmap,\t},\n\t{ \"poll\",\tdo_poll,\t},\n\t{ \"expr\",\tdo_expr,\t},\n\t{ \"echo\",\tdo_echo,\t},\n\t{ \"vars\",\tdo_vars,\t},\n\t{ \"if\", do_if, },\n\t{ \"ring\", do_ring, },\n\t{ \"slot\", do_slot, },\n\t{ \"buf\", do_buf, },\n\t{ \"nmr\",\tdo_nmr,\t\t}\n};\n\nconst int N_CMDS = sizeof(commands) / sizeof(struct cmd_def);\n\nint find_command(const char* cmd)\n{\n\treturn _find_command(commands, N_CMDS, cmd);\n}\n\n#define MAX_CHAN 10\n\nvoid prompt()\n{\n\tif (isatty(STDIN_FILENO)) {\n\t\tprintf(\"> \");\n\t}\n}\n\nstruct chan *channels[MAX_CHAN];\n\nvoid*\nthread_cmd_loop(void *arg)\n{\n\tchar buf[1024];\n\tFILE *in = (FILE*)arg;\n\n\twhile (fgets(buf, 1024, in)) {\n\t\tchar *cmd;\n\t\tint i;\n\n\t\tcmd = firstarg(buf);\n\t\ti = find_command(cmd);\n\t\tif (i < N_CMDS) {\n\t\t\tcommands[i].f();\n\t\t\tcontinue;\n\t\t}\n\t\toutput(\"unknown cmd %s\", cmd);\n\t}\n\tfclose(in);\n\treturn NULL;\n}\n\nvoid do_exit()\n{\n\toutput(\"quit\");\n}\n\nvoid\ncmd_loop()\n{\n\tchar buf[1024];\n\tint i;\n\tstruct chan *c;\n\n\tbzero(channels, sizeof(*channels) * MAX_CHAN);\n\n\tatexit(do_exit);\n\n\tfor (prompt(); fgets(buf, 1024, stdin); prompt()) {\n\t\tchar *cmd;\n\t\tint slot;\n\n\t\tcmd = firstarg(buf);\n\t\tif (!cmd)\n\t\t\tcontinue;\n\t\tif (cmd[0] == '@') {\n\t\t\tcurr_var = atoi(cmd + 1);\n\t\t\tif (curr_var < 0 || curr_var >= MAX_VARS)\n\t\t\t\tcurr_var = 0;\n\t\t\tcmd = nextarg();\n\t\t\tif (!cmd)\n\t\t\t\tcontinue;\n\t\t} else {\n\t\t\tcurr_var = 0;\n\t\t}\n\n\t\tif (strcmp(cmd, \"fork\") == 0) {\n\t\t\tint slot = chan_search_free(channels, MAX_CHAN);\n\t\t\tstruct chan *c = NULL;\n\t\t\tpid_t pid;\n\t\t\tint p1[2] = { -1, -1};\n\n\t\t\tif (slot == MAX_CHAN) {\n\t\t\t\toutput(\"too many channels\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tc = channels[slot] = (struct chan*)malloc(sizeof(struct chan));\n\t\t\tif (c == NULL) {\n\t\t\t\toutput_err(-1, \"malloc\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tbzero(c, sizeof(*c));\n\t\t\tif (pipe(p1) < 0) {\n\t\t\t\toutput_err(-1, \"pipe\");\n\t\t\t\tgoto clean1;\n\t\t\t}\n\t\t\tc->out = fdopen(p1[1], \"w\");\n\t\t\tif (c->out == NULL) {\n\t\t\t\toutput_err(-1, \"fdopen\");\n\t\t\t\tgoto clean1;\n\t\t\t}\n\t\t\tpid = fork();\n\t\t\tswitch (pid) {\n\t\t\tcase -1:\n\t\t\t\toutput_err(-1, \"fork\");\n\t\t\t\tgoto clean1;\n\t\t\tcase 0:\n\t\t\t\tfclose(stdin);\n\t\t\t\tif (dup(p1[0]) < 0) {\n\t\t\t\t\toutput_err(-1, \"dup\");\n\t\t\t\t\texit(1);\n\t\t\t\t}\n\t\t\t\tclose(p1[1]);\n\t\t\t\tstdin = fdopen(0, \"r\");\n\t\t\t\tchan_clear_all(channels, MAX_CHAN);\n\t\t\t\tgoto out;\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tc->pid = pid;\n\t\t\tclose(p1[0]);\n\t\t\toutput(\"fork()=%d slot=%d\", pid, slot);\n\t\t\tcontinue;\n\t\tclean1:\n\t\t\tif (c) {\n\t\t\t\tfclose(c->out);\n\t\t\t}\n\t\t\tclose(p1[0]);\n\t\t\tclose(p1[1]);\n\t\t\tfree(c);\n\t\tout:\n\t\t\tcontinue;\n\t\t}\n\t\tif (strcmp(cmd, \"kill\") == 0) {\n\t\t\tint ret;\n\n\t\t\tcmd = nextarg();\n\t\t\tif (!cmd) {\n\t\t\t\toutput(\"missing slot\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tslot = atoi(cmd);\n\t\t\tif (slot < 0 
|| slot >= MAX_CHAN || !channels[slot]) {\n\t\t\t\toutput(\"invalid slot: %s\", cmd);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tc = channels[slot];\n\t\t\tret = kill(c->pid, SIGTERM);\n\t\t\toutput_err(ret, \"kill(%d, SIGTERM)=%d\", c->pid, ret);\n\t\t\tif (ret != -1) {\n\t\t\t\twait(NULL);\n\t\t\t\tfclose(c->out);\n\t\t\t\tfree(c);\n\t\t\t\tchannels[slot] = NULL;\n\t\t\t}\n\t\t\tcontinue;\n\t\t}\n\t\tif (strcmp(cmd, \"thread\") == 0) {\n\t\t\tint slot = chan_search_free(channels, MAX_CHAN);\n\t\t\tstruct chan *c = NULL;\n\t\t\tpthread_t tid;\n\t\t\tint p1[2] = { -1, -1};\n\t\t\tint ret;\n\t\t\tFILE *in = NULL;\n\n\t\t\tif (slot == MAX_CHAN) {\n\t\t\t\toutput(\"too many channels\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tc = channels[slot] = (struct chan*)malloc(sizeof(struct chan));\n\t\t\tbzero(c, sizeof(*c));\n\t\t\tif (pipe(p1) < 0) {\n\t\t\t\toutput_err(-1, \"pipe\");\n\t\t\t\tgoto clean2;\n\t\t\t}\n\t\t\tc->out = fdopen(p1[1], \"w\");\n\t\t\tif (c->out == NULL) {\n\t\t\t\toutput_err(-1, \"fdopen\");\n\t\t\t\tgoto clean2;\n\t\t\t}\n\t\t\tin = fdopen(p1[0], \"r\");\n\t\t\tif (in == NULL) {\n\t\t\t\toutput_err(-1, \"fdopen\");\n\t\t\t\tgoto clean2;\n\t\t\t}\n\t\t\tret = pthread_create(&tid, NULL, thread_cmd_loop, in);\n\t\t\toutput_err(ret, \"pthread_create() tid=%lu slot=%d\",\n\t\t\t\t(unsigned long) tid, slot);\n\t\t\tif (ret < 0)\n\t\t\t\tgoto clean2;\n\t\t\tc->pid = getpid();\n\t\t\tc->tid = tid;\n\t\t\tcontinue;\n\t\tclean2:\n\t\t\tfclose(in);\n\t\t\tfclose(c->out);\n\t\t\tclose(p1[0]);\n\t\t\tclose(p1[1]);\n\t\t\tfree(c);\n\t\t\tcontinue;\n\t\t}\n\t\tif (strcmp(cmd, \"cancel\") == 0) {\n\t\t\tint ret;\n\n\t\t\tcmd = nextarg();\n\t\t\tif (!cmd) {\n\t\t\t\toutput(\"missing slot\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tslot = atoi(cmd);\n\t\t\tif (slot < 0 || slot >= MAX_CHAN || !channels[slot]) {\n\t\t\t\toutput(\"invalid slot: %s\", cmd);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tc = channels[slot];\n\t\t\tfclose(c->out);\n\t\t\tret = pthread_join(c->tid, NULL);\n\t\t\toutput_err(ret, \"pthread_join(%lu)=%d\",\n\t\t\t\t(unsigned long) c->tid, ret);\n\t\t\tif (ret > 0) {\n\t\t\t\tfree(c);\n\t\t\t\tchannels[slot] = NULL;\n\t\t\t}\n\t\t\tcontinue;\n\t\t}\n\t\ti = find_command(cmd);\n\t\tif (i < N_CMDS) {\n\t\t\tcommands[i].f();\n\t\t\tcontinue;\n\t\t}\n\t\tslot = atoi(cmd);\n\t\tif (slot < 0 || slot > MAX_CHAN || !channels[slot]) {\n\t\t\toutput(\"invalid cmd/slot: %s\", cmd);\n\t\t\tcontinue;\n\t\t}\n\t\tcmd = restofline();\n\t\tif (!cmd) {\n\t\t\toutput(\"missing command\");\n\t\t\tcontinue;\n\t\t}\n\t\tfprintf(channels[slot]->out, \"%s\\n\", cmd);\n\t\tfflush(channels[slot]->out);\n\t\tsleep(1);\n\t}\n}\n\nint\nmain(int argc, char **argv)\n{\n\t(void) argc;\n\t(void) argv;\n\tprintf(\"testmmap\\n\");\n\tcmd_loop();\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6747624278068542, "alphanum_fraction": 0.6842660903930664, "avg_line_length": 18.32653045654297, "blob_id": "e16f3ebd31e0bf9b96dacd22cd63a9c91ee1e3a4", "content_id": "8659b0bf882b98957295d108c2e648da0c7f2152", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 947, "license_type": "permissive", "max_line_length": 68, "num_lines": 49, "path": "/utils/GNUmakefile", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "# For multiple programs using a single source file each,\n# we can just define 'progs' and create custom targets.\nPROGS\t= test_select testmmap test_nm producer\nX86PROGS = testlock testcsum\nLIBNETMAP =\n\nCLEANFILES = $(PROGS) $(X86PROGS) 
*.o\n\nSRCDIR ?= ..\nVPATH = $(SRCDIR)/utils\n\nNO_MAN=\nCFLAGS = -O2 -pipe\nCFLAGS += -Werror -Wall -Wunused-function\nCFLAGS += -I $(SRCDIR)/sys # -I/home/luigi/FreeBSD/head/sys -I../sys\nCFLAGS += -Wextra\nifdef WITH_PCAP\n# do not use pcap by default, as it is not always available on linux\nLDLIBS += -lpcap\nelse\nCFLAGS += -DNO_PCAP\nendif\n\nLDLIBS += -lpthread\nifeq ($(shell uname),Linux)\n\tLDLIBS += -lrt -lm\t# on linux\nendif\n#SRCS = pkt-gen.c\n\nPREFIX ?= /usr/local\n\nall: $(PROGS)\n\nall-x86: $(PROGS) $(X86PROGS)\n\nkern_test: testmod/kern_test.c\n\ntest_nm: test_nm.o\n\nclean:\n\t-@rm -rf $(CLEANFILES)\n\ntestlock: testlock.c\n\n.PHONY: install\ninstall: $(PROGS:%=install-%)\n\ninstall-%:\n\tinstall -D $* $(DESTDIR)/$(PREFIX)/bin/$*\n" }, { "alpha_fraction": 0.6918238997459412, "alphanum_fraction": 0.7012578845024109, "avg_line_length": 17.66666603088379, "blob_id": "7160a930217775a50bb970a08785cddad73071d2", "content_id": "b569482b5b12ad9d58b1567c77c6b6128e17abda", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 318, "license_type": "permissive", "max_line_length": 76, "num_lines": 18, "path": "/LINUX/dkms/README.md", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "DKMS GUIDE\n==========\n\nSome prerequisites:\n # apt-get install dkms linux-source linux-headers-$(uname -r) devscripts\n\n\nThe first way is a plain DKMS installation:\n```\nmake install-dkms\ndkms install netmap/<VERSION>\n```\n\nOr build a .deb package with the sources:\n```\nmake install-dkms\ndkms mkdeb netmap/0.0.1 --source-only\n```\n" }, { "alpha_fraction": 0.5880619287490845, "alphanum_fraction": 0.6100573539733887, "avg_line_length": 26.585176467895508, "blob_id": "400c9eb5c665990bc4ddd13875e0204a48c030ca", "content_id": "5453b653a3501e86eefc1961f24292973be09681", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 49874, "license_type": "permissive", "max_line_length": 98, "num_lines": 1808, "path": "/apps/tlem/tlem.c", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2016 Universita` di Pisa. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#if 0 /* COMMENT */\n\nThis program implements TLEM, a bandwidth and delay emulator between two\nnetmap ports. It is meant to be run from the command line and\nimplemented with a main control thread, plus a couple of threads\nfor each direction of the communication.\n\nThe control thread parses command line arguments and then sits\nin a loop where it periodically reads traffic statistics from\nthe other threads and prints them out on the console.\n\nThe packet transfer in each direction is implemented by a \"producer\"\nthread prod() which reads from the input netmap port and puts packets\ninto a queue (struct _qs) with appropriate metadata on when packets\nare due for release, and a \"consumer\" thread cons() which reads\nfrom the queue and transmits packets on the output port when their\ntime has come.\n\n netmap thread struct _qs thread netmap\n port port\n {input}-->(prod)-->--[ queue ]-->--(cons)-->{output}\n\nThe producer can either wait for traffic using a blocking poll(),\nor periodically check the input around short usleep().\nThe latter mechanism is the preferred one as it allows a controlled\nlatency with a low interrupt load and a modest system load.\n\nThe queue is sized so that overflows can occur only if the consumer\nis severely backlogged, hence the only appropriate option is to drop\ntraffic rather than wait for space. The case of an empty queue is\nmanaged by having the consumer probe the queue around short usleep()\ncalls. This mechanism gives controlled latency with moderate system\nload, so it is not worthwhile to optimize the CPU usage.\n\nIn order to get good and predictable performance, it is important\nthat threads are pinned to a single core, and it is preferable that\nprod() and cons() for each direction share the cache as much as possible.\nPutting them on two hyperthreads of the same core seems to give\ngood results but that should be investigated further.\n\nIt also seems useful to use a scheduler (SCHED_FIFO or SCHED_RR)\nthat gives more predictable cycles to the CPU, as well as try\nto keep other system tasks away from the CPUs used for the four\nmain threads.\nThe program does CPU pinning and sets the scheduler and priority\nfor the prod and cons threads. Externally one should do the\nassignment of other threads (e.g. interrupt handlers) and\nmake sure that network interfaces are configured properly.\n\n--- Main functions of the program ---\nwithin each function, q is used as a pointer to the queue holding\npackets and parameters.\n\nprod()\n\n waits for packets using the wait_for_packets() function.\n After that, for each packet, the following information may\n be of use:\n \tq->cur_pkt\tpoints to the buffer containing the packet\n\tq->cur_len\tpacket length, excluding CRC\n\tq->prod_now\ttime at which the packet was received,\n\t\t\tin nanoseconds. 
A batch of packets may\n\t\t\thave the same value q->prod_now\n\n Four functions are then called in sequence:\n\n q->c_loss (set with the -L command line option) decides\n \twhether the packet should be dropped before even queuing.\n\tThis is generally useful to emulate random loss.\n\tThe function is supposed to set q->cur_drop = 1 if the\n\tpacket should be dropped, or leave it to 0 otherwise.\n\n no_room (not configurable) checks whether there is space\n \tin the queue, enforcing both the queue size set with -Q\n\tand the space allocated for the delay line.\n\tIn case of no space the packet is dropped.\n\n q->c_bw (set with the -B command line option) is used to\n enforce bandwidth limitation. The function must store\n\tin q->cur_tt the transmission time (in nanoseconds) of\n\tthe packet, which is typically proportional to the length\n\tof the packet, i.e. q->cur_tt = q->cur_len / <bandwidth>\n\tVariants are possible, e.g. to account for constant framing\n\tbits as on the ethernet, or variable channel acquisition times,\n\tetc.\n\tThis mechanism can also be used to simulate variable queueing\n\tdelay e.g. due to the presence of cross traffic.\n\n q->c_delay (set with the -D option) implements delay emulation.\n\tThe function should set q->cur_delay to the additional\n\tdelay the packet is subject to. The framework will take care of\n\tcomputing the actual exit time of a packet so that there is no\n\treordering.\n\n\n\n#endif /* COMMENT */\n\n// debugging macros\n#define NED(_fmt, ...)\tdo {} while (0)\n#define ED(_fmt, ...)\t\t\t\t\t\t\\\n\tdo {\t\t\t\t\t\t\t\\\n\t\tstruct timeval _t0;\t\t\t\t\\\n\t\tgettimeofday(&_t0, NULL);\t\t\t\\\n\t\tfprintf(stderr, \"%03d.%03d [%5d] \\t\" _fmt \"\\n\", \\\n\t\t(int)(_t0.tv_sec % 1000), (int)_t0.tv_usec/1000, \\\n\t\t__LINE__, ##__VA_ARGS__); \\\n\t} while (0)\n\n#define _GNU_SOURCE\t// for CPU_SET() etc\n#include <stdio.h>\n#define NETMAP_WITH_LIBS\n#include <net/netmap_user.h>\n#include <sys/poll.h>\n\n\nint verbose = 0;\n\nstatic int do_abort = 0;\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <pthread.h>\n#include <sys/time.h>\n\n#include <sys/resource.h> // setpriority\n\n#ifdef __FreeBSD__\n#include <pthread_np.h> /* pthread w/ affinity */\n#include <sys/cpuset.h> /* cpu_set */\n#endif /* __FreeBSD__ */\n\n#ifdef linux\n#define cpuset_t cpu_set_t\n#endif\n\n#ifdef __APPLE__\n#define cpuset_t uint64_t // XXX\nstatic inline void CPU_ZERO(cpuset_t *p)\n{\n *p = 0;\n}\n\nstatic inline void CPU_SET(uint32_t i, cpuset_t *p)\n{\n *p |= 1<< (i & 0x3f);\n}\n\n#define pthread_setaffinity_np(a, b, c) ((void)a, 0)\n#define sched_setscheduler(a, b, c)\t(1) /* error */\n#define clock_gettime(a,b) \\\n do {struct timespec t0 = {0,0}; *(b) = t0; } while (0)\n\n#define\t_P64\tunsigned long\n#endif\n\n#ifndef _P64\n\n/* we use uint64_t widely, but printf gives trouble on different\n * platforms so we use _P64 as a cast\n */\n#define\t_P64\tuint64_t\n#endif /* print stuff */\n\n\nstruct _qs;\t/* forward */\n/*\n * descriptor of a configuration entry.\n * Each handler has a parse function which takes ac/av[] and returns\n * 0 if successful. Any allocated space is stored into struct _cfg *\n * that is passed as argument.\n * arg and arg_len are included for convenience.\n */\nstruct _cfg {\n int (*parse)(struct _qs *, struct _cfg *, int ac, char *av[]); /* 0 ok, 1 on error */\n int (*run)(struct _qs *, struct _cfg *arg); /* 0 Ok, 1 on error */\n // int close(struct _qs *, void *arg); /* 0 Ok, 1 on error */\n\n const char *optarg;\t/* command line argument. 
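It is filled in by cmd_apply() with the actual argument once parsing succeeds. 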
Initial value is the error message */\n /* placeholders for common values */\n void *arg;\t\t/* allocated memory if any */\n int arg_len;\t/* size of *arg in case a realloc is needed */\n uint64_t d[16];\t/* static storage for simple cases */\n};\n\n/*\n *\nA packet in the queue is q_pkt plus the payload.\n\nFor the packet descriptor we need the following:\n\n -\tposition of next packet in the queue (can go backwards).\n\tWe can reduce to 32 bits if we consider alignments,\n\tor we just store the length to be added to the current\n\tvalue and assume 0 as a special index.\n -\tactual packet length (16 bits may be ok)\n -\tqueue output time, in nanoseconds (64 bits)\n -\tdelay line output time, in nanoseconds\n\tOne of the two can be packed to a 32bit value\n\nA convenient coding uses 32 bytes per packet.\nEven if we use a compact encoding it is difficult to go below 18 bytes\n\n */\n\nstruct q_pkt {\n\tuint64_t\tnext;\t\t/* buffer index for next packet */\n\tuint64_t\tpktlen;\t\t/* actual packet len */\n\tuint64_t\tpt_qout;\t/* time of output from queue */\n\tuint64_t\tpt_tx;\t\t/* transmit time */\n};\n\n\n/*\n * communication occurs through this data structure, with fields\n * cache-aligned according to who are the readers/writers.\n *\n\nThe queue is an array of memory (buf) of size buflen (does not change).\n\nThe producer uses 'tail' as an index in the queue to indicate\nthe first empty location (ie. after the last byte of data),\nthe consumer uses head to indicate the next byte to consume.\n\nFor best performance we should align buffers and packets\nto multiples of cacheline, but this would explode memory too much.\nWorst case memory explosion is with 65 byte packets.\nMemory usage as shown below:\n\n\t\tqpkt-pad\n\tsize\t32-16\t32-32\t32-64\t64-64\n\n\t64\t96\t96\t96\t128\n\t65\t112\t128\t160\t192\n\n\nAn empty queue has head == tail, a full queue will have free space\nbelow a threshold. In our case the queue is large enough and we\nare non blocking so we can simply drop traffic when the queue\napproaches a full state.\n\nTo simulate bandwidth limitations efficiently, the producer has a second\npointer, prod_tail_1, used to check for expired packets. 
This is done lazily.\n\n */\n/*\n * When sizing the buffer, we must assume some value for the bandwidth.\n * INFINITE_BW is supposed to be faster than what we support\n */\n#define INFINITE_BW\t(200ULL*1000000*1000)\n#define\tMY_CACHELINE\t(128ULL)\n#define PKT_PAD\t\t(32)\t/* padding on packets */\n#define MAX_PKT\t\t(9200)\t/* max packet size */\n\n#define ALIGN_CACHE\t__attribute__ ((aligned (MY_CACHELINE)))\n\nstruct _qs { /* shared queue */\n\tuint64_t\tt0;\t/* start of times */\n\n\tuint64_t \tbuflen;\t/* queue length */\n\tchar *buf;\n\n\t/* the queue has at least 1 empty position */\n\tuint64_t\tmax_bps;\t/* bits per second */\n\tuint64_t\tmax_delay;\t/* nanoseconds */\n\tuint64_t\tqsize;\t/* queue size in bytes */\n\n\t/* handlers for various options */\n\tstruct _cfg\tc_delay;\n\tstruct _cfg\tc_bw;\n\tstruct _cfg\tc_loss;\n\n\t/* producer's fields */\n\tuint64_t\ttx ALIGN_CACHE;\t/* tx counter */\n\tuint64_t\tprod_tail_1;\t/* head of queue */\n\tuint64_t\tprod_queued;\t/* queued bytes */\n\tuint64_t\tprod_head;\t/* cached copy */\n\tuint64_t\tprod_tail;\t/* cached copy */\n\tuint64_t\tprod_now;\t/* most recent producer timestamp */\n\tuint64_t\tprod_drop;\t/* drop packet count */\n\tuint64_t\tprod_max_gap;\t/* rx round duration */\n\n\t/* parameters for reading from the netmap port */\n\tstruct nm_desc *src_port;\t\t/* netmap descriptor */\n\tconst char *\tprod_ifname;\t/* interface name */\n\tstruct netmap_ring *rxring;\t/* current ring being handled */\n\tuint32_t\tsi;\t\t/* ring index */\n\tint\t\tburst;\n\tuint32_t\trx_qmax;\t/* stats on max queued */\n\n\tuint64_t\tqt_qout;\t/* queue exit time for last packet */\n\t\t/*\n\t\t * when doing shaping, the software computes and stores here\n\t\t * the time when the most recently queued packet will exit from\n\t\t * the queue.\n\t\t */\n\n\tuint64_t\tqt_tx;\t\t/* delay line exit time for last packet */\n\t\t/*\n\t\t * The software computes the time at which the most recently\n\t\t * queued packet exits from the queue.\n\t\t * To avoid reordering, the next packet should exit at least\n\t\t * at qt_tx + cur_tt\n\t\t */\n\n\t/* producer's fields controlling the queueing */\n\tchar *\t\tcur_pkt;\t/* current packet being analysed */\n\tuint32_t\tcur_len;\t/* length of current packet */\n\n\tint\t\tcur_drop;\t/* 1 if current packet should be dropped. 
*/\n\t\t/*\n\t\t * cur_drop can be set as a result of the loss emulation,\n\t\t * and may need to use the packet size, current time, etc.\n\t\t */\n\n\tuint64_t\tcur_tt;\t\t/* transmission time (ns) for current packet */\n\t\t/*\n\t\t * The transmission time is how much link time the packet will consume.\n\t\t * should be set by the function that does the bandwidth emulation,\n\t\t * but could also be the result of a function that emulates the\n\t\t * presence of competing traffic, MAC protocols etc.\n\t\t * cur_tt is 0 for links with infinite bandwidth.\n\t\t */\n\n\tuint64_t\tcur_delay;\t/* delay (ns) for current packet from c_delay.run() */\n\t\t/*\n\t\t * this should be set by the function that computes the extra delay\n\t\t * applied to the packet.\n\t\t * The code makes sure that there is no reordering and possibly\n\t\t * bumps the output time as needed.\n\t\t */\n\n\n\t/* consumer's fields */\n\tconst char *\t\tcons_ifname;\n\tuint64_t rx ALIGN_CACHE;\t/* rx counter */\n//\tuint64_t\tcons_head;\t/* cached copy */\n//\tuint64_t\tcons_tail;\t/* cached copy */\n\tuint64_t\tcons_now;\t/* most recent producer timestamp */\n\tuint64_t\tcons_lag;\t/* tail - head */\n\tuint64_t\trx_wait;\t/* stats */\n\n\t/* shared fields */\n\tvolatile uint64_t tail ALIGN_CACHE ;\t/* producer writes here */\n\tvolatile uint64_t head ALIGN_CACHE ;\t/* consumer reads from here */\n};\n\nstruct pipe_args {\n\tint\t\tzerocopy;\n\tint\t\twait_link;\n\n\tpthread_t\tcons_tid;\t/* main thread */\n\tpthread_t\tprod_tid;\t/* producer thread */\n\n\t/* Affinity: */\n\tint\t\tcons_core;\t/* core for cons() */\n\tint\t\tprod_core;\t/* core for prod() */\n\n\tstruct nm_desc *pa;\t\t/* netmap descriptor */\n\tstruct nm_desc *pb;\n\n\tstruct _qs\tq;\n};\n\n#define NS_IN_S\t(1000000000ULL)\t// nanoseconds\n#define TIME_UNITS\tNS_IN_S\n\n/* set the thread affinity. 
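Also raise the priority and request a real-time scheduler (SCHED_RR), so that prod/cons get predictable CPU time; failures are only logged, not fatal. 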
*/\nstatic int\nsetaffinity(int i)\n{\n cpuset_t cpumask;\n\tstruct sched_param p;\n\n if (i == -1)\n return 0;\n\n /* Set thread affinity. */\n CPU_ZERO(&cpumask);\n CPU_SET(i, &cpumask);\n\n if (pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t), &cpumask) != 0) {\n ED(\"Unable to set affinity: %s\", strerror(errno));\n }\n\tif (setpriority(PRIO_PROCESS, 0, -10)) { // XXX not meaningful\n ED(\"Unable to set priority: %s\", strerror(errno));\n\t}\n\tbzero(&p, sizeof(p));\n\tp.sched_priority = 10; // 99 on linux ?\n\t// use SCHED_RR or SCHED_FIFO\n\tif (sched_setscheduler(0, SCHED_RR, &p)) {\n ED(\"Unable to set scheduler: %s\", strerror(errno));\n\t}\n return 0;\n}\n\n\nstatic inline void\nset_tns_now(uint64_t *now, uint64_t t0)\n{\n struct timespec t;\n\n clock_gettime(CLOCK_REALTIME, &t); // XXX precise on FreeBSD ?\n *now = (uint64_t)(t.tv_nsec + NS_IN_S * t.tv_sec);\n *now -= t0;\n}\n\n\nstatic inline int pad(int x)\n{\n\treturn ((x) + PKT_PAD - 1) & ~(PKT_PAD - 1) ;\n}\n\n/* compare two timestamps */\nstatic inline int64_t\nts_cmp(uint64_t a, uint64_t b)\n{\n\treturn (int64_t)(a - b);\n}\n\n/* create a packet descriptor */\nstatic inline struct q_pkt *\npkt_at(struct _qs *q, uint64_t ofs)\n{\n return (struct q_pkt *)(q->buf + ofs);\n}\n\n/*\n * q_reclaim() accounts for packets whose output time has expired,\n * returns 1 if any has been reclaimed.\n * XXX if bw = 0, prod_queued does not need to be updated or prod_tail_1 handled\n */\nstatic int\nq_reclaim(struct _qs *q)\n{\n\tstruct q_pkt *p0, *p;\n\n\tp = p0 = pkt_at(q, q->prod_tail_1);\n\t/* always reclaim queued packets */\n\twhile (ts_cmp(p->pt_qout, q->prod_now) <= 0 && q->prod_queued > 0) {\n\t ND(1, \"reclaim pkt at %ld len %d left %ld\", q->prod_tail_1, p->pktlen, q->prod_queued);\n\t q->prod_queued -= p->pktlen;\n\t q->prod_tail_1 = p->next;\n\t p = pkt_at(q, q->prod_tail_1);\n\t}\n\treturn p != p0;\n}\n\n/*\n * no_room() checks for room in the queue and delay line.\n *\n * For the queue, we check that the amount of queued bytes does\n * not exceed the space. We reclaim expired packets if needed.\n *\n * For the delay line and buffer, we make sure that a packet is never\n * split across a boundary. \"need\" is the amount of space we allocate,\n * padding as needed, so that new_t will become 0 if needed,\n * new_t > 0 if there is room in the remaining part of the buffer.\n *\n * enqueue a packet. 
Two cases:\n * A:\t[ h......t ]\n *\tbecause of the padding, overflow if (h <= t && new_t == 0 && h == 0)\n *\n * B:\t[...t h ......]\n *\toverflow if (h > t && (new_t == 0 || new_t >= h)\n *\n * Conditions to have space:\n * A\n * for another one, wrap tail to 0 to ease checks.\n */\nstatic int\nno_room(struct _qs *q)\n{\n uint64_t h = q->prod_head;\t/* shorthand */\n uint64_t t = q->prod_tail;\t/* shorthand */\n struct q_pkt *p = pkt_at(q, t);\n uint64_t need = pad(q->cur_len) + sizeof(*p); /* space for a packet */\n uint64_t new_t = t + need;\n\n if (q->buflen - new_t < MAX_PKT + sizeof(*p))\n\tnew_t = 0; /* further padding */\n\n /* XXX let the queue overflow once, otherwise it is complex\n * to deal with 0-sized queues\n */\n if (q->prod_queued > q->qsize) {\n\tq_reclaim(q);\n\tif (q->prod_queued > q->qsize) {\n\t q->prod_drop++;\n\t RD(1, \"too many bytes queued %lu, drop %lu\",\n\t\t(_P64)q->prod_queued, (_P64)q->prod_drop);\n\t return 1;\n\t}\n }\n\n if ((h <= t && new_t == 0 && h == 0) || (h > t && (new_t == 0 || new_t >= h)) ) {\n\th = q->prod_head = q->head; /* re-read head, just in case */\n\t/* repeat the test */\n\tif ((h <= t && new_t == 0 && h == 0) || (h > t && (new_t == 0 || new_t >= h)) ) {\n\t ND(1, \"no room for insert h %ld t %ld new_t %ld\",\n\t\t(_P64)h, (_P64)t, (_P64)new_t);\n\t return 1; /* no room for insert */\n\t}\n }\n p->next = new_t; /* prepare for queueing */\n p->pktlen = 0;\n return 0;\n}\n\n\n/*\n * we have already checked for room and prepared p->next\n */\nstatic inline int\nenq(struct _qs *q)\n{\n struct q_pkt *p = pkt_at(q, q->prod_tail);\n\n /* hopefully prefetch has been done ahead */\n nm_pkt_copy(q->cur_pkt, (char *)(p+1), q->cur_len);\n p->pktlen = q->cur_len;\n p->pt_qout = q->qt_qout;\n p->pt_tx = q->qt_tx;\n ND(1, \"enqueue len %d at %d new tail %ld qout %ld tx %ld\",\n\tq->cur_len, (int)q->prod_tail, p->next,\n\tp->pt_qout, p->pt_tx);\n q->prod_tail = p->next;\n q->tx++;\n if (q->max_bps)\n\tq->prod_queued += p->pktlen;\n /* XXX update timestamps ? */\n return 0;\n}\n\n\nint\nrx_queued(struct nm_desc *d)\n{\n u_int tot = 0, i;\n for (i = d->first_rx_ring; i <= d->last_rx_ring; i++) {\n\tstruct netmap_ring *rxr = NETMAP_RXRING(d->nifp, i);\n\n\tND(5, \"ring %d h %d cur %d tail %d\", i,\n\t\trxr->head, rxr->cur, rxr->tail);\n\ttot += nm_ring_space(rxr);\n }\n return tot;\n}\n\n/*\n * wait for packets, then compute a timestamp in 64-bit ns\n */\nstatic void\nwait_for_packets(struct _qs *q)\n{\n int n0;\n uint64_t prev = q->prod_now;\n\n ioctl(q->src_port->fd, NIOCRXSYNC, 0); /* forced */\n while (!do_abort) {\n\n\tn0 = rx_queued(q->src_port);\n\tif (n0 > (int)q->rx_qmax) {\n\t q->rx_qmax = n0;\n\t}\n\tif (n0)\n\t break;\n\tprev = 0; /* we slept */\n\tif (1) {\n\t usleep(5);\n\t ioctl(q->src_port->fd, NIOCRXSYNC, 0);\n\t} else {\n\t struct pollfd pfd;\n\t struct netmap_ring *rx;\n\t int ret;\n\n\t pfd.fd = q->src_port->fd;\n\t pfd.revents = 0;\n\t pfd.events = POLLIN;\n\t ND(1, \"prepare for poll on %s\", q->prod_ifname);\n\t ret = poll(&pfd, 1, 10000);\n\t if (ret <= 0 || verbose) {\n\t\tD(\"poll %s ev %x %x rx %d@%d\",\n\t\t ret <= 0 ? 
\"timeout\" : \"ok\",\n\t\t pfd.events,\n\t\t pfd.revents,\n\t\t rx_queued(q->src_port),\n\t\t NETMAP_RXRING(q->src_port->nifp, q->src_port->first_rx_ring)->cur\n\t\t);\n\t }\n\t if (pfd.revents & POLLERR) {\n\t\trx = NETMAP_RXRING(q->src_port->nifp, q->src_port->first_rx_ring);\n\t\tD(\"error on fd0, rx [%d,%d,%d)\",\n\t\t rx->head, rx->cur, rx->tail);\n\t\tsleep(1);\n\t }\n\t}\n }\n set_tns_now(&q->prod_now, q->t0);\n if (ts_cmp(q->qt_qout, q->prod_now) < 0) {\n\tq->qt_qout = q->prod_now;\n }\n if (prev > 0 && (prev = q->prod_now - prev) > q->prod_max_gap) {\n\tq->prod_max_gap = prev;\n }\n ND(10, \"%s %d queued packets at %ld ms\",\n\tq->prod_ifname, n0, (q->prod_now/1000000) % 10000);\n}\n\n/*\n * prefetch a packet 'pos' slots ahead of cur.\n * not very useful though\n */\nvoid\nprefetch_packet(struct netmap_ring *rxr, int pos)\n{\n struct netmap_slot *rs;\n uint32_t ofs = rxr->cur + pos;\n uint32_t i, l;\n const char *buf;\n\n if (ofs >= rxr->num_slots)\n\treturn;\n rs = &rxr->slot[ofs];\n buf = NETMAP_BUF(rxr, rs->buf_idx);\n l = rs->len;\n for (i = 0; i < l; i += 64)\n\t__builtin_prefetch(buf + i);\n}\n\n/*\n * initialize state variables to the first or next packet\n */\nstatic void\nscan_ring(struct _qs *q, int next /* bool */)\n{\n struct netmap_slot *rs;\n struct netmap_ring *rxr = q->rxring; /* invalid if next == 0 */\n struct nm_desc *pa = q->src_port;\n\n /* fast path for the first two */\n if (likely(next != 0)) { /* current ring */\n\tND(10, \"scan next\");\n\t/* advance */\n\trxr->head = rxr->cur = nm_ring_next(rxr, rxr->cur);\n\tif (!nm_ring_empty(rxr)) /* good one */\n\t goto got_one;\n\tq->si++;\t/* otherwise update and fallthrough */\n } else { /* scan from beginning */\n\tq->si = pa->first_rx_ring;\n\tND(10, \"scanning first ring %d\", q->si);\n }\n while (q->si <= pa->last_rx_ring) {\n\tq->rxring = rxr = NETMAP_RXRING(pa->nifp, q->si);\n\tif (!nm_ring_empty(rxr))\n\t break;\n\tq->si++;\n\tcontinue;\n }\n if (q->si > pa->last_rx_ring) { /* no data, cur == tail */\n ND(5, \"no more pkts on %s\", q->prod_ifname);\n\treturn;\n }\ngot_one:\n rs = &rxr->slot[rxr->cur];\n if (unlikely(rs->buf_idx < 2)) {\n\tD(\"wrong index rx[%d] = %d\", rxr->cur, rs->buf_idx);\n\tsleep(2);\n }\n if (unlikely(rs->len > MAX_PKT)) { // XXX\n\tD(\"wrong len rx[%d] len %d\", rxr->cur, rs->len);\n\trs->len = 0;\n }\n q->cur_pkt = NETMAP_BUF(rxr, rs->buf_idx);\n q->cur_len = rs->len;\n //prefetch_packet(rxr, 1); not much better than prefetching q->cur_pkt, one line\n __builtin_prefetch(q->cur_pkt);\n __builtin_prefetch(rs+1); /* one row ahead ? 
*/\n ND(10, \"-------- slot %d tail %d len %d buf %p\", rxr->cur, rxr->tail, q->cur_len, q->cur_pkt);\n}\n\n/*\n * simple handler for parameters not supplied\n */\nstatic int\nnull_run_fn(struct _qs *q, struct _cfg *cfg)\n{\n (void)q;\n (void)cfg;\n return 0;\n}\n\n\nstatic int\ndrop_after(struct _qs *q)\n{\n\t(void)q; // XXX\n\treturn 0;\n}\n\n\nstatic void *\nprod(void *_pa)\n{\n struct pipe_args *pa = _pa;\n struct _qs *q = &pa->q;\n\n setaffinity(pa->prod_core);\n set_tns_now(&q->prod_now, q->t0);\n q->qt_qout = q->qt_tx = q->prod_now;\n ND(\"start times %ld\", q->prod_now);\n while (!do_abort) { /* producer, infinite */\n\tint count;\n\n\twait_for_packets(q);\t/* also updates prod_now */\n\t// XXX optimize to flush frequently\n\tfor (count = 0, scan_ring(q, 0); count < q->burst && !nm_ring_empty(q->rxring);\n\t\tcount++, scan_ring(q, 1)) {\n\t // transmission time\n\t uint64_t t_tx, tt;\t/* output and transmission time */\n\n\t if (q->cur_len < 60) {\n\t\tRD(5, \"short packet len %d\", q->cur_len);\n\t\tcontinue; // short frame\n\t }\n\t q->c_loss.run(q, &q->c_loss);\n\t if (q->cur_drop)\n\t\tcontinue;\n\t if (no_room(q)) {\n\t\tq->tail = q->prod_tail; /* notify */\n\t\tusleep(1); // XXX give cons a chance to run ?\n\t\tif (no_room(q)) /* try to run drop-free once */\n\t\t continue;\n\t }\n\t // XXX possibly implement c_tt for transmission time emulation\n\t q->c_bw.run(q, &q->c_bw);\n\t tt = q->cur_tt;\n\t q->qt_qout += tt;\n\t if (drop_after(q))\n\t\tcontinue;\n\t q->c_delay.run(q, &q->c_delay); /* compute delay */\n\t t_tx = q->qt_qout + q->cur_delay;\n\t ND(5, \"tt %ld qout %ld tx %ld qt_tx %ld\", tt, q->qt_qout, t_tx, q->qt_tx);\n\t /* insure no reordering and spacing by transmission time */\n\t q->qt_tx = (t_tx >= q->qt_tx + tt) ? t_tx : q->qt_tx + tt;\n\t enq(q);\n\t}\n\tq->tail = q->prod_tail; /* notify */\n }\n D(\"exiting on abort\");\n return NULL;\n}\n\n\n/*\n * the consumer reads from the queue using head,\n * advances it every now and then.\n */\nstatic void *\ncons(void *_pa)\n{\n struct pipe_args *pa = _pa;\n struct _qs *q = &pa->q;\n int cycles = 0;\n int pending = 0;\n const char *pre_start, *pre_end; /* prefetch limits */\n\n /*\n * prefetch about 2k ahead of the current pointer\n */\n pre_start = q->buf + q->head;\n pre_end = pre_start + 2048;\n\n (void)cycles; // XXX disable warning\n set_tns_now(&q->cons_now, q->t0);\n while (!do_abort) { /* consumer, infinite */\n\tstruct q_pkt *p = (struct q_pkt *)(q->buf + q->head);\n\tif (p->next < q->head) { /* wrap around prefetch */\n\t pre_start = q->buf + p->next;\n\t}\n\tpre_end = q->buf + p->next + 2048;\n#if 1\n\t/* prefetch the first line saves 4ns */\n (void)pre_end;// __builtin_prefetch(pre_end - 2048);\n#else\n\t/* prefetch, ideally up to a full packet not just one line.\n\t * this does not seem to have a huge effect.\n\t * 4ns out of 198 on 1500 byte packets\n\t */\n\tfor (; pre_start < pre_end; pre_start += 64)\n\t __builtin_prefetch(pre_start);\n#endif\n\n\tif (q->head == q->tail || ts_cmp(p->pt_tx, q->cons_now) > 0) {\n\t ND(4, \" >>>> TXSYNC, pkt not ready yet h %ld t %ld now %ld tx %ld\",\n\t\tq->head, q->tail, q->cons_now, p->pt_tx);\n\t q->rx_wait++;\n\t ioctl(pa->pb->fd, NIOCTXSYNC, 0); // XXX just in case\n\t pending = 0;\n\t usleep(5);\n\t set_tns_now(&q->cons_now, q->t0);\n\t continue;\n\t}\n\tND(5, \"drain len %ld now %ld tx %ld h %ld t %ld next %ld\",\n\t\tp->pktlen, q->cons_now, p->pt_tx, q->head, q->tail, p->next);\n\t/* XXX inefficient but simple */\n\tpending++;\n\tif (nm_inject(pa->pb, (char 
*)(p + 1), p->pktlen) == 0 ||\n\t\tpending > q->burst) {\n\t ND(5, \"inject failed len %d now %ld tx %ld h %ld t %ld next %ld\",\n\t\t(int)p->pktlen, q->cons_now, p->pt_tx, q->head, q->tail, p->next);\n\t ioctl(pa->pb->fd, NIOCTXSYNC, 0);\n\t pending = 0;\n\t continue;\n\t}\n\n\tq->head = p->next;\n\t/* drain packets from the queue */\n\tq->rx++;\n\t// XXX barrier\n }\n D(\"exiting on abort\");\n return NULL;\n}\n\n\n/*\n * main thread for each direction.\n * Allocates memory for the queues, creates the prod() thread,\n * then acts as a cons().\n */\nstatic void *\ntlem_main(void *_a)\n{\n struct pipe_args *a = _a;\n struct _qs *q = &a->q;\n uint64_t need;\n\n setaffinity(a->cons_core);\n set_tns_now(&q->t0, 0); /* starting reference */\n\n a->pa = nm_open(q->prod_ifname, NULL, NETMAP_NO_TX_POLL, NULL);\n if (a->pa == NULL) {\n\tED(\"cannot open %s\", q->prod_ifname);\n\treturn NULL;\n }\n // XXX use a single mmap ?\n a->pb = nm_open(q->cons_ifname, NULL, NM_OPEN_NO_MMAP, a->pa);\n if (a->pb == NULL) {\n\tED(\"cannot open %s\", q->cons_ifname);\n\tnm_close(a->pa);\n\treturn NULL;\n }\n a->zerocopy = a->zerocopy && (a->pa->mem == a->pb->mem);\n ND(\"------- zerocopy %ssupported\", a->zerocopy ? \"\" : \"NOT \");\n /* allocate space for the queue:\n * compute required bw*delay (adding 1ms for good measure),\n * then add the queue size in bytes, then multiply by three due\n * to the packet expansion for padding\n */\n\n need = q->max_bps ? q->max_bps : INFINITE_BW;\n need *= q->max_delay + 1000000;\t/* delay is in nanoseconds */\n need /= TIME_UNITS; /* total bits */\n need /= 8; /* in bytes */\n need += q->qsize; /* in bytes */\n need += 3 * MAX_PKT; // safety\n\n /*\n * This is the memory strictly for packets.\n * The size can increase a lot if we account for descriptors and\n * rounding.\n * In fact, the expansion factor can be up to a factor of 3\n * for particularly bad situations (65-byte packets)\n */\n need *= 3; /* room for descriptors and padding */\n\n q->buf = calloc(1, need);\n if (q->buf == NULL) {\n\tED(\"alloc %ld bytes for queue failed, exiting\", (_P64)need);\n\tnm_close(a->pa);\n\tnm_close(a->pb);\n\treturn(NULL);\n }\n q->buflen = need;\n ED(\"----\\n\\t%s -> %s : bps %ld delay %s loss %s queue %ld bytes\"\n\t\"\\n\\tbuffer %lu bytes\",\n\tq->prod_ifname, q->cons_ifname,\n\t(_P64)q->max_bps, q->c_delay.optarg, q->c_loss.optarg, (_P64)q->qsize,\n\t(_P64)q->buflen);\n\n q->src_port = a->pa;\n\n pthread_create(&a->prod_tid, NULL, prod, (void*)a);\n /* continue as cons() */\n cons((void*)a);\n D(\"exiting on abort\");\n return NULL;\n}\n\n\n\nstatic void\nsigint_h(int sig)\n{\n\t(void)sig;\t/* UNUSED */\n\tdo_abort = 1;\n\tsignal(SIGINT, SIG_DFL);\n}\n\n\n\nstatic void\nusage(void)\n{\n\tfprintf(stderr,\n\t \"usage: tlem [-v] [-D delay] [-B bps] [-L loss] [-Q qsize] \\n\"\n\t \"\\t[-b burst] [-w wait_time] -i ifa -i ifb\\n\");\n\texit(1);\n}\n\n\n/*---- configuration handling ---- */\n/*\n * support routine: split argument, returns ac and *av.\n * av contains two extra entries, a NULL and a pointer\n * to the entire string.\n */\nstatic char **\nsplit_arg(const char *src, int *_ac)\n{\n char *my = NULL, **av = NULL, *seps = \" \\t\\r\\n,\";\n int l, i, ac; /* number of entries */\n\n if (!src)\n\treturn NULL;\n l = strlen(src);\n /* in the first pass we count fields, in the second pass\n * we allocate the av[] array and a copy of the string\n * and fill av[]. av[ac] = NULL, av[ac+1] = the copy of the string\n */\n for (;;) {\n\ti = ac = 0;\n\tND(\"start pass %d: <%s>\", av ? 
1 : 0, my);\n\twhile (i < l) {\n\t /* trim leading separator */\n\t while (i <l && strchr(seps, src[i]))\n\t\ti++;\n\t if (i >= l)\n\t\tbreak;\n\t ND(\" pass %d arg %d: <%s>\", av ? 1 : 0, ac, src+i);\n\t if (av) /* in the second pass, set the result */\n\t\tav[ac] = my+i;\n\t ac++;\n\t /* skip string */\n\t while (i <l && !strchr(seps, src[i])) i++;\n\t if (av)\n\t\tmy[i] = '\\0'; /* write marker */\n\t}\n\tif (!av) { /* end of first pass */\n\t ND(\"ac is %d\", ac);\n\t av = calloc(1, (l+1) + (ac + 2)*sizeof(char *));\n\t my = (char *)&(av[ac+2]);\n\t strcpy(my, src);\n\t} else {\n\t break;\n\t}\n }\n for (i = 0; i < ac; i++) fprintf(stderr, \"%d: <%s>\\n\", i, av[i]);\n av[i++] = NULL;\n av[i++] = my;\n *_ac = ac;\n return av;\n}\n\n\n/*\n * apply a command against a set of functions,\n * install a handler in *dst\n */\nstatic int\ncmd_apply(const struct _cfg *a, const char *arg, struct _qs *q, struct _cfg *dst)\n{\n\tint ac = 0;\n\tchar **av;\n\tint i;\n\n\tif (arg == NULL || *arg == '\\0')\n\t\treturn 1; /* no argument may be ok */\n\tif (a == NULL || dst == NULL) {\n\t\tED(\"program error - invalid arguments\");\n\t\texit(1);\n\t}\n\tav = split_arg(arg, &ac);\n\tif (av == NULL)\n\t\treturn 1; /* error */\n\tfor (i = 0; a[i].parse; i++) {\n\t\tstruct _cfg x = a[i];\n\t\tconst char *errmsg = x.optarg;\n\t\tint ret;\n\n\t\tx.arg = NULL;\n\t\tx.arg_len = 0;\n\t\tbzero(&x.d, sizeof(x.d));\n\t\tret = x.parse(q, &x, ac, av);\n\t\tif (ret == 2) /* not recognised */\n\t\t\tcontinue;\n\t\tif (ret == 1) {\n\t\t\tED(\"invalid arguments: need '%s' have '%s'\",\n\t\t\t\terrmsg, arg);\n\t\t\tbreak;\n\t\t}\n\t\tx.optarg = arg;\n\t\t*dst = x;\n\t\treturn 0;\n\t}\n\tED(\"arguments %s not recognised\", arg);\n\tfree(av);\n\treturn 1;\n}\n\nstatic struct _cfg delay_cfg[];\nstatic struct _cfg bw_cfg[];\nstatic struct _cfg loss_cfg[];\n\nstatic uint64_t parse_bw(const char *arg);\nstatic uint64_t parse_qsize(const char *arg);\n\n/*\n * tlem [options]\n * accept separate sets of arguments for the two directions\n *\n */\n\nstatic void\nadd_to(const char ** v, int l, const char *arg, const char *msg)\n{\n\tfor (; l > 0 && *v != NULL ; l--, v++);\n\tif (l == 0) {\n\t\tED(\"%s %s\", msg, arg);\n\t\texit(1);\n\t}\n\t*v = arg;\n}\n\nint\nmain(int argc, char **argv)\n{\n\tint ch, i, err=0;\n\n#define\tN_OPTS\t2\n\tstruct pipe_args bp[N_OPTS];\n\tconst char *d[N_OPTS], *b[N_OPTS], *l[N_OPTS], *q[N_OPTS], *ifname[N_OPTS];\n\tint cores[4] = { 2, 8, 4, 10 }; /* default values */\n\n\tbzero(d, sizeof(d));\n\tbzero(b, sizeof(b));\n\tbzero(l, sizeof(l));\n\tbzero(q, sizeof(q));\n\tbzero(ifname, sizeof(ifname));\n\n\tfprintf(stderr, \"%s built %s %s\\n\", argv[0], __DATE__, __TIME__);\n\n\tbzero(&bp, sizeof(bp));\t/* all data initially go here */\n\n\tfor (i = 0; i < N_OPTS; i++) {\n\t struct _qs *q = &bp[i].q;\n\t q->c_delay.optarg = \"0\";\n\t q->c_delay.run = null_run_fn;\n\t q->c_loss.optarg = \"0\";\n\t q->c_loss.run = null_run_fn;\n\t q->c_bw.optarg = \"0\";\n\t q->c_bw.run = null_run_fn;\n\t}\n\n\t// Options:\n\t// B\tbandwidth in bps\n\t// D\tdelay in seconds\n\t// Q\tqsize in bytes\n\t// L\tloss probability\n\t// i\tinterface name (two mandatory)\n\t// v\tverbose\n\t// b\tbatch size\n\n\twhile ( (ch = getopt(argc, argv, \"B:C:D:L:Q:b:ci:vw:\")) != -1) {\n\t\tswitch (ch) {\n\t\tdefault:\n\t\t\tD(\"bad option %c %s\", ch, optarg);\n\t\t\tusage();\n\t\t\tbreak;\n\n\t\tcase 'C': /* CPU placement, up to 4 arguments */\n\t\t\t{\n\t\t\t\tint ac = 0;\n\t\t\t\tchar **av = split_arg(optarg, &ac);\n\t\t\t\tif (ac == 1) { 
/* sequential after the first */\n\t\t\t\t\tcores[0] = atoi(av[0]);\n\t\t\t\t\tcores[1] = cores[0] + 1;\n\t\t\t\t\tcores[2] = cores[1] + 1;\n\t\t\t\t\tcores[3] = cores[2] + 1;\n\t\t\t\t} else if (ac == 2) { /* two sequential pairs */\n\t\t\t\t\tcores[0] = atoi(av[0]);\n\t\t\t\t\tcores[1] = cores[0] + 1;\n\t\t\t\t\tcores[2] = atoi(av[1]);\n\t\t\t\t\tcores[3] = cores[2] + 1;\n\t\t\t\t} else if (ac == 4) { /* four values */\n\t\t\t\t\tcores[0] = atoi(av[0]);\n\t\t\t\t\tcores[1] = atoi(av[1]);\n\t\t\t\t\tcores[2] = atoi(av[2]);\n\t\t\t\t\tcores[3] = atoi(av[3]);\n\t\t\t\t} else {\n\t\t\t\t\tED(\" -C accepts 1, 2 or 4 comma separated arguments\");\n\t\t\t\t\tusage();\n\t\t\t\t}\n\t\t\t\tif (av)\n\t\t\t\t\tfree(av);\n\t\t\t}\n\t\t\tbreak;\n\n\t\tcase 'B': /* bandwidth in bps */\n\t\t\tadd_to(b, N_OPTS, optarg, \"-B too many times\");\n\t\t\tbreak;\n\n\t\tcase 'D': /* delay in seconds (float) */\n\t\t\tadd_to(d, N_OPTS, optarg, \"-D too many times\");\n\t\t\tbreak;\n\n\t\tcase 'Q': /* qsize in bytes */\n\t\t\tadd_to(q, N_OPTS, optarg, \"-Q too many times\");\n\t\t\tbreak;\n\n\t\tcase 'L': /* loss probability */\n\t\t\tadd_to(l, N_OPTS, optarg, \"-L too many times\");\n\t\t\tbreak;\n\n\t\tcase 'b':\t/* burst */\n\t\t\tbp[0].q.burst = atoi(optarg);\n\t\t\tbreak;\n\n\t\tcase 'i':\t/* interface */\n\t\t\tadd_to(ifname, N_OPTS, optarg, \"-i too many times\");\n\t\t\tbreak;\n\t\tcase 'c':\n\t\t\tbp[0].zerocopy = 0; /* do not zerocopy */\n\t\t\tbreak;\n\t\tcase 'v':\n\t\t\tverbose++;\n\t\t\tbreak;\n\t\tcase 'w':\n\t\t\tbp[0].wait_link = atoi(optarg);\n\t\t\tbreak;\n\t\t}\n\n\t}\n\n\targc -= optind;\n\targv += optind;\n\n\t/*\n\t * consistency checks for common arguments\n\t */\n\tif (!ifname[0] || !ifname[1]) {\n\t\tED(\"missing interface(s)\");\n\t\tusage();\n\t}\n\tif (strcmp(ifname[0], ifname[1]) == 0) {\n\t\tED(\"must specify two different interfaces %s %s\", ifname[0], ifname[1]);\n\t\tusage();\n\t}\n\tif (bp[0].q.burst < 1 || bp[0].q.burst > 8192) {\n\t\tED(\"invalid burst %d, set to 1024\", bp[0].q.burst);\n\t\tbp[0].q.burst = 1024; // XXX 128 is probably better\n\t}\n\tif (bp[0].wait_link > 100) {\n\t\tED(\"invalid wait_link %d, set to 4\", bp[0].wait_link);\n\t\tbp[0].wait_link = 4;\n\t}\n\n\tbp[1] = bp[0]; /* copy parameters, but swap interfaces */\n\tbp[0].q.prod_ifname = bp[1].q.cons_ifname = ifname[0];\n\tbp[1].q.prod_ifname = bp[0].q.cons_ifname = ifname[1];\n\n\t/* assign cores. 
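cores[] holds cons0, prod0, cons1, prod1 in this order. 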
prod and cons work better if on the same HT */\n\tbp[0].cons_core = cores[0];\n\tbp[0].prod_core = cores[1];\n\tbp[1].cons_core = cores[2];\n\tbp[1].prod_core = cores[3];\n\tED(\"running on cores %d %d %d %d\", cores[0], cores[1], cores[2], cores[3]);\n\n\t/* use same parameters for both directions if needed */\n\tif (d[1] == NULL)\n\t\td[1] = d[0];\n\tif (b[1] == NULL)\n\t\tb[1] = b[0];\n\tif (l[1] == NULL)\n\t\tl[1] = l[0];\n\n\t/* apply commands */\n\tfor (i = 0; i < N_OPTS; i++) { /* once per queue */\n\t\tstruct _qs *q = &bp[i].q;\n\t\terr += cmd_apply(delay_cfg, d[i], q, &q->c_delay);\n\t\terr += cmd_apply(bw_cfg, b[i], q, &q->c_bw);\n\t\terr += cmd_apply(loss_cfg, l[i], q, &q->c_loss);\n\t}\n\n\tif (q[0] == NULL)\n\t\tq[0] = \"0\";\n\tif (q[1] == NULL)\n\t\tq[1] = q[0];\n\tbp[0].q.qsize = parse_qsize(q[0]);\n\tbp[1].q.qsize = parse_qsize(q[1]);\n\n\tif (bp[0].q.qsize == 0) {\n\t\tED(\"qsize= 0 is not valid, set to 50k\");\n\t\tbp[0].q.qsize = 50000;\n\t}\n\tif (bp[1].q.qsize == 0) {\n\t\tED(\"qsize= 0 is not valid, set to 50k\");\n\t\tbp[1].q.qsize = 50000;\n\t}\n\n\tpthread_create(&bp[0].cons_tid, NULL, tlem_main, (void*)&bp[0]);\n\tpthread_create(&bp[1].cons_tid, NULL, tlem_main, (void*)&bp[1]);\n\n\tsignal(SIGINT, sigint_h);\n\tsleep(1);\n\twhile (!do_abort) {\n\t struct _qs olda = bp[0].q, oldb = bp[1].q;\n\t struct _qs *q0 = &bp[0].q, *q1 = &bp[1].q;\n\n\t sleep(1);\n\t ED(\"%ld -> %ld maxq %d round %ld, %ld <- %ld maxq %d round %ld\",\n\t\t(_P64)(q0->rx - olda.rx), (_P64)(q0->tx - olda.tx),\n\t\tq0->rx_qmax, (_P64)q0->prod_max_gap,\n\t\t(_P64)(q1->rx - oldb.rx), (_P64)(q1->tx - oldb.tx),\n\t\tq1->rx_qmax, (_P64)q1->prod_max_gap\n\t\t);\n\t ED(\"plr nominal %le actual %le\",\n\t\t(double)(q0->c_loss.d[0])/(1<<24),\n\t\tq0->c_loss.d[1] == 0 ? 
0 :\n\t\t(double)(q0->c_loss.d[2])/q0->c_loss.d[1]);\n\t bp[0].q.rx_qmax = (bp[0].q.rx_qmax * 7)/8; // ewma\n\t bp[0].q.prod_max_gap = (bp[0].q.prod_max_gap * 7)/8; // ewma\n\t bp[1].q.rx_qmax = (bp[1].q.rx_qmax * 7)/8; // ewma\n\t bp[1].q.prod_max_gap = (bp[1].q.prod_max_gap * 7)/8; // ewma\n\t}\n\tD(\"exiting on abort\");\n\tsleep(1);\n\n\treturn (0);\n}\n\n/* conversion factor for numbers.\n * Each entry has a set of characters and conversion factor,\n * the first entry should have an empty string and default factor,\n * the final entry has s = NULL.\n */\nstruct _sm {\t/* string and multiplier */\n\tchar *s;\n\tdouble m;\n};\n\n/*\n * parse a generic value\n */\nstatic double\nparse_gen(const char *arg, const struct _sm *conv, int *err)\n{\n\tdouble d;\n\tchar *ep;\n\tint dummy;\n\n\tif (err == NULL)\n\t\terr = &dummy;\n\t*err = 0;\n\tif (arg == NULL)\n\t\tgoto error;\n\td = strtod(arg, &ep);\n\tif (ep == arg) { /* no value */\n\t\tED(\"bad argument %s\", arg);\n\t\tgoto error;\n\t}\n\t/* special case, no conversion */\n\tif (conv == NULL && *ep == '\\0')\n\t\tgoto done;\n\tND(\"checking %s [%s]\", arg, ep);\n\tfor (;conv->s; conv++) {\n\t\tif (strchr(conv->s, *ep))\n\t\t\tgoto done;\n\t}\nerror:\n\t*err = 1;\t/* unrecognised */\n\treturn 0;\n\ndone:\n\tif (conv) {\n\t\tND(\"scale is %s %lf\", conv->s, conv->m);\n\t\td *= conv->m; /* apply default conversion */\n\t}\n\tND(\"returning %lf\", d);\n\treturn d;\n}\n\n#define U_PARSE_ERR ~(0ULL)\n\n/* returns a value in nanoseconds */\nstatic uint64_t\nparse_time(const char *arg)\n{\n struct _sm a[] = {\n\t{\"\", 1000000000 /* seconds */},\n\t{\"n\", 1 /* nanoseconds */}, {\"u\", 1000 /* microseconds */},\n\t{\"m\", 1000000 /* milliseconds */}, {\"s\", 1000000000 /* seconds */},\n\t{NULL, 0 /* seconds */}\n };\n int err;\n uint64_t ret = (uint64_t)parse_gen(arg, a, &err);\n return err ? U_PARSE_ERR : ret;\n}\n\n\n/*\n * parse a bandwidth, returns value in bps or U_PARSE_ERR if error.\n */\nstatic uint64_t\nparse_bw(const char *arg)\n{\n struct _sm a[] = {\n\t{\"\", 1}, {\"kK\", 1000}, {\"mM\", 1000000}, {\"gG\", 1000000000}, {NULL, 0}\n };\n int err;\n uint64_t ret = (uint64_t)parse_gen(arg, a, &err);\n return err ? U_PARSE_ERR : ret;\n}\n\n/*\n * parse a queue size, returns value in bytes or U_PARSE_ERR if error.\n */\nstatic uint64_t\nparse_qsize(const char *arg)\n{\n struct _sm a[] = {\n\t{\"\", 1}, {\"kK\", 1024}, {\"mM\", 1024*1024}, {\"gG\", 1024*1024*1024}, {NULL, 0}\n };\n int err;\n uint64_t ret = (uint64_t)parse_gen(arg, a, &err);\n return err ? U_PARSE_ERR : ret;\n}\n\n/*\n * For some function we need random bits.\n * This is a wrapper to whatever function you want that returns\n * 24 useful random bits.\n */\n\n#include <math.h> /* log, exp etc. */\nstatic inline uint64_t\nmy_random24(void)\t/* 24 useful bits */\n{\n\treturn random() & ((1<<24) - 1);\n}\n\n\n/*-------------- user-configuration -----------------*/\n\n#if 0 /* start of comment block */\n\nHere we place the functions to implement the various features\nof the system. For each feature one should define a struct _cfg\n(see at the beginning for definition) that refers a *_parse() function\nto extract values from the command line, and a *_run() function\nthat is invoked on each packet to implement the desired function.\n\nExamples of the two functions are below. 
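As a concrete illustration, here is a minimal sketch of such a pair\n(hypothetical: \"fixed\" is not a built-in tlem option, the names below are\nonly for illustration; it mirrors the constant delay handler):\n\n\tstatic int\n\tfixed_delay_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n\t{\n\t\tuint64_t d;\n\n\t\tif (strcmp(av[0], \"fixed\") != 0)\n\t\t\treturn 2;\t/* not ours, let the next handler try */\n\t\tif (ac != 2)\n\t\t\treturn 1;\t/* wrong number of arguments */\n\t\td = parse_time(av[1]);\n\t\tif (d == U_PARSE_ERR)\n\t\t\treturn 1;\n\t\tdst->d[0] = d;\t\t/* use the preallocated storage */\n\t\tq->max_delay = d;\t/* needed to size the queue */\n\t\treturn 0;\n\t}\n\n\tstatic int\n\tfixed_delay_run(struct _qs *q, struct _cfg *arg)\n\t{\n\t\tq->cur_delay = arg->d[0];\n\t\treturn 0;\n\t}\n\nThe pair would then be listed in delay_cfg[], e.g.\n\n\t{ fixed_delay_parse, fixed_delay_run, \"fixed,delay\", TLEM_CFG_END },\n\nso that cmd_apply() can match it against the -D argument.\n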
In general:\n\n- the *_parse() function takes argc/argv[], matches the function\n name in argv[0], extracts the operating parameters, allocates memory\n if needed, and stores them in the struct _cfg.\n Return value is 2 if argv[0] is not recognised, 1 if there is an\n error in the arguments, 0 if all ok.\n\n On the command line, argv[] is a single, comma separated argument\n that follows the specific option, e.g. -D constant,20ms\n\n struct _cfg has some preallocated space (e.g. an array of uint64_t) so simple\n functions can use that without having to allocate memory.\n\n- the *_run() function takes struct _qs *q and struct _cfg *cfg as arguments.\n *q contains all the information that may possibly be needed, including\n the fields describing the packet currently under processing.\n The basic values are the following:\n\n\tchar *\t cur_pkt \tpoints to the current packet (linear buffer)\n\tuint32_t cur_len;\tlength of the current packet\n\t\tthe functions are not supposed to modify these values\n\n\tint\t cur_drop;\ttrue if current packet must be dropped.\n\t\tMust be set to non-zero by the loss emulation function\n\n\tuint64_t cur_delay;\tdelay in nanoseconds for the current packet\n\t\tMust be set by the delay emulation function\n\n More sophisticated functions may need to access other fields in *q,\n see the structure description for that.\n\nWhen implementing a new function for a feature (e.g. for delay,\nbandwidth, loss...) the struct _cfg should be added to the array\nthat contains all possible options.\n\n\t\t--- Specific notes ---\n\nDELAY emulation\t\t-D option_arguments\n\n NOTE: The config function should store, in q->max_delay,\n a reasonable estimate of the maximum delay applied to the packets\n as this is needed to size the memory buffer used to store packets.\n\n If the option is not supplied, the system applies 0 extra delay\n\n The resolution for times is 1ns, the precision is load dependent and\n generally in the order of 20-50us.\n Times are in nanoseconds, can be followed by a character specifying\n a different unit e.g.\n\n\tn\tnanoseconds\n\tu\tmicroseconds\n\tm\tmilliseconds\n\ts\tseconds\n\n Currently implemented options:\n\n constant,t\t\tconstant delay equal to t\n\n uniform,tmin,tmax\tuniform delay between tmin and tmax\n\n exp,tavg,tmin\texponential distribution with average tavg\n\t\t\tand minimum tmin (corresponds to an exponential\n\t\t\tdistribution with argument 1/(tavg-tmin) )\n\n\nLOSS emulation\t\t-L option_arguments\n\n Loss is expressed as packet or bit error rate, which is an absolute\n number between 0 and 1 (typically small).\n\n Currently implemented options\n\n plr,p\t\tuniform packet loss rate p, independent\n\t\t\tof packet size\n\n burst,p,lmin,lmax \tburst loss with burst probability p and\n\t\t\tburst length uniformly distributed between\n\t\t\tlmin and lmax (documented here, but with no\n\t\t\thandler in loss_cfg[] below)\n\n ber,p\t\tuniformly distributed bit error rate p,\n\t\t\tso actual loss prob. 
depends on size.\n\nBANDWIDTH emulation\t-B option_arguments\n\n Bandwidths are expressed in bits per second, can be followed by a\n character specifying a different unit e.g.\n\n\tb/B\tbits per second\n\tk/K\tkbits/s (10^3 bits/s)\n\tm/M\tmbits/s (10^6 bits/s)\n\tg/G\tgbits/s (10^9 bits/s)\n\n The config function should store in q->max_bps the maximum\n available bandwidth, which is used to determine how much space\n is needed in the queue.\n\n Currently implemented options\n\n const,b\t\tconstant bw, excluding mac framing\n ether,b\t\tconstant bw, including ethernet framing\n\t\t\t(20 bytes framing + 4 bytes crc)\n\n#endif /* end of comment block */\n\n/*\n * Configuration options for delay\n *\n * Must store a reasonable estimate of the max_delay in q->max_delay\n * as this is used to size the queue.\n */\n\n/* constant delay, also accepts just a number */\nstatic int\nconst_delay_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n\tuint64_t delay;\n\n\tif (strncmp(av[0], \"const\", 5) != 0 && ac > 1)\n\t\treturn 2; /* unrecognised */\n\tif (ac > 2)\n\t\treturn 1; /* error */\n\tdelay = parse_time(av[ac - 1]);\n\tif (delay == U_PARSE_ERR)\n\t\treturn 1; /* error */\n\tdst->d[0] = delay;\n\tq->max_delay = delay;\n\treturn 0;\t/* success */\n}\n\n/* runtime function, store the delay into q->cur_delay */\nstatic int\nconst_delay_run(struct _qs *q, struct _cfg *arg)\n{\n\tq->cur_delay = arg->d[0]; /* the delay */\n\treturn 0;\n}\n\nstatic int\nuniform_delay_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n\tuint64_t dmin, dmax;\n\n\t(void)q;\n\tif (strcmp(av[0], \"uniform\") != 0)\n\t\treturn 2; /* not recognised */\n\tif (ac != 3)\n\t\treturn 1; /* error */\n\tdmin = parse_time(av[1]);\n\tdmax = parse_time(av[2]);\n\tif (dmin == U_PARSE_ERR || dmax == U_PARSE_ERR || dmin > dmax)\n\t\treturn 1;\n\tD(\"dmin %ld dmax %ld\", (_P64)dmin, (_P64)dmax);\n\tdst->d[0] = dmin;\n\tdst->d[1] = dmax;\n\tdst->d[2] = dmax - dmin;\n\tq->max_delay = dmax;\n\treturn 0;\n}\n\nstatic int\nuniform_delay_run(struct _qs *q, struct _cfg *arg)\n{\n\tuint64_t x = my_random24();\n\tq->cur_delay = arg->d[0] + ((arg->d[2] * x) >> 24);\n#if 0 /* COMPUTE_STATS */\n#endif /* COMPUTE_STATS */\n\treturn 0;\n}\n\n/*\n * exponential delay: Prob(delay > x) = exp(-x/d_av)\n * gives a delay between 0 and infinity with average d_av\n * The cumulative function is 1 - exp(-x/d_av)\n *\n * The inverse function generates a uniform random number p in 0..1\n * and generates delay = (d_av-d_min) * -ln(1-p) + d_min\n *\n * To speed up behaviour at runtime we tabulate the values\n */\n\nstatic int\nexp_delay_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n#define\tPTS_D_EXP\t512\n\tuint64_t i, d_av, d_min, *t; /* table of values */\n\n (void)q;\n if (strcmp(av[0], \"exp\") != 0)\n\t\treturn 2; /* not recognised */\n if (ac != 3)\n return 1; /* error */\n d_av = parse_time(av[1]);\n d_min = parse_time(av[2]);\n if (d_av == U_PARSE_ERR || d_min == U_PARSE_ERR || d_av < d_min)\n return 1; /* error */\n\td_av -= d_min;\n\tdst->arg_len = PTS_D_EXP * sizeof(uint64_t);\n\tdst->arg = calloc(1, dst->arg_len);\n\tif (dst->arg == NULL)\n\t\treturn 1; /* no memory */\n\tt = (uint64_t *)dst->arg;\n q->max_delay = d_av * 4 + d_min; /* exp(-4) */\n\t/* tabulate -ln(1-n)*delay for n in 0..1 (natural log, as in the formula above) */\n\tfor (i = 0; i < PTS_D_EXP; i++) {\n\t\tdouble d = -log ((double)(PTS_D_EXP - i) / PTS_D_EXP) * d_av + d_min;\n\t\tt[i] = (uint64_t)d;\n\t\tND(5, \"%ld: %le\", i, d);\n\t}\n return 0;\n}\n\nstatic 
int\nexp_delay_run(struct _qs *q, struct _cfg *arg)\n{\n\tuint64_t *t = (uint64_t *)arg->arg;\n q->cur_delay = t[my_random24() & (PTS_D_EXP - 1)];\n\tRD(5, \"delay %lu\", (_P64)q->cur_delay);\n return 0;\n}\n\n\n#define TLEM_CFG_END\tNULL, 0, {0}\n\nstatic struct _cfg delay_cfg[] = {\n\t{ const_delay_parse, const_delay_run,\n\t\t\"constant,delay\", TLEM_CFG_END },\n\t{ uniform_delay_parse, uniform_delay_run,\n\t\t\"uniform,dmin,dmax # dmin <= dmax\", TLEM_CFG_END },\n\t{ exp_delay_parse, exp_delay_run,\n\t\t\"exp,davg,dmin # dmin <= davg\", TLEM_CFG_END },\n\t{ NULL, NULL, NULL, TLEM_CFG_END }\n};\n\n/* standard bandwidth, also accepts just a number */\nstatic int\nconst_bw_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n\tuint64_t bw;\n\n\t(void)q;\n\tif (strncmp(av[0], \"const\", 5) != 0 && ac > 1)\n\t\treturn 2; /* unrecognised */\n\tif (ac > 2)\n\t\treturn 1; /* error */\n\tbw = parse_bw(av[ac - 1]);\n\tif (bw == U_PARSE_ERR) {\n\t\treturn (ac == 2) ? 1 /* error */ : 2 /* unrecognised */;\n\t}\n\tdst->d[0] = bw;\n\tq->max_bps = bw;\t/* bw used to determine queue size */\n\treturn 0;\t/* success */\n}\n\n\n/* runtime function, store the transmission time into q->cur_tt */\nstatic int\nconst_bw_run(struct _qs *q, struct _cfg *arg)\n{\n\tuint64_t bps = arg->d[0];\n\tq->cur_tt = bps ? 8ULL * TIME_UNITS * q->cur_len / bps : 0 ;\n\treturn 0;\n}\n\n/* ethernet bandwidth, add 672 bits per packet */\nstatic int\nether_bw_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n\tuint64_t bw;\n\n\t(void)q;\n\tif (strcmp(av[0], \"ether\") != 0)\n\t\treturn 2; /* unrecognised */\n\tif (ac != 2)\n\t\treturn 1; /* error */\n\tbw = parse_bw(av[ac - 1]);\n\tif (bw == U_PARSE_ERR)\n\t\treturn 1; /* error */\n\tdst->d[0] = bw;\n\tq->max_bps = bw;\t/* bw used to determine queue size */\n\treturn 0;\t/* success */\n}\n\n\n/* runtime function, add 20 bytes (framing) + 4 bytes (crc) */\nstatic int\nether_bw_run(struct _qs *q, struct _cfg *arg)\n{\n\tuint64_t bps = arg->d[0];\n\tq->cur_tt = bps ? 
8ULL * TIME_UNITS * (q->cur_len + 24) / bps : 0 ;\n\treturn 0;\n}\n\nstatic struct _cfg bw_cfg[] = {\n\t{ const_bw_parse, const_bw_run,\n\t\t\"constant,bps\", TLEM_CFG_END },\n\t{ ether_bw_parse, ether_bw_run,\n\t\t\"ether,bps\", TLEM_CFG_END },\n\t{ NULL, NULL, NULL, TLEM_CFG_END }\n};\n\n/*\n * loss patterns\n */\nstatic int\nconst_plr_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n\tdouble plr;\n\tint err;\n\n\t(void)q;\n\tif (strcmp(av[0], \"plr\") != 0 && ac > 1)\n\t\treturn 2; /* unrecognised */\n\tif (ac > 2)\n\t\treturn 1; /* error */\n\t// XXX to be completed\n\tplr = parse_gen(av[ac-1], NULL, &err);\n\tif (err || plr < 0 || plr > 1)\n\t\treturn 1;\n\tdst->d[0] = plr * (1<<24); /* scale is 16m */\n\tif (plr != 0 && dst->d[0] == 0)\n\t\tED(\"WWW warning, rounding %le down to 0\", plr);\n\treturn 0;\t/* success */\n}\n\nstatic int\nconst_plr_run(struct _qs *q, struct _cfg *arg)\n{\n\t(void)arg;\n\tuint64_t r = my_random24();\n\tq->cur_drop = r < arg->d[0];\n#if 1\t/* keep stats */\n\targ->d[1]++;\n\targ->d[2] += q->cur_drop;\n#endif\n\treturn 0;\n}\n\n\n/*\n * For BER the loss is 1- (1-ber)**bit_len\n * The linear approximation is only good for small values, so we\n * tabulate (1-ber)**len for various sizes in bytes\n */\nstatic int\nconst_ber_parse(struct _qs *q, struct _cfg *dst, int ac, char *av[])\n{\n\tdouble ber, ber8, cur;\n\tint i, err;\n\tuint32_t *plr;\n\tconst uint32_t mask = (1<<24) - 1;\n\n\t(void)q;\n\tif (strcmp(av[0], \"ber\") != 0)\n\t\treturn 2; /* unrecognised */\n\tif (ac != 2)\n\t\treturn 1; /* error */\n\tber = parse_gen(av[ac-1], NULL, &err);\n\tif (err || ber < 0 || ber > 1)\n\t\treturn 1;\n\tdst->arg_len = MAX_PKT * sizeof(uint32_t);\n\tplr = calloc(1, dst->arg_len);\n\tif (plr == NULL)\n\t\treturn 1; /* no memory */\n\tdst->arg = plr;\n\tber8 = 1 - ber;\n\tber8 *= ber8; /* **2 */\n\tber8 *= ber8; /* **4 */\n\tber8 *= ber8; /* **8 */\n\tcur = 1;\n\tfor (i=0; i < MAX_PKT; i++, cur *= ber8) {\n\t\tplr[i] = (mask + 1)*(1 - cur);\n\t\tif (plr[i] > mask)\n\t\t\tplr[i] = mask;\n#if 0\n\t\tif (i>= 60) // && plr[i] < mask/2)\n\t\t\tRD(50,\"%4d: %le %ld\", i, 1.0 - cur, (_P64)plr[i]);\n#endif\n\t}\n\tdst->d[0] = ber * (mask + 1);\n\treturn 0;\t/* success */\n}\n\nstatic int\nconst_ber_run(struct _qs *q, struct _cfg *arg)\n{\n\tint l = q->cur_len;\n\tuint64_t r = my_random24();\n\tuint32_t *plr = arg->arg;\n\n\tif (l >= MAX_PKT) {\n\t\tRD(5, \"pkt len %d too large, trim to %d\", l, MAX_PKT-1);\n\t\tl = MAX_PKT-1;\n\t}\n\tq->cur_drop = r < plr[l];\n#if 1\t/* keep stats */\n\targ->d[1] += l * 8;\n\targ->d[2] += q->cur_drop;\n#endif\n\treturn 0;\n}\n\nstatic struct _cfg loss_cfg[] = {\n\t{ const_plr_parse, const_plr_run,\n\t\t\"plr,prob # 0 <= prob <= 1\", TLEM_CFG_END },\n\t{ const_ber_parse, const_ber_run,\n\t\t\"ber,prob # 0 <= prob <= 1\", TLEM_CFG_END },\n\t{ NULL, NULL, NULL, TLEM_CFG_END }\n};\n" }, { "alpha_fraction": 0.6083146333694458, "alphanum_fraction": 0.6118543148040771, "avg_line_length": 27.192705154418945, "blob_id": "effa8d3d117b198ecd5fbf9b53d616c12bb84303", "content_id": "8b5c7edfac62258794f48523e8e0ec80a8bf0183", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 40964, "license_type": "permissive", "max_line_length": 85, "num_lines": 1453, "path": "/sys/dev/netmap/netmap_pt.c", "repo_name": "phoenix1796/netmapWin", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2015 Stefano Garzarella\n * Copyright (C) 2016 Vincenzo Maffione\n * All rights reserved.\n *\n * 
Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n *\n * $FreeBSD$\n */\n\n/*\n * common headers\n */\n#if defined(__FreeBSD__)\n#include <sys/cdefs.h>\n#include <sys/param.h>\n#include <sys/kernel.h>\n#include <sys/types.h>\n#include <sys/selinfo.h>\n#include <sys/socket.h>\n#include <net/if.h>\n#include <net/if_var.h>\n#include <machine/bus.h>\n\n//#define usleep_range(_1, _2)\n#define usleep_range(_1, _2) \\\n\tpause_sbt(\"ptnetmap-sleep\", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)\n\n#elif defined(linux)\n#include <bsd_glue.h>\n#endif\n\n#include <net/netmap.h>\n#include <dev/netmap/netmap_kern.h>\n#include <net/netmap_virt.h>\n#include <dev/netmap/netmap_mem2.h>\n\n#ifdef WITH_PTNETMAP_HOST\n\n/* RX cycle without receive any packets */\n#define PTN_RX_DRY_CYCLES_MAX\t10\n\n/* Limit Batch TX to half ring.\n * Currently disabled, since it does not manage NS_MOREFRAG, which\n * results in random drops in the VALE txsync. */\n//#define PTN_TX_BATCH_LIM(_n)\t((_n >> 1))\n\n//#define BUSY_WAIT\n\n#define NETMAP_PT_DEBUG /* Enables communication debugging. */\n#ifdef NETMAP_PT_DEBUG\n#define DBG(x) x\n#else\n#define DBG(x)\n#endif\n\n\n#undef RATE\n//#define RATE /* Enables communication statistics. */\n#ifdef RATE\n#define IFRATE(x) x\nstruct rate_batch_stats {\n unsigned long sync;\n unsigned long sync_dry;\n unsigned long pkt;\n};\n\nstruct rate_stats {\n unsigned long gtxk; /* Guest --> Host Tx kicks. */\n unsigned long grxk; /* Guest --> Host Rx kicks. */\n unsigned long htxk; /* Host --> Guest Tx kicks. */\n unsigned long hrxk; /* Host --> Guest Rx Kicks. */\n unsigned long btxwu; /* Backend Tx wake-up. */\n unsigned long brxwu; /* Backend Rx wake-up. 
*/\n struct rate_batch_stats txbs;\n struct rate_batch_stats rxbs;\n};\n\nstruct rate_context {\n struct timer_list timer;\n struct rate_stats new;\n struct rate_stats old;\n};\n\n#define RATE_PERIOD 2\nstatic void\nrate_callback(unsigned long arg)\n{\n struct rate_context * ctx = (struct rate_context *)arg;\n struct rate_stats cur = ctx->new;\n struct rate_batch_stats *txbs = &cur.txbs;\n struct rate_batch_stats *rxbs = &cur.rxbs;\n struct rate_batch_stats *txbs_old = &ctx->old.txbs;\n struct rate_batch_stats *rxbs_old = &ctx->old.rxbs;\n uint64_t tx_batch, rx_batch;\n unsigned long txpkts, rxpkts;\n unsigned long gtxk, grxk;\n int r;\n\n txpkts = txbs->pkt - txbs_old->pkt;\n rxpkts = rxbs->pkt - rxbs_old->pkt;\n\n tx_batch = ((txbs->sync - txbs_old->sync) > 0) ?\n\t txpkts / (txbs->sync - txbs_old->sync): 0;\n rx_batch = ((rxbs->sync - rxbs_old->sync) > 0) ?\n\t rxpkts / (rxbs->sync - rxbs_old->sync): 0;\n\n /* Fix-up gtxk and grxk estimates. */\n gtxk = (cur.gtxk - ctx->old.gtxk) - (cur.btxwu - ctx->old.btxwu);\n grxk = (cur.grxk - ctx->old.grxk) - (cur.brxwu - ctx->old.brxwu);\n\n printk(\"txpkts = %lu Hz\\n\", txpkts/RATE_PERIOD);\n printk(\"gtxk = %lu Hz\\n\", gtxk/RATE_PERIOD);\n printk(\"htxk = %lu Hz\\n\", (cur.htxk - ctx->old.htxk)/RATE_PERIOD);\n printk(\"btxw = %lu Hz\\n\", (cur.btxwu - ctx->old.btxwu)/RATE_PERIOD);\n printk(\"rxpkts = %lu Hz\\n\", rxpkts/RATE_PERIOD);\n printk(\"grxk = %lu Hz\\n\", grxk/RATE_PERIOD);\n printk(\"hrxk = %lu Hz\\n\", (cur.hrxk - ctx->old.hrxk)/RATE_PERIOD);\n printk(\"brxw = %lu Hz\\n\", (cur.brxwu - ctx->old.brxwu)/RATE_PERIOD);\n printk(\"txbatch = %llu avg\\n\", tx_batch);\n printk(\"rxbatch = %llu avg\\n\", rx_batch);\n printk(\"\\n\");\n\n ctx->old = cur;\n r = mod_timer(&ctx->timer, jiffies +\n msecs_to_jiffies(RATE_PERIOD * 1000));\n if (unlikely(r))\n D(\"[ptnetmap] Error: mod_timer()\\n\");\n}\n\nstatic void\nrate_batch_stats_update(struct rate_batch_stats *bf, uint32_t pre_tail,\n\t\t uint32_t act_tail, uint32_t num_slots)\n{\n int n = (int)act_tail - pre_tail;\n\n if (n) {\n if (n < 0)\n n += num_slots;\n\n bf->sync++;\n bf->pkt += n;\n } else {\n bf->sync_dry++;\n }\n}\n\n#else /* !RATE */\n#define IFRATE(x)\n#endif /* RATE */\n\nstruct ptnetmap_state {\n /* Kthreads. */\n struct nm_kthread **kthreads;\n\n /* Shared memory with the guest (TX/RX) */\n struct ptnet_ring __user *ptrings;\n\n bool stopped;\n\n /* Netmap adapter wrapping the backend. */\n struct netmap_pt_host_adapter *pth_na;\n\n IFRATE(struct rate_context rate_ctx;)\n};\n\nstatic inline void\nptnetmap_kring_dump(const char *title, const struct netmap_kring *kring)\n{\n RD(1, \"%s - name: %s hwcur: %d hwtail: %d rhead: %d rcur: %d \\\n \t\t rtail: %d head: %d cur: %d tail: %d\",\n title, kring->name, kring->nr_hwcur,\n kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail,\n kring->ring->head, kring->ring->cur, kring->ring->tail);\n}\n\n/*\n * TX functions to set/get and to handle host/guest kick.\n */\n\n\n/* Enable or disable guest --> host kicks. */\nstatic inline void\nptring_kick_enable(struct ptnet_ring __user *ptring, uint32_t val)\n{\n CSB_WRITE(ptring, host_need_kick, val);\n}\n\n/* Are guest interrupt enabled or disabled? */\nstatic inline uint32_t\nptring_intr_enabled(struct ptnet_ring __user *ptring)\n{\n uint32_t v;\n\n CSB_READ(ptring, guest_need_kick, v);\n\n return v;\n}\n\n/* Enable or disable guest interrupts. 
*/\nstatic inline void\nptring_intr_enable(struct ptnet_ring __user *ptring, uint32_t val)\n{\n CSB_WRITE(ptring, guest_need_kick, val);\n}\n\n/* Handle TX events: from the guest or from the backend */\nstatic void\nptnetmap_tx_handler(void *data)\n{\n struct netmap_kring *kring = data;\n struct netmap_pt_host_adapter *pth_na =\n\t\t(struct netmap_pt_host_adapter *)kring->na->na_private;\n struct ptnetmap_state *ptns = pth_na->ptns;\n struct ptnet_ring __user *ptring;\n struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */\n bool more_txspace = false;\n struct nm_kthread *kth;\n uint32_t num_slots;\n int batch;\n IFRATE(uint32_t pre_tail);\n\n if (unlikely(!ptns)) {\n D(\"ERROR ptnetmap state is NULL\");\n return;\n }\n\n if (unlikely(ptns->stopped)) {\n RD(1, \"backend netmap is being stopped\");\n return;\n }\n\n if (unlikely(nm_kr_tryget(kring, 1, NULL))) {\n D(\"ERROR nm_kr_tryget()\");\n return;\n }\n\n /* This is a guess, to be fixed in the rate callback. */\n IFRATE(ptns->rate_ctx.new.gtxk++);\n\n /* Get TX ptring pointer from the CSB. */\n ptring = ptns->ptrings + kring->ring_id;\n kth = ptns->kthreads[kring->ring_id];\n\n num_slots = kring->nkr_num_slots;\n shadow_ring.head = kring->rhead;\n shadow_ring.cur = kring->rcur;\n\n /* Disable guest --> host notifications. */\n ptring_kick_enable(ptring, 0);\n /* Copy the guest kring pointers from the CSB */\n ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);\n\n for (;;) {\n\t/* If guest moves ahead too fast, let's cut the move so\n\t * that we don't exceed our batch limit. */\n batch = shadow_ring.head - kring->nr_hwcur;\n if (batch < 0)\n batch += num_slots;\n\n#ifdef PTN_TX_BATCH_LIM\n if (batch > PTN_TX_BATCH_LIM(num_slots)) {\n uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);\n\n if (head_lim >= num_slots)\n head_lim -= num_slots;\n ND(1, \"batch: %d head: %d head_lim: %d\", batch, shadow_ring.head,\n\t\t\t\t\t\t head_lim);\n shadow_ring.head = head_lim;\n\t batch = PTN_TX_BATCH_LIM(num_slots);\n }\n#endif /* PTN_TX_BATCH_LIM */\n\n if (nm_kr_txspace(kring) <= (num_slots >> 1)) {\n shadow_ring.flags |= NAF_FORCE_RECLAIM;\n }\n\n /* Netmap prologue */\n\tshadow_ring.tail = kring->rtail;\n if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {\n /* Reinit ring and enable notifications. */\n netmap_ring_reinit(kring);\n ptring_kick_enable(ptring, 1);\n break;\n }\n\n if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {\n ptnetmap_kring_dump(\"pre txsync\", kring);\n\t}\n\n IFRATE(pre_tail = kring->rtail);\n if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {\n /* Reenable notifications. */\n ptring_kick_enable(ptring, 1);\n D(\"ERROR txsync()\");\n\t break;\n }\n\n /*\n * Finalize\n * Copy host hwcur and hwtail into the CSB for the guest sync(), and\n\t * do the nm_sync_finalize.\n */\n ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur,\n\t\t\t\t kring->nr_hwtail);\n if (kring->rtail != kring->nr_hwtail) {\n\t /* Some more room available in the parent adapter. */\n\t kring->rtail = kring->nr_hwtail;\n\t more_txspace = true;\n }\n\n IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.txbs, pre_tail,\n\t\t\t\t kring->rtail, num_slots));\n\n if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {\n ptnetmap_kring_dump(\"post txsync\", kring);\n\t}\n\n#ifndef BUSY_WAIT\n /* Interrupt the guest if needed. 
*/\n if (more_txspace && ptring_intr_enabled(ptring)) {\n /* Disable guest kick to avoid sending unnecessary kicks */\n ptring_intr_enable(ptring, 0);\n nm_os_kthread_send_irq(kth);\n IFRATE(ptns->rate_ctx.new.htxk++);\n more_txspace = false;\n }\n#endif\n /* Read CSB to see if there is more work to do. */\n ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);\n#ifndef BUSY_WAIT\n if (shadow_ring.head == kring->rhead) {\n /*\n * No more packets to transmit. We enable notifications and\n * go to sleep, waiting for a kick from the guest when new\n * new slots are ready for transmission.\n */\n usleep_range(1,1);\n /* Reenable notifications. */\n ptring_kick_enable(ptring, 1);\n /* Doublecheck. */\n ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);\n if (shadow_ring.head != kring->rhead) {\n\t\t/* We won the race condition, there are more packets to\n\t\t * transmit. Disable notifications and do another cycle */\n\t\tptring_kick_enable(ptring, 0);\n\t\tcontinue;\n\t }\n\t break;\n }\n\n\tif (nm_kr_txempty(kring)) {\n\t /* No more available TX slots. We stop waiting for a notification\n\t * from the backend (netmap_tx_irq). */\n ND(1, \"TX ring\");\n break;\n }\n#endif\n if (unlikely(ptns->stopped)) {\n D(\"backend netmap is being stopped\");\n break;\n }\n }\n\n nm_kr_put(kring);\n\n if (more_txspace && ptring_intr_enabled(ptring)) {\n ptring_intr_enable(ptring, 0);\n nm_os_kthread_send_irq(kth);\n IFRATE(ptns->rate_ctx.new.htxk++);\n }\n}\n\n/*\n * We need RX kicks from the guest when (tail == head-1), where we wait\n * for the guest to refill.\n */\n#ifndef BUSY_WAIT\nstatic inline int\nptnetmap_norxslots(struct netmap_kring *kring, uint32_t g_head)\n{\n return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,\n \t\t\t kring->nkr_num_slots - 1));\n}\n#endif /* !BUSY_WAIT */\n\n/* Handle RX events: from the guest or from the backend */\nstatic void\nptnetmap_rx_handler(void *data)\n{\n struct netmap_kring *kring = data;\n struct netmap_pt_host_adapter *pth_na =\n\t\t(struct netmap_pt_host_adapter *)kring->na->na_private;\n struct ptnetmap_state *ptns = pth_na->ptns;\n struct ptnet_ring __user *ptring;\n struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */\n struct nm_kthread *kth;\n uint32_t num_slots;\n int dry_cycles = 0;\n bool some_recvd = false;\n IFRATE(uint32_t pre_tail);\n\n if (unlikely(!ptns || !ptns->pth_na)) {\n D(\"ERROR ptnetmap state %p, ptnetmap host adapter %p\", ptns,\n\t ptns ? ptns->pth_na : NULL);\n return;\n }\n\n if (unlikely(ptns->stopped)) {\n RD(1, \"backend netmap is being stopped\");\n\treturn;\n }\n\n if (unlikely(nm_kr_tryget(kring, 1, NULL))) {\n D(\"ERROR nm_kr_tryget()\");\n\treturn;\n }\n\n /* This is a guess, to be fixed in the rate callback. */\n IFRATE(ptns->rate_ctx.new.grxk++);\n\n /* Get RX ptring pointer from the CSB. */\n ptring = ptns->ptrings + (pth_na->up.num_tx_rings + kring->ring_id);\n kth = ptns->kthreads[pth_na->up.num_tx_rings + kring->ring_id];\n\n num_slots = kring->nkr_num_slots;\n shadow_ring.head = kring->rhead;\n shadow_ring.cur = kring->rcur;\n\n /* Disable notifications. */\n ptring_kick_enable(ptring, 0);\n /* Copy the guest kring pointers from the CSB */\n ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);\n\n for (;;) {\n\tuint32_t hwtail;\n\n /* Netmap prologue */\n\tshadow_ring.tail = kring->rtail;\n if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {\n /* Reinit ring and enable notifications. 
*/\n netmap_ring_reinit(kring);\n ptring_kick_enable(ptring, 1);\n break;\n }\n\n if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) {\n ptnetmap_kring_dump(\"pre rxsync\", kring);\n\t}\n\n IFRATE(pre_tail = kring->rtail);\n if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {\n /* Reenable notifications. */\n ptring_kick_enable(ptring, 1);\n D(\"ERROR rxsync()\");\n\t break;\n }\n /*\n * Finalize\n * Copy host hwcur and hwtail into the CSB for the guest sync()\n */\n\thwtail = NM_ACCESS_ONCE(kring->nr_hwtail);\n ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur, hwtail);\n if (kring->rtail != hwtail) {\n\t kring->rtail = hwtail;\n some_recvd = true;\n dry_cycles = 0;\n } else {\n dry_cycles++;\n }\n\n IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.rxbs, pre_tail,\n\t kring->rtail, num_slots));\n\n if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) {\n ptnetmap_kring_dump(\"post rxsync\", kring);\n\t}\n\n#ifndef BUSY_WAIT\n\t/* Interrupt the guest if needed. */\n if (some_recvd && ptring_intr_enabled(ptring)) {\n /* Disable guest kick to avoid sending unnecessary kicks */\n ptring_intr_enable(ptring, 0);\n nm_os_kthread_send_irq(kth);\n IFRATE(ptns->rate_ctx.new.hrxk++);\n some_recvd = false;\n }\n#endif\n /* Read CSB to see if there is more work to do. */\n ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);\n#ifndef BUSY_WAIT\n if (ptnetmap_norxslots(kring, shadow_ring.head)) {\n /*\n * No more slots available for reception. We enable notification and\n * go to sleep, waiting for a kick from the guest when new receive\n\t * slots are available.\n */\n usleep_range(1,1);\n /* Reenable notifications. */\n ptring_kick_enable(ptring, 1);\n /* Doublecheck. */\n ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);\n if (!ptnetmap_norxslots(kring, shadow_ring.head)) {\n\t\t/* We won the race condition, more slots are available. Disable\n\t\t * notifications and do another cycle. */\n ptring_kick_enable(ptring, 0);\n continue;\n\t }\n break;\n }\n\n\thwtail = NM_ACCESS_ONCE(kring->nr_hwtail);\n if (unlikely(hwtail == kring->rhead ||\n\t\t dry_cycles >= PTN_RX_DRY_CYCLES_MAX)) {\n\t /* No more packets to be read from the backend. We stop and\n\t * wait for a notification from the backend (netmap_rx_irq). */\n ND(1, \"nr_hwtail: %d rhead: %d dry_cycles: %d\",\n\t hwtail, kring->rhead, dry_cycles);\n break;\n }\n#endif\n if (unlikely(ptns->stopped)) {\n D(\"backend netmap is being stopped\");\n break;\n }\n }\n\n nm_kr_put(kring);\n\n /* Interrupt the guest if needed. 
*/\n if (some_recvd && ptring_intr_enabled(ptring)) {\n ptring_intr_enable(ptring, 0);\n nm_os_kthread_send_irq(kth);\n IFRATE(ptns->rate_ctx.new.hrxk++);\n }\n}\n\n#ifdef NETMAP_PT_DEBUG\nstatic void\nptnetmap_print_configuration(struct ptnetmap_cfg *cfg)\n{\n\tint k;\n\n\tD(\"ptnetmap configuration:\");\n\tD(\" CSB ptrings @%p, num_rings=%u, cfgtype %08x\", cfg->ptrings,\n\t cfg->num_rings, cfg->cfgtype);\n\tfor (k = 0; k < cfg->num_rings; k++) {\n\t\tswitch (cfg->cfgtype) {\n\t\tcase PTNETMAP_CFGTYPE_QEMU: {\n\t\t\tstruct ptnetmap_cfgentry_qemu *e =\n\t\t\t\t(struct ptnetmap_cfgentry_qemu *)(cfg+1) + k;\n\t\t\tD(\" ring #%d: ioeventfd=%lu, irqfd=%lu\", k,\n\t\t\t\t(unsigned long)e->ioeventfd,\n\t\t\t\t(unsigned long)e->irqfd);\n\t\t\tbreak;\n\t\t}\n\n\t\tcase PTNETMAP_CFGTYPE_BHYVE:\n\t\t{\n\t\t\tstruct ptnetmap_cfgentry_bhyve *e =\n\t\t\t\t(struct ptnetmap_cfgentry_bhyve *)(cfg+1) + k;\n\t\t\tD(\" ring #%d: wchan=%lu, ioctl_fd=%lu, \"\n\t\t\t \"ioctl_cmd=%lu, msix_msg_data=%lu, msix_addr=%lu\",\n\t\t\t\tk, (unsigned long)e->wchan,\n\t\t\t\t(unsigned long)e->ioctl_fd,\n\t\t\t\t(unsigned long)e->ioctl_cmd,\n\t\t\t\t(unsigned long)e->ioctl_data.msg_data,\n\t\t\t\t(unsigned long)e->ioctl_data.addr);\n\t\t\tbreak;\n\t\t}\n\t\t}\n\t}\n\n}\n#endif /* NETMAP_PT_DEBUG */\n\n/* Copy actual state of the host ring into the CSB for the guest init */\nstatic int\nptnetmap_kring_snapshot(struct netmap_kring *kring, struct ptnet_ring __user *ptring)\n{\n if(CSB_WRITE(ptring, head, kring->rhead))\n goto err;\n if(CSB_WRITE(ptring, cur, kring->rcur))\n goto err;\n\n if(CSB_WRITE(ptring, hwcur, kring->nr_hwcur))\n goto err;\n if(CSB_WRITE(ptring, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail)))\n goto err;\n\n DBG(ptnetmap_kring_dump(\"ptnetmap_kring_snapshot\", kring);)\n\n return 0;\nerr:\n return EFAULT;\n}\n\nstatic struct netmap_kring *\nptnetmap_kring(struct netmap_pt_host_adapter *pth_na, int k)\n{\n\tif (k < pth_na->up.num_tx_rings) {\n\t\treturn pth_na->up.tx_rings + k;\n\t}\n\treturn pth_na->up.rx_rings + k - pth_na->up.num_tx_rings;\n}\n\nstatic int\nptnetmap_krings_snapshot(struct netmap_pt_host_adapter *pth_na)\n{\n\tstruct ptnetmap_state *ptns = pth_na->ptns;\n\tstruct netmap_kring *kring;\n\tunsigned int num_rings;\n\tint err = 0, k;\n\n\tnum_rings = pth_na->up.num_tx_rings +\n\t\t pth_na->up.num_rx_rings;\n\n\tfor (k = 0; k < num_rings; k++) {\n\t\tkring = ptnetmap_kring(pth_na, k);\n\t\terr |= ptnetmap_kring_snapshot(kring, ptns->ptrings + k);\n\t}\n\n\treturn err;\n}\n\n/*\n * Functions to create, start and stop the kthreads\n */\n\nstatic int\nptnetmap_create_kthreads(struct netmap_pt_host_adapter *pth_na,\n\t\t\t struct ptnetmap_cfg *cfg)\n{\n\tstruct ptnetmap_state *ptns = pth_na->ptns;\n\tstruct nm_kthread_cfg nmk_cfg;\n\tunsigned int num_rings;\n\tuint8_t *cfg_entries = (uint8_t *)(cfg + 1);\n\tint k;\n\n\tnum_rings = pth_na->up.num_tx_rings +\n\t\t pth_na->up.num_rx_rings;\n\n\tfor (k = 0; k < num_rings; k++) {\n\t\tnmk_cfg.attach_user = 1; /* attach kthread to user process */\n\t\tnmk_cfg.worker_private = ptnetmap_kring(pth_na, k);\n\t\tnmk_cfg.type = k;\n\t\tif (k < pth_na->up.num_tx_rings) {\n\t\t\tnmk_cfg.worker_fn = ptnetmap_tx_handler;\n\t\t} else {\n\t\t\tnmk_cfg.worker_fn = ptnetmap_rx_handler;\n\t\t}\n\n\t\tptns->kthreads[k] = nm_os_kthread_create(&nmk_cfg,\n\t\t\tcfg->cfgtype, cfg_entries + k * cfg->entry_size);\n\t\tif (ptns->kthreads[k] == NULL) {\n\t\t\tgoto err;\n\t\t}\n\t}\n\n\treturn 0;\nerr:\n\tfor (k = 0; k < num_rings; k++) {\n\t\tif (ptns->kthreads[k]) 
{\n\t\t\tnm_os_kthread_delete(ptns->kthreads[k]);\n\t\t\tptns->kthreads[k] = NULL;\n\t\t}\n\t}\n\treturn EFAULT;\n}\n\nstatic int\nptnetmap_start_kthreads(struct netmap_pt_host_adapter *pth_na)\n{\n\tstruct ptnetmap_state *ptns = pth_na->ptns;\n\tint num_rings;\n\tint error;\n\tint k;\n\n\tif (!ptns) {\n\t\tD(\"BUG ptns is NULL\");\n\t\treturn EFAULT;\n\t}\n\n\tptns->stopped = false;\n\n\tnum_rings = ptns->pth_na->up.num_tx_rings +\n\t\t ptns->pth_na->up.num_rx_rings;\n\tfor (k = 0; k < num_rings; k++) {\n\t\t//nm_os_kthread_set_affinity(ptns->kthreads[k], xxx);\n\t\terror = nm_os_kthread_start(ptns->kthreads[k]);\n\t\tif (error) {\n\t\t\treturn error;\n\t\t}\n\t}\n\n\treturn 0;\n}\n\nstatic void\nptnetmap_stop_kthreads(struct netmap_pt_host_adapter *pth_na)\n{\n\tstruct ptnetmap_state *ptns = pth_na->ptns;\n\tint num_rings;\n\tint k;\n\n\tif (!ptns) {\n\t\t/* Nothing to do. */\n\t\treturn;\n\t}\n\n\tptns->stopped = true;\n\n\tnum_rings = ptns->pth_na->up.num_tx_rings +\n\t\t ptns->pth_na->up.num_rx_rings;\n\tfor (k = 0; k < num_rings; k++) {\n\t\tnm_os_kthread_stop(ptns->kthreads[k]);\n\t}\n}\n\nstatic struct ptnetmap_cfg *\nptnetmap_read_cfg(struct nmreq *nmr)\n{\n\tuintptr_t *nmr_ptncfg = (uintptr_t *)&nmr->nr_arg1;\n\tstruct ptnetmap_cfg *cfg;\n\tstruct ptnetmap_cfg tmp;\n\tsize_t cfglen;\n\n\tif (copyin((const void *)*nmr_ptncfg, &tmp, sizeof(tmp))) {\n\t\tD(\"Partial copyin() failed\");\n\t\treturn NULL;\n\t}\n\n\tcfglen = sizeof(tmp) + tmp.num_rings * tmp.entry_size;\n\tcfg = nm_os_malloc(cfglen);\n\tif (!cfg) {\n\t\treturn NULL;\n\t}\n\n\tif (copyin((const void *)*nmr_ptncfg, cfg, cfglen)) {\n\t\tD(\"Full copyin() failed\");\n\t\tnm_os_free(cfg);\n\t\treturn NULL;\n\t}\n\n\treturn cfg;\n}\n\nstatic int nm_unused_notify(struct netmap_kring *, int);\nstatic int nm_pt_host_notify(struct netmap_kring *, int);\n\n/* Create ptnetmap state and switch parent adapter to ptnetmap mode. */\nstatic int\nptnetmap_create(struct netmap_pt_host_adapter *pth_na,\n\t\tstruct ptnetmap_cfg *cfg)\n{\n struct ptnetmap_state *ptns;\n unsigned int num_rings;\n int ret, i;\n\n /* Check if ptnetmap state is already there. */\n if (pth_na->ptns) {\n D(\"ERROR adapter %p already in ptnetmap mode\", pth_na->parent);\n return EINVAL;\n }\n\n num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings;\n\n if (num_rings != cfg->num_rings) {\n D(\"ERROR configuration mismatch, expected %u rings, found %u\",\n num_rings, cfg->num_rings);\n return EINVAL;\n }\n\n ptns = nm_os_malloc(sizeof(*ptns) + num_rings * sizeof(*ptns->kthreads));\n if (!ptns) {\n return ENOMEM;\n }\n\n ptns->kthreads = (struct nm_kthread **)(ptns + 1);\n ptns->stopped = true;\n\n /* Cross-link data structures. */\n pth_na->ptns = ptns;\n ptns->pth_na = pth_na;\n\n /* Store the CSB address provided by the hypervisor. */\n ptns->ptrings = cfg->ptrings;\n\n DBG(ptnetmap_print_configuration(cfg));\n\n /* Create kthreads */\n if ((ret = ptnetmap_create_kthreads(pth_na, cfg))) {\n D(\"ERROR ptnetmap_create_kthreads()\");\n goto err;\n }\n /* Copy krings state into the CSB for the guest initialization */\n if ((ret = ptnetmap_krings_snapshot(pth_na))) {\n D(\"ERROR ptnetmap_krings_snapshot()\");\n goto err;\n }\n\n /* Overwrite parent nm_notify krings callback. 
*/\n pth_na->parent->na_private = pth_na;\n pth_na->parent_nm_notify = pth_na->parent->nm_notify;\n pth_na->parent->nm_notify = nm_unused_notify;\n\n for (i = 0; i < pth_na->parent->num_rx_rings; i++) {\n pth_na->up.rx_rings[i].save_notify =\n \tpth_na->up.rx_rings[i].nm_notify;\n pth_na->up.rx_rings[i].nm_notify = nm_pt_host_notify;\n }\n for (i = 0; i < pth_na->parent->num_tx_rings; i++) {\n pth_na->up.tx_rings[i].save_notify =\n \tpth_na->up.tx_rings[i].nm_notify;\n pth_na->up.tx_rings[i].nm_notify = nm_pt_host_notify;\n }\n\n#ifdef RATE\n memset(&ptns->rate_ctx, 0, sizeof(ptns->rate_ctx));\n setup_timer(&ptns->rate_ctx.timer, &rate_callback,\n (unsigned long)&ptns->rate_ctx);\n if (mod_timer(&ptns->rate_ctx.timer, jiffies + msecs_to_jiffies(1500)))\n D(\"[ptn] Error: mod_timer()\\n\");\n#endif\n\n DBG(D(\"[%s] ptnetmap configuration DONE\", pth_na->up.name));\n\n return 0;\n\nerr:\n pth_na->ptns = NULL;\n nm_os_free(ptns);\n return ret;\n}\n\n/* Switch parent adapter back to normal mode and destroy\n * ptnetmap state. */\nstatic void\nptnetmap_delete(struct netmap_pt_host_adapter *pth_na)\n{\n struct ptnetmap_state *ptns = pth_na->ptns;\n int num_rings;\n int i;\n\n if (!ptns) {\n\t/* Nothing to do. */\n return;\n }\n\n /* Restore parent adapter callbacks. */\n pth_na->parent->nm_notify = pth_na->parent_nm_notify;\n pth_na->parent->na_private = NULL;\n\n for (i = 0; i < pth_na->parent->num_rx_rings; i++) {\n pth_na->up.rx_rings[i].nm_notify =\n \tpth_na->up.rx_rings[i].save_notify;\n pth_na->up.rx_rings[i].save_notify = NULL;\n }\n for (i = 0; i < pth_na->parent->num_tx_rings; i++) {\n pth_na->up.tx_rings[i].nm_notify =\n \tpth_na->up.tx_rings[i].save_notify;\n pth_na->up.tx_rings[i].save_notify = NULL;\n }\n\n /* Delete kthreads. */\n num_rings = ptns->pth_na->up.num_tx_rings +\n ptns->pth_na->up.num_rx_rings;\n for (i = 0; i < num_rings; i++) {\n nm_os_kthread_delete(ptns->kthreads[i]);\n\tptns->kthreads[i] = NULL;\n }\n\n IFRATE(del_timer(&ptns->rate_ctx.timer));\n\n nm_os_free(ptns);\n\n pth_na->ptns = NULL;\n\n DBG(D(\"[%s] ptnetmap deleted\", pth_na->up.name));\n}\n\n/*\n * Called by netmap_ioctl().\n * Operation is indicated in nmr->nr_cmd.\n *\n * Called without NMG_LOCK.\n */\nint\nptnetmap_ctl(struct nmreq *nmr, struct netmap_adapter *na)\n{\n struct netmap_pt_host_adapter *pth_na;\n struct ptnetmap_cfg *cfg;\n char *name;\n int cmd, error = 0;\n\n name = nmr->nr_name;\n cmd = nmr->nr_cmd;\n\n DBG(D(\"name: %s\", name));\n\n if (!nm_ptnetmap_host_on(na)) {\n D(\"ERROR Netmap adapter %p is not a ptnetmap host adapter\", na);\n error = ENXIO;\n goto done;\n }\n pth_na = (struct netmap_pt_host_adapter *)na;\n\n NMG_LOCK();\n switch (cmd) {\n case NETMAP_PT_HOST_CREATE:\n\t/* Read hypervisor configuration from userspace. */\n cfg = ptnetmap_read_cfg(nmr);\n if (!cfg)\n break;\n /* Create ptnetmap state (kthreads, ...) and switch parent\n\t * adapter to ptnetmap mode. */\n error = ptnetmap_create(pth_na, cfg);\n\tnm_os_free(cfg);\n if (error)\n break;\n /* Start kthreads. */\n error = ptnetmap_start_kthreads(pth_na);\n if (error)\n ptnetmap_delete(pth_na);\n break;\n\n case NETMAP_PT_HOST_DELETE:\n /* Stop kthreads. */\n ptnetmap_stop_kthreads(pth_na);\n /* Switch parent adapter back to normal mode and destroy\n\t * ptnetmap state (kthreads, ...). 
*/\n ptnetmap_delete(pth_na);\n break;\n\n default:\n D(\"ERROR invalid cmd (nmr->nr_cmd) (0x%x)\", cmd);\n error = EINVAL;\n break;\n }\n NMG_UNLOCK();\n\ndone:\n return error;\n}\n\n/* nm_notify callbacks for ptnetmap */\nstatic int\nnm_pt_host_notify(struct netmap_kring *kring, int flags)\n{\n\tstruct netmap_adapter *na = kring->na;\n\tstruct netmap_pt_host_adapter *pth_na =\n\t\t(struct netmap_pt_host_adapter *)na->na_private;\n\tstruct ptnetmap_state *ptns;\n\tint k;\n\n\t/* First check that the passthrough port is not being destroyed. */\n\tif (unlikely(!pth_na)) {\n\t\treturn NM_IRQ_COMPLETED;\n\t}\n\n\tptns = pth_na->ptns;\n\tif (unlikely(!ptns || ptns->stopped)) {\n\t\treturn NM_IRQ_COMPLETED;\n\t}\n\n\tk = kring->ring_id;\n\n\t/* Notify kthreads (wake up if needed) */\n\tif (kring->tx == NR_TX) {\n\t\tND(1, \"TX backend irq\");\n\t\tIFRATE(ptns->rate_ctx.new.btxwu++);\n\t} else {\n\t\tk += pth_na->up.num_tx_rings;\n\t\tND(1, \"RX backend irq\");\n\t\tIFRATE(ptns->rate_ctx.new.brxwu++);\n\t}\n\tnm_os_kthread_wakeup_worker(ptns->kthreads[k]);\n\n\treturn NM_IRQ_COMPLETED;\n}\n\nstatic int\nnm_unused_notify(struct netmap_kring *kring, int flags)\n{\n D(\"BUG this should never be called\");\n return ENXIO;\n}\n\n/* nm_config callback for bwrap */\nstatic int\nnm_pt_host_config(struct netmap_adapter *na, u_int *txr, u_int *txd,\n u_int *rxr, u_int *rxd)\n{\n struct netmap_pt_host_adapter *pth_na =\n (struct netmap_pt_host_adapter *)na;\n struct netmap_adapter *parent = pth_na->parent;\n int error;\n\n //XXX: maybe calling parent->nm_config is better\n\n /* forward the request */\n error = netmap_update_config(parent);\n\n *rxr = na->num_rx_rings = parent->num_rx_rings;\n *txr = na->num_tx_rings = parent->num_tx_rings;\n *txd = na->num_tx_desc = parent->num_tx_desc;\n *rxd = na->num_rx_desc = parent->num_rx_desc;\n\n DBG(D(\"rxr: %d txr: %d txd: %d rxd: %d\", *rxr, *txr, *txd, *rxd));\n\n return error;\n}\n\n/* nm_krings_create callback for ptnetmap */\nstatic int\nnm_pt_host_krings_create(struct netmap_adapter *na)\n{\n struct netmap_pt_host_adapter *pth_na =\n (struct netmap_pt_host_adapter *)na;\n struct netmap_adapter *parent = pth_na->parent;\n enum txrx t;\n int error;\n\n DBG(D(\"%s\", pth_na->up.name));\n\n /* create the parent krings */\n error = parent->nm_krings_create(parent);\n if (error) {\n return error;\n }\n\n /* A ptnetmap host adapter points the very same krings\n * as its parent adapter. These pointer are used in the\n * TX/RX worker functions. */\n na->tx_rings = parent->tx_rings;\n na->rx_rings = parent->rx_rings;\n na->tailroom = parent->tailroom;\n\n for_rx_tx(t) {\n\tstruct netmap_kring *kring;\n\n\t/* Parent's kring_create function will initialize\n\t * its own na->si. We have to init our na->si here. */\n\tnm_os_selinfo_init(&na->si[t]);\n\n\t/* Force the mem_rings_create() method to create the\n\t * host rings independently on what the regif asked for:\n\t * these rings are needed by the guest ptnetmap adapter\n\t * anyway. 
*/\n\tkring = &NMR(na, t)[nma_get_nrings(na, t)];\n\tkring->nr_kflags |= NKR_NEEDRING;\n }\n\n return 0;\n}\n\n/* nm_krings_delete callback for ptnetmap */\nstatic void\nnm_pt_host_krings_delete(struct netmap_adapter *na)\n{\n struct netmap_pt_host_adapter *pth_na =\n (struct netmap_pt_host_adapter *)na;\n struct netmap_adapter *parent = pth_na->parent;\n\n DBG(D(\"%s\", pth_na->up.name));\n\n parent->nm_krings_delete(parent);\n\n na->tx_rings = na->rx_rings = na->tailroom = NULL;\n}\n\n/* nm_register callback */\nstatic int\nnm_pt_host_register(struct netmap_adapter *na, int onoff)\n{\n struct netmap_pt_host_adapter *pth_na =\n (struct netmap_pt_host_adapter *)na;\n struct netmap_adapter *parent = pth_na->parent;\n int error;\n DBG(D(\"%s onoff %d\", pth_na->up.name, onoff));\n\n if (onoff) {\n /* netmap_do_regif has been called on the ptnetmap na.\n * We need to pass the information about the\n * memory allocator to the parent before\n * putting it in netmap mode\n */\n parent->na_lut = na->na_lut;\n }\n\n /* forward the request to the parent */\n error = parent->nm_register(parent, onoff);\n if (error)\n return error;\n\n\n if (onoff) {\n na->na_flags |= NAF_NETMAP_ON | NAF_PTNETMAP_HOST;\n } else {\n ptnetmap_delete(pth_na);\n na->na_flags &= ~(NAF_NETMAP_ON | NAF_PTNETMAP_HOST);\n }\n\n return 0;\n}\n\n/* nm_dtor callback */\nstatic void\nnm_pt_host_dtor(struct netmap_adapter *na)\n{\n struct netmap_pt_host_adapter *pth_na =\n (struct netmap_pt_host_adapter *)na;\n struct netmap_adapter *parent = pth_na->parent;\n\n DBG(D(\"%s\", pth_na->up.name));\n\n /* The equivalent of NETMAP_PT_HOST_DELETE if the hypervisor\n * didn't do it. */\n ptnetmap_stop_kthreads(pth_na);\n ptnetmap_delete(pth_na);\n\n parent->na_flags &= ~NAF_BUSY;\n\n netmap_adapter_put(pth_na->parent);\n pth_na->parent = NULL;\n}\n\n/* check if nmr is a request for a ptnetmap adapter that we can satisfy */\nint\nnetmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na,\n\t\tstruct netmap_mem_d *nmd, int create)\n{\n struct nmreq parent_nmr;\n struct netmap_adapter *parent; /* target adapter */\n struct netmap_pt_host_adapter *pth_na;\n struct ifnet *ifp = NULL;\n int error;\n\n /* Check if it is a request for a ptnetmap adapter */\n if ((nmr->nr_flags & (NR_PTNETMAP_HOST)) == 0) {\n return 0;\n }\n\n D(\"Requesting a ptnetmap host adapter\");\n\n pth_na = nm_os_malloc(sizeof(*pth_na));\n if (pth_na == NULL) {\n D(\"ERROR malloc\");\n return ENOMEM;\n }\n\n /* first, try to find the adapter that we want to passthrough\n * We use the same nmr, after we have turned off the ptnetmap flag.\n * In this way we can potentially passthrough everything netmap understands.\n */\n memcpy(&parent_nmr, nmr, sizeof(parent_nmr));\n parent_nmr.nr_flags &= ~(NR_PTNETMAP_HOST);\n error = netmap_get_na(&parent_nmr, &parent, &ifp, nmd, create);\n if (error) {\n D(\"parent lookup failed: %d\", error);\n goto put_out_noputparent;\n }\n DBG(D(\"found parent: %s\", parent->name));\n\n /* make sure the interface is not already in use */\n if (NETMAP_OWNED_BY_ANY(parent)) {\n D(\"NIC %s busy, cannot ptnetmap\", parent->name);\n error = EBUSY;\n goto put_out;\n }\n\n pth_na->parent = parent;\n\n /* Follow netmap_attach()-like operations for the host\n * ptnetmap adapter. 
*/\n\n //XXX pth_na->up.na_flags = parent->na_flags;\n pth_na->up.num_rx_rings = parent->num_rx_rings;\n pth_na->up.num_tx_rings = parent->num_tx_rings;\n pth_na->up.num_tx_desc = parent->num_tx_desc;\n pth_na->up.num_rx_desc = parent->num_rx_desc;\n\n pth_na->up.nm_dtor = nm_pt_host_dtor;\n pth_na->up.nm_register = nm_pt_host_register;\n\n /* Reuse parent's adapter txsync and rxsync methods. */\n pth_na->up.nm_txsync = parent->nm_txsync;\n pth_na->up.nm_rxsync = parent->nm_rxsync;\n\n pth_na->up.nm_krings_create = nm_pt_host_krings_create;\n pth_na->up.nm_krings_delete = nm_pt_host_krings_delete;\n pth_na->up.nm_config = nm_pt_host_config;\n\n /* Set the notify method only or convenience, it will never\n * be used, since - differently from default krings_create - we\n * ptnetmap krings_create callback inits kring->nm_notify\n * directly. */\n pth_na->up.nm_notify = nm_unused_notify;\n\n pth_na->up.nm_mem = netmap_mem_get(parent->nm_mem);\n\n pth_na->up.na_flags |= NAF_HOST_RINGS;\n\n error = netmap_attach_common(&pth_na->up);\n if (error) {\n D(\"ERROR netmap_attach_common()\");\n goto put_out;\n }\n\n *na = &pth_na->up;\n netmap_adapter_get(*na);\n\n /* set parent busy, because attached for ptnetmap */\n parent->na_flags |= NAF_BUSY;\n\n strncpy(pth_na->up.name, parent->name, sizeof(pth_na->up.name));\n strcat(pth_na->up.name, \"-PTN\");\n\n DBG(D(\"%s ptnetmap request DONE\", pth_na->up.name));\n\n /* drop the reference to the ifp, if any */\n if (ifp)\n if_rele(ifp);\n\n return 0;\n\nput_out:\n netmap_adapter_put(parent);\n if (ifp)\n\tif_rele(ifp);\nput_out_noputparent:\n nm_os_free(pth_na);\n return error;\n}\n#endif /* WITH_PTNETMAP_HOST */\n\n#ifdef WITH_PTNETMAP_GUEST\n/*\n * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.\n * These routines are reused across the different operating systems supported\n * by netmap.\n */\n\n/*\n * Reconcile host and guest views of the transmit ring.\n *\n * Guest user wants to transmit packets up to the one before ring->head,\n * and guest kernel knows tx_ring->hwcur is the first packet unsent\n * by the host kernel.\n *\n * We push out as many packets as possible, and possibly\n * reclaim buffers from previously completed transmission.\n *\n * Notifications from the host are enabled only if the user guest would\n * block (no space in the ring).\n */\nbool\nnetmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,\n\t\t int flags)\n{\n\tbool notify = false;\n\n\t/* Disable notifications */\n\tptring->guest_need_kick = 0;\n\n\t/*\n\t * First part: tell the host (updating the CSB) to process the new\n\t * packets.\n\t */\n\tkring->nr_hwcur = ptring->hwcur;\n\tptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);\n\n /* Ask for a kick from a guest to the host if needed. */\n\tif ((kring->rhead != kring->nr_hwcur &&\n\t\tNM_ACCESS_ONCE(ptring->host_need_kick)) ||\n\t\t\t(flags & NAF_FORCE_RECLAIM)) {\n\t\tptring->sync_flags = flags;\n\t\tnotify = true;\n\t}\n\n\t/*\n\t * Second part: reclaim buffers for completed transmissions.\n\t */\n\tif (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {\n ptnetmap_guest_read_kring_csb(ptring, kring);\n\t}\n\n /*\n * No more room in the ring for new transmissions. The user thread will\n\t * go to sleep and we need to be notified by the host when more free\n\t * space is available.\n */\n\tif (nm_kr_txempty(kring)) {\n\t\t/* Reenable notifications. 
*/\n\t\tptring->guest_need_kick = 1;\n /* Double check */\n ptnetmap_guest_read_kring_csb(ptring, kring);\n /* If there is new free space, disable notifications */\n\t\tif (unlikely(!nm_kr_txempty(kring))) {\n\t\t\tptring->guest_need_kick = 0;\n\t\t}\n\t}\n\n\tND(1, \"TX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u tail: %u\",\n\t\t\tptring->head, ptring->cur, ptring->hwtail,\n\t\t\tkring->rhead, kring->rcur, kring->nr_hwtail);\n\n\treturn notify;\n}\n\n/*\n * Reconcile host and guest view of the receive ring.\n *\n * Update hwcur/hwtail from host (reading from CSB).\n *\n * If guest user has released buffers up to the one before ring->head, we\n * also give them to the host.\n *\n * Notifications from the host are enabled only if the user guest would\n * block (no more completed slots in the ring).\n */\nbool\nnetmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,\n\t\t int flags)\n{\n\tbool notify = false;\n\n /* Disable notifications */\n\tptring->guest_need_kick = 0;\n\n\t/*\n\t * First part: import newly received packets, by updating the kring\n\t * hwtail to the hwtail known from the host (read from the CSB).\n\t * This also updates the kring hwcur.\n\t */\n ptnetmap_guest_read_kring_csb(ptring, kring);\n\tkring->nr_kflags &= ~NKR_PENDINTR;\n\n\t/*\n\t * Second part: tell the host about the slots that guest user has\n\t * released, by updating cur and head in the CSB.\n\t */\n\tif (kring->rhead != kring->nr_hwcur) {\n\t\tptnetmap_guest_write_kring_csb(ptring, kring->rcur,\n\t\t\t\t\t kring->rhead);\n /* Ask for a kick from the guest to the host if needed. */\n\t\tif (NM_ACCESS_ONCE(ptring->host_need_kick)) {\n\t\t\tptring->sync_flags = flags;\n\t\t\tnotify = true;\n\t\t}\n\t}\n\n /*\n * No more completed RX slots. The user thread will go to sleep and\n\t * we need to be notified by the host when more RX slots have been\n\t * completed.\n */\n\tif (nm_kr_rxempty(kring)) {\n\t\t/* Reenable notifications. */\n ptring->guest_need_kick = 1;\n /* Double check */\n ptnetmap_guest_read_kring_csb(ptring, kring);\n /* If there are new slots, disable notifications. */\n\t\tif (!nm_kr_rxempty(kring)) {\n ptring->guest_need_kick = 0;\n }\n }\n\n\tND(1, \"RX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u\",\n\t\tptring->head, ptring->cur, ptring->hwtail,\n\t\tkring->rhead, kring->rcur);\n\n\treturn notify;\n}\n\n/*\n * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.\n */\nint\nptnet_nm_krings_create(struct netmap_adapter *na)\n{\n\tstruct netmap_pt_guest_adapter *ptna =\n\t\t\t(struct netmap_pt_guest_adapter *)na; /* Upcast. */\n\tstruct netmap_adapter *na_nm = &ptna->hwup.up;\n\tstruct netmap_adapter *na_dr = &ptna->dr.up;\n\tint ret;\n\n\tif (ptna->backend_regifs) {\n\t\treturn 0;\n\t}\n\n\t/* Create krings on the public netmap adapter. */\n\tret = netmap_hw_krings_create(na_nm);\n\tif (ret) {\n\t\treturn ret;\n\t}\n\n\t/* Copy krings into the netmap adapter private to the driver. */\n\tna_dr->tx_rings = na_nm->tx_rings;\n\tna_dr->rx_rings = na_nm->rx_rings;\n\n\treturn 0;\n}\n\nvoid\nptnet_nm_krings_delete(struct netmap_adapter *na)\n{\n\tstruct netmap_pt_guest_adapter *ptna =\n\t\t\t(struct netmap_pt_guest_adapter *)na; /* Upcast. 
*/\n\tstruct netmap_adapter *na_nm = &ptna->hwup.up;\n\tstruct netmap_adapter *na_dr = &ptna->dr.up;\n\n\tif (ptna->backend_regifs) {\n\t\treturn;\n\t}\n\n\tna_dr->tx_rings = NULL;\n\tna_dr->rx_rings = NULL;\n\n\tnetmap_hw_krings_delete(na_nm);\n}\n\nvoid\nptnet_nm_dtor(struct netmap_adapter *na)\n{\n\tstruct netmap_pt_guest_adapter *ptna =\n\t\t\t(struct netmap_pt_guest_adapter *)na;\n\n\tnetmap_mem_put(ptna->dr.up.nm_mem); // XXX is this needed?\n\tmemset(&ptna->dr, 0, sizeof(ptna->dr));\n\tnetmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);\n}\n\n#endif /* WITH_PTNETMAP_GUEST */\n" } ]
12
markshells/NFT_Auction_Platform
https://github.com/markshells/NFT_Auction_Platform
c419844466a2ffd39b0b6ca4e80dffed2bf126aa
0ccfcb05901711d6485f583e29dbcc7f56759d9c
3d8f53bf2024aded9c00285f1f3624f61c9b3656
refs/heads/main
2023-07-16T05:20:15.862703
2021-09-02T17:29:19
2021-09-02T17:29:19
398,326,174
3
0
null
2021-08-20T15:47:05
2021-08-20T15:45:46
2021-08-20T01:52:59
null
[ { "alpha_fraction": 0.6563193202018738, "alphanum_fraction": 0.6873614192008972, "avg_line_length": 25.52941131591797, "blob_id": "a61f71f448d758adf34bbd20bc32fd90aa917db9", "content_id": "417dfeff150faeb00b7c29f7a01360e945573cdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 451, "license_type": "no_license", "max_line_length": 53, "num_lines": 17, "path": "/nft_auction/src/web3.js", "repo_name": "markshells/NFT_Auction_Platform", "src_encoding": "UTF-8", "text": "//overrides metamask v0.2 for our 1.0 version. \n//1.0 lets us use async and await instead of promises\nimport Web3 from 'web3';\n\n\n// Function to check for MetaMask Connection\nexport const checkMetaMaskConnection = () => {\n if(window.ethereum) {\n window.web3 = new Web3(window.ethereum);\n window.ethereum.enable();\n return true;\n }\n return false;\n}\n\nconst web3 = new Web3(window.web3.currentProvider);\nexport default web3;\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 35, "blob_id": "1f0f650a08477d88f2a75828d50ca37d4e1a5ae3", "content_id": "437d1c63301d03d558c6d3eeccb6719fe8627c8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 35, "num_lines": 1, "path": "/README.md", "repo_name": "markshells/NFT_Auction_Platform", "src_encoding": "UTF-8", "text": "Click [here](./frontend/index.html)\n" }, { "alpha_fraction": 0.6366525292396545, "alphanum_fraction": 0.6408898234367371, "avg_line_length": 24.54054069519043, "blob_id": "931c074fec3a6258ccbe7457be6750414e95b786", "content_id": "d2c43396ea204e2aeeb638c2ef7b23d771d0bb52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 944, "license_type": "no_license", "max_line_length": 84, "num_lines": 37, "path": "/tokentools.py", "repo_name": "markshells/NFT_Auction_Platform", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport os\nfrom dotenv import load_dotenv\nfrom pathlib import Path\nfrom web3.auto import w3\n\nload_dotenv('JL.env')\n\nheaders = {\n \"Content-Type\": \"application/json\",\n \"pinata_api_key\": os.getenv(\"PINATA_API_KEY\"),\n \"pinata_secret_api_key\": os.getenv(\"PINATA_SECRET_KEY\"),\n}\n\ndef initContract():\n with open(Path(\"Tokenize.json\")) as json_file:\n abi = json.load(json_file)\n\n return w3.eth.contract(address=os.getenv(\"ARTREGISTRY_ADDRESS\"), abi=abi)\n\ndef convertDataToJSON(title, artist_name):\n data = {\n \"pinataOptions\": {\"cidVersion\": 1},\n \"pinataContent\": {\n \"title\": title,\n \"artist_name\": artist_name\n },\n }\n return json.dumps(data)\n\ndef pinJSONtoIPFS(json):\n r = requests.post(\n \"https://api.pinata.cloud/pinning/pinJSONToIPFS\", data=json, headers=headers\n )\n ipfs_hash = r.json()[\"IpfsHash\"]\n return f\"ipfs://{ipfs_hash}\"" } ]
3
Semc/pyboto3
https://github.com/Semc/pyboto3
e12bb5539a0a7ccfd884cd54213796e8970a3f1a
bde39b571490d01fdf9d4b4cdc5e27950b425a78
0f98ed54d6ce9060781832622591f7d901f99adc
refs/heads/master
2020-04-15T02:51:51.016352
2019-01-06T16:55:21
2019-01-06T16:55:21
164,326,781
0
0
null
2019-01-06T16:50:51
2018-12-28T06:26:25
2018-04-24T20:20:38
null
[ { "alpha_fraction": 0.6365212202072144, "alphanum_fraction": 0.6405383348464966, "avg_line_length": 39.62882995605469, "blob_id": "ecbca89bf1b45e8fc2b39ae289e42f2fbc725818", "content_id": "d6d119a4acfc6aed60ae3976d03b2ff4674aa886", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49040, "license_type": "permissive", "max_line_length": 563, "num_lines": 1207, "path": "/pyboto3/datasync.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_task_execution(TaskExecutionArn=None):\n \"\"\"\n Cancels execution of a task.\n When you cancel a task execution, the transfer of some files are abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, AWS DataSync successfully complete the transfer when you start the next task execution.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_task_execution(\n TaskExecutionArn='string'\n )\n \n \n :type TaskExecutionArn: string\n :param TaskExecutionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the task execution to cancel.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_agent(ActivationKey=None, AgentName=None, Tags=None):\n \"\"\"\n Activates an AWS DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the AWS Region that you want to activate the agent in. You activate the agent in the AWS Region where your target locations (in Amazon S3 or Amazon EFS) reside. 
Your tasks are created in this AWS Region.\n You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run. For more information, see Activating a Sync Agent in the AWS DataSync User Guide.\n Agents are automatically updated by AWS on a regular basis, using a mechanism that ensures minimal interruption to your tasks.\n See also: AWS API Documentation\n \n \n :example: response = client.create_agent(\n ActivationKey='string',\n AgentName='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ActivationKey: string\n :param ActivationKey: [REQUIRED]\n Your agent activation key. You can get the activation key either by sending an HTTP GET request with redirects that enable you to get the agent IP address (port 80). Alternatively, you can get it from the AWS DataSync console.\n The redirect URL returned in the response provides you the activation key for your agent in the query string parameter activationKey . It might also include other activation-related parameters; however, these are merely defaults. The arguments you pass to this API call determine the actual configuration of your agent. For more information, see Activating a Sync Agent in the AWS DataSync User Guide.\n \n\n :type AgentName: string\n :param AgentName: The name you configured for your agent. This value is a text reference that is used to identify the agent in the console.\n\n :type Tags: list\n :param Tags: The key-value pair that represents the tag you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.\n Note\n Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.\n (dict) --Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.\n Key (string) --The key for an AWS resource tag.\n Value (string) --The value for an AWS resource tag.\n \n \n\n :rtype: dict\n :return: {\n 'AgentArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_location_efs(Subdirectory=None, EfsFilesystemArn=None, Ec2Config=None, Tags=None):\n \"\"\"\n Creates an endpoint for an Amazon EFS file system.\n See also: AWS API Documentation\n \n \n :example: response = client.create_location_efs(\n Subdirectory='string',\n EfsFilesystemArn='string',\n Ec2Config={\n 'SubnetArn': 'string',\n 'SecurityGroupArns': [\n 'string',\n ]\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type Subdirectory: string\n :param Subdirectory: [REQUIRED]\n A subdirectory in the location's path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. 
By default, AWS DataSync uses the root directory.\n \n\n :type EfsFilesystemArn: string\n :param EfsFilesystemArn: [REQUIRED]\n The Amazon Resource Name (ARN) for the Amazon EFS file system.\n \n\n :type Ec2Config: dict\n :param Ec2Config: [REQUIRED]\n The subnet and security group that the Amazon EFS file system uses.\n SubnetArn (string) -- [REQUIRED]The ARN of the subnet that the Amazon EC2 resource belongs in.\n SecurityGroupArns (list) -- [REQUIRED]The Amazon Resource Names (ARNs) of the security groups that are configured for the Amazon EC2 resource.\n (string) --\n \n\n :type Tags: list\n :param Tags: The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.\n (dict) --Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.\n Key (string) --The key for an AWS resource tag.\n Value (string) --The value for an AWS resource tag.\n \n \n\n :rtype: dict\n :return: {\n 'LocationArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_location_nfs(Subdirectory=None, ServerHostname=None, OnPremConfig=None, Tags=None):\n \"\"\"\n Creates an endpoint for a Network File System (NFS) file system.\n See also: AWS API Documentation\n \n \n :example: response = client.create_location_nfs(\n Subdirectory='string',\n ServerHostname='string',\n OnPremConfig={\n 'AgentArns': [\n 'string',\n ]\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type Subdirectory: string\n :param Subdirectory: [REQUIRED]\n The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.\n To see all the paths exported by your NFS server, run 'showmount -e nfs-server-name' from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.\n To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want to sync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Centos documentation.\n \n\n :type ServerHostname: string\n :param ServerHostname: [REQUIRED]\n The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. 
An agent that is installed on-premises uses this host name to mount the NFS server in a network.\n Note\n This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.\n \n\n :type OnPremConfig: dict\n :param OnPremConfig: [REQUIRED]\n Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.\n AgentArns (list) -- [REQUIRED]ARNs of the agents to use for an NFS location.\n (string) --\n \n\n :type Tags: list\n :param Tags: The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.\n (dict) --Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.\n Key (string) --The key for an AWS resource tag.\n Value (string) --The value for an AWS resource tag.\n \n \n\n :rtype: dict\n :return: {\n 'LocationArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_location_s3(Subdirectory=None, S3BucketArn=None, S3Config=None, Tags=None):\n \"\"\"\n Creates an endpoint for an Amazon S3 bucket.\n For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section. For more information, see Configuring Amazon S3 Location Settings in the AWS DataSync User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_location_s3(\n Subdirectory='string',\n S3BucketArn='string',\n S3Config={\n 'BucketAccessRoleArn': 'string'\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type Subdirectory: string\n :param Subdirectory: [REQUIRED]\n A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is used to read data from the S3 source location or write data to the S3 destination.\n \n\n :type S3BucketArn: string\n :param S3BucketArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the Amazon S3 bucket.\n \n\n :type S3Config: dict\n :param S3Config: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that is used to access an Amazon S3 bucket. For detailed information about using such a role, see Components and Terminology in the AWS DataSync User Guide .\n BucketAccessRoleArn (string) -- [REQUIRED]The Amazon S3 bucket to access. This bucket is used as a parameter in the CreateLocationS3 operation.\n \n\n :type Tags: list\n :param Tags: The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.\n (dict) --Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.\n Key (string) --The key for an AWS resource tag.\n Value (string) --The value for an AWS resource tag.\n \n \n\n :rtype: dict\n :return: {\n 'LocationArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_task(SourceLocationArn=None, DestinationLocationArn=None, CloudWatchLogGroupArn=None, Name=None, Options=None, Tags=None):\n \"\"\"\n Creates a task. A task is a set of two locations (source and destination) and a set of default OverrideOptions that you use to control the behavior of a task. 
If you don't specify default values for Options when you create a task, AWS DataSync populates them with safe service defaults.\n When you initially create a task, it enters the INITIALIZING status and then the CREATING status. In CREATING status, AWS DataSync attempts to mount the source Network File System (NFS) location. The task transitions to the AVAILABLE status without waiting for the destination location to mount. Instead, AWS DataSync mounts a destination before every task execution and then unmounts it after every task execution.\n If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the status of the task remains in the CREATING status for more than a few minutes, it means that your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail . Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server host name.\n See also: AWS API Documentation\n \n \n :example: response = client.create_task(\n SourceLocationArn='string',\n DestinationLocationArn='string',\n CloudWatchLogGroupArn='string',\n Name='string',\n Options={\n 'VerifyMode': 'POINT_IN_TIME_CONSISTENT'|'NONE',\n 'Atime': 'NONE'|'BEST_EFFORT',\n 'Mtime': 'NONE'|'PRESERVE',\n 'Uid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'Gid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'PreserveDeletedFiles': 'PRESERVE'|'REMOVE',\n 'PreserveDevices': 'NONE'|'PRESERVE',\n 'PosixPermissions': 'NONE'|'BEST_EFFORT'|'PRESERVE',\n 'BytesPerSecond': 123\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type SourceLocationArn: string\n :param SourceLocationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the source location for the task.\n \n\n :type DestinationLocationArn: string\n :param DestinationLocationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of an AWS storage resource's location.\n \n\n :type CloudWatchLogGroupArn: string\n :param CloudWatchLogGroupArn: The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task. For more information on these groups, see Working with Log Groups and Log Streams in the Amazon CloudWatch User Guide.\n For more information about how to use CloudWatch Logs with DataSync, see Monitoring Your Task .\n \n\n :type Name: string\n :param Name: The name of a task. This value is a text reference that is used to identify the task in the console.\n\n :type Options: dict\n :param Options: The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution . You can configure these options to preserve metadata such as user ID (UID) and group ID (GID), file permissions, data integrity verification, and so on.\n For each individual task execution, you can override these options by specifying the OverrideOptions before starting the task execution. For more information, see the operation.\n VerifyMode (string) --A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred.\n Default value: POINT_IN_TIME_CONSISTENT.\n POINT_IN_TIME_CONSISTENT: Perform verification (recommended).\n NONE: Skip verification.\n Atime (string) --A file metadata value that shows the last time a file was accessed (that is, when the file was read or written to). 
If you set Atime to BEST_EFFORT, DataSync attempts to preserve the original Atime attribute on all source files (that is, the version before the PREPARING phase). However, Atime's behavior is not fully standard across platforms, so AWS DataSync can only do this on a best-effort basis.\n Default value: BEST_EFFORT.\n BEST_EFFORT: Attempt to preserve the per-file Atime value (recommended).\n NONE: Ignore Atime .\n Note\n If Atime is set to BEST_EFFORT, Mtime must be set to PRESERVE.\n If Atime is set to NONE, Mtime must also be NONE.\n Mtime (string) --A value that indicates the last time that a file was modified (that is, a file was written to) before the PREPARING phase.\n Default value: PRESERVE.\n PRESERVE: Preserve original Mtime (recommended)\n NONE: Ignore Mtime .\n Note\n If Mtime is set to PRESERVE, Atime must be set to BEST_EFFORT.\n If Mtime is set to NONE, Atime must also be set to NONE.\n Uid (string) --The user ID (UID) of the file's owner.\n Default value: INT_VALUE. This preserves the integer value of the ID.\n INT_VALUE: Preserve the integer value of UID and group ID (GID) (recommended).\n NONE: Ignore UID and GID.\n Gid (string) --The group ID (GID) of the file's owners.\n Default value: INT_VALUE. This preserves the integer value of the ID.\n INT_VALUE: Preserve the integer value of user ID (UID) and GID (recommended).\n NONE: Ignore UID and GID.\n PreserveDeletedFiles (string) --A value that specifies whether files in the destination that don't exist in the source file system should be preserved.\n Default value: PRESERVE.\n PRESERVE: Ignore such destination files (recommended).\n REMOVE: Delete destination files that aren't present in the source.\n PreserveDevices (string) --A value that determines whether AWS DataSync should preserve the metadata of block and character devices in the source file system, and recreate the files with that device name and metadata on the destination.\n Note\n AWS DataSync can't sync the actual contents of such devices, because they are nonterminal and don't return an end-of-file (EOF) marker.\n Default value: NONE.\n NONE: Ignore special devices (recommended).\n PRESERVE: Preserve character and block device metadata. This option isn't currently supported for Amazon EFS.\n PosixPermissions (string) --A value that determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file.\n Default value: PRESERVE.\n PRESERVE: Preserve POSIX-style permissions (recommended).\n NONE: Ignore permissions.\n Note\n AWS DataSync can preserve extant permissions of a source location.\n BytesPerSecond (integer) --A value that limits the bandwidth used by AWS DataSync. For example, if you want AWS DataSync to use a maximum of 1 MB, set this value to 1048576 (=1024*1024).\n \n\n :type Tags: list\n :param Tags: The key-value pair that represents the tag that you want to add to the resource. The value can be an empty string.\n (dict) --Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.\n Key (string) --The key for an AWS resource tag.\n Value (string) --The value for an AWS resource tag.\n \n \n\n :rtype: dict\n :return: {\n 'TaskArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_agent(AgentArn=None):\n \"\"\"\n Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. 
The operation disassociates the agent from your AWS account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_agent(\n AgentArn='string'\n )\n \n \n :type AgentArn: string\n :param AgentArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the agent to delete. Use the ListAgents operation to return a list of agents for your account and AWS Region.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_location(LocationArn=None):\n \"\"\"\n Deletes the configuration of a location used by AWS DataSync.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_location(\n LocationArn='string'\n )\n \n \n :type LocationArn: string\n :param LocationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the location to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_task(TaskArn=None):\n \"\"\"\n Deletes a task.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_task(\n TaskArn='string'\n )\n \n \n :type TaskArn: string\n :param TaskArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the task to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_agent(AgentArn=None):\n \"\"\"\n Returns metadata such as the name, the network interfaces, and the status (that is, whether the agent is running or not) for an agent. To specify which agent to describe, use the Amazon Resource Name (ARN) of the agent in your request.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_agent(\n AgentArn='string'\n )\n \n \n :type AgentArn: string\n :param AgentArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the agent to describe.\n \n\n :rtype: dict\n :return: {\n 'AgentArn': 'string',\n 'Name': 'string',\n 'Status': 'ONLINE'|'OFFLINE',\n 'LastConnectionTime': datetime(2015, 1, 1),\n 'CreationTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_location_efs(LocationArn=None):\n \"\"\"\n Returns metadata, such as the path information, about an Amazon EFS location.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_location_efs(\n LocationArn='string'\n )\n \n \n :type LocationArn: string\n :param LocationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the EFS location to describe.\n \n\n :rtype: dict\n :return: {\n 'LocationArn': 'string',\n 'LocationUri': 'string',\n 'Ec2Config': {\n 'SubnetArn': 'string',\n 'SecurityGroupArns': [\n 'string',\n ]\n },\n 'CreationTime': datetime(2015, 1, 1)\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_location_nfs(LocationArn=None):\n \"\"\"\n Returns metadata, such as the path information, about an NFS location.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_location_nfs(\n LocationArn='string'\n )\n \n \n :type LocationArn: string\n :param LocationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the NFS location to describe.\n \n\n :rtype: dict\n :return: {\n 'LocationArn': 'string',\n 'LocationUri': 'string',\n 'OnPremConfig': {\n 'AgentArns': [\n 'string',\n ]\n },\n 'CreationTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_location_s3(LocationArn=None):\n \"\"\"\n Returns metadata, such as bucket name, about an Amazon S3 bucket location.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_location_s3(\n LocationArn='string'\n )\n \n \n :type LocationArn: string\n 
:param LocationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the Amazon S3 bucket location to describe.\n \n\n :rtype: dict\n :return: {\n 'LocationArn': 'string',\n 'LocationUri': 'string',\n 'S3Config': {\n 'BucketAccessRoleArn': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_task(TaskArn=None):\n \"\"\"\n Returns metadata about a task.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_task(\n TaskArn='string'\n )\n \n \n :type TaskArn: string\n :param TaskArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the task to describe.\n \n\n :rtype: dict\n :return: {\n 'TaskArn': 'string',\n 'Status': 'AVAILABLE'|'CREATING'|'RUNNING'|'UNAVAILABLE',\n 'Name': 'string',\n 'CurrentTaskExecutionArn': 'string',\n 'SourceLocationArn': 'string',\n 'DestinationLocationArn': 'string',\n 'CloudWatchLogGroupArn': 'string',\n 'Options': {\n 'VerifyMode': 'POINT_IN_TIME_CONSISTENT'|'NONE',\n 'Atime': 'NONE'|'BEST_EFFORT',\n 'Mtime': 'NONE'|'PRESERVE',\n 'Uid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'Gid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'PreserveDeletedFiles': 'PRESERVE'|'REMOVE',\n 'PreserveDevices': 'NONE'|'PRESERVE',\n 'PosixPermissions': 'NONE'|'BEST_EFFORT'|'PRESERVE',\n 'BytesPerSecond': 123\n },\n 'ErrorCode': 'string',\n 'ErrorDetail': 'string',\n 'CreationTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_task_execution(TaskExecutionArn=None):\n \"\"\"\n Returns detailed metadata about a task that is being executed.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_task_execution(\n TaskExecutionArn='string'\n )\n \n \n :type TaskExecutionArn: string\n :param TaskExecutionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the task that is being executed.\n \n\n :rtype: dict\n :return: {\n 'TaskExecutionArn': 'string',\n 'Status': 'LAUNCHING'|'PREPARING'|'TRANSFERRING'|'VERIFYING'|'SUCCESS'|'ERROR',\n 'Options': {\n 'VerifyMode': 'POINT_IN_TIME_CONSISTENT'|'NONE',\n 'Atime': 'NONE'|'BEST_EFFORT',\n 'Mtime': 'NONE'|'PRESERVE',\n 'Uid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'Gid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'PreserveDeletedFiles': 'PRESERVE'|'REMOVE',\n 'PreserveDevices': 'NONE'|'PRESERVE',\n 'PosixPermissions': 'NONE'|'BEST_EFFORT'|'PRESERVE',\n 'BytesPerSecond': 123\n },\n 'StartTime': datetime(2015, 1, 1),\n 'EstimatedFilesToTransfer': 123,\n 'EstimatedBytesToTransfer': 123,\n 'FilesTransferred': 123,\n 'BytesWritten': 123,\n 'BytesTransferred': 123,\n 'Result': {\n 'PrepareDuration': 123,\n 'PrepareStatus': 'PENDING'|'SUCCESS'|'ERROR',\n 'TransferDuration': 123,\n 'TransferStatus': 'PENDING'|'SUCCESS'|'ERROR',\n 'VerifyDuration': 123,\n 'VerifyStatus': 'PENDING'|'SUCCESS'|'ERROR',\n 'ErrorCode': 'string',\n 'ErrorDetail': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_agents(MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of agents owned by an AWS account in the AWS Region specified in the request. The returned list is ordered by agent Amazon Resource Name (ARN).\n By default, this operation returns a maximum of 100 agents. This operation supports pagination that enables you to optionally reduce the number of agents returned in a response.\n If you have more agents than are returned in a response (that is, the response returns only a truncated list of your agents), the response contains a marker that you can specify in your next request to fetch the next page of agents.\n See also: AWS API Documentation\n \n \n :example: response = client.list_agents(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of agents to list.\n\n :type NextToken: string\n :param NextToken: An opaque string that indicates the position at which to begin the next list of agents.\n\n :rtype: dict\n :return: {\n 'Agents': [\n {\n 'AgentArn': 'string',\n 'Name': 'string',\n 'Status': 'ONLINE'|'OFFLINE'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_locations(MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of source and destination locations.\n If you have more locations than are returned in a response (that is, the response returns only a truncated list of your locations), the response contains a token that you can specify in your next request to fetch the next page of locations.\n See also: AWS API Documentation\n \n \n :example: response = client.list_locations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of locations to return.\n\n :type NextToken: string\n :param NextToken: An opaque string that indicates the position at which to begin the next list of locations.\n\n :rtype: dict\n :return: {\n 'Locations': [\n {\n 'LocationArn': 'string',\n 'LocationUri': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_resource(ResourceArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns all the tags associated with a specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n ResourceArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource whose tags to list.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of tags to return.\n\n :type NextToken: 
string\n :param NextToken: An opaque string that indicates the position at which to begin the next list of tags.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_task_executions(TaskArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of executions for a task.\n See also: AWS API Documentation\n \n \n :example: response = client.list_task_executions(\n TaskArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type TaskArn: string\n :param TaskArn: The Amazon Resource Name (ARN) of the task whose executions you want to list.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of task executions to list.\n\n :type NextToken: string\n :param NextToken: An opaque string that indicates the position at which to begin the next list of task executions.\n\n :rtype: dict\n :return: {\n 'TaskExecutions': [\n {\n 'TaskExecutionArn': 'string',\n 'Status': 'LAUNCHING'|'PREPARING'|'TRANSFERRING'|'VERIFYING'|'SUCCESS'|'ERROR'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tasks(MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of all the tasks.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tasks(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of tasks to return.\n\n :type NextToken: string\n :param NextToken: An opaque string that indicates the position at which to begin the next list of tasks.\n\n :rtype: dict\n :return: {\n 'Tasks': [\n {\n 'TaskArn': 'string',\n 'Status': 'AVAILABLE'|'CREATING'|'RUNNING'|'UNAVAILABLE',\n 'Name': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_task_execution(TaskArn=None, OverrideOptions=None):\n \"\"\"\n Starts a specific invocation of a task. A TaskExecution value represents an individual run of a task. Each task can have at most one TaskExecution at a time.\n For detailed information, see Task Execution in Components and Terminology in the AWS DataSync User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.start_task_execution(\n TaskArn='string',\n OverrideOptions={\n 'VerifyMode': 'POINT_IN_TIME_CONSISTENT'|'NONE',\n 'Atime': 'NONE'|'BEST_EFFORT',\n 'Mtime': 'NONE'|'PRESERVE',\n 'Uid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'Gid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'PreserveDeletedFiles': 'PRESERVE'|'REMOVE',\n 'PreserveDevices': 'NONE'|'PRESERVE',\n 'PosixPermissions': 'NONE'|'BEST_EFFORT'|'PRESERVE',\n 'BytesPerSecond': 123\n }\n )\n \n \n :type TaskArn: string\n :param TaskArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the task to start.\n \n\n :type OverrideOptions: dict\n :param OverrideOptions: Represents the options that are available to control the behavior of a StartTaskExecution operation. Behavior includes preserving metadata such as user ID (UID), group ID (GID), and file permissions, and also overwriting files in the destination, data integrity verification, and so on.\n A task has a set of default options associated with it. If you don't specify an option in StartTaskExecution , the default value is used. 
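For instance, a single run might override only the bandwidth cap. A minimal sketch (boto3 credentials assumed; the task ARN is a placeholder):\n import boto3\n client = boto3.client('datasync')\n # Start one execution of an existing task, capping bandwidth for this run only.\n response = client.start_task_execution(\n     TaskArn='arn:aws:datasync:us-east-1:111122223333:task/task-0123456789abcdef0',  # placeholder ARN\n     OverrideOptions={'BytesPerSecond': 1048576}  # roughly 1 MB per second\n )\n print(response['TaskExecutionArn'])\n 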
You can override the default options on each task execution by specifying an overriding Options value to StartTaskExecution .\n VerifyMode (string) --A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred.\n Default value: POINT_IN_TIME_CONSISTENT.\n POINT_IN_TIME_CONSISTENT: Perform verification (recommended).\n NONE: Skip verification.\n Atime (string) --A file metadata value that shows the last time a file was accessed (that is, when the file was read or written to). If you set Atime to BEST_EFFORT, DataSync attempts to preserve the original Atime attribute on all source files (that is, the version before the PREPARING phase). However, Atime 's behavior is not fully standard across platforms, so AWS DataSync can only do this on a best-effort basis.\n Default value: BEST_EFFORT.\n BEST_EFFORT: Attempt to preserve the per-file Atime value (recommended).\n NONE: Ignore Atime .\n Note\n If Atime is set to BEST_EFFORT, Mtime must be set to PRESERVE.\n If Atime is set to NONE, Mtime must also be NONE.\n Mtime (string) --A value that indicates the last time that a file was modified (that is, a file was written to) before the PREPARING phase.\n Default value: PRESERVE.\n PRESERVE: Preserve original Mtime (recommended)\n NONE: Ignore Mtime .\n Note\n If Mtime is set to PRESERVE, Atime must be set to BEST_EFFORT.\n If Mtime is set to NONE, Atime must also be set to NONE.\n Uid (string) --The user ID (UID) of the file's owner.\n Default value: INT_VALUE. This preserves the integer value of the ID.\n INT_VALUE: Preserve the integer value of UID and group ID (GID) (recommended).\n NONE: Ignore UID and GID.\n Gid (string) --The group ID (GID) of the file's owners.\n Default value: INT_VALUE. This preserves the integer value of the ID.\n INT_VALUE: Preserve the integer value of user ID (UID) and GID (recommended).\n NONE: Ignore UID and GID.\n PreserveDeletedFiles (string) --A value that specifies whether files in the destination that don't exist in the source file system should be preserved.\n Default value: PRESERVE.\n PRESERVE: Ignore such destination files (recommended).\n REMOVE: Delete destination files that aren't present in the source.\n PreserveDevices (string) --A value that determines whether AWS DataSync should preserve the metadata of block and character devices in the source file system, and recreate the files with that device name and metadata on the destination.\n Note\n AWS DataSync can't sync the actual contents of such devices, because they are nonterminal and don't return an end-of-file (EOF) marker.\n Default value: NONE.\n NONE: Ignore special devices (recommended).\n PRESERVE: Preserve character and block device metadata. This option isn't currently supported for Amazon EFS.\n PosixPermissions (string) --A value that determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file.\n Default value: PRESERVE.\n PRESERVE: Preserve POSIX-style permissions (recommended).\n NONE: Ignore permissions.\n Note\n AWS DataSync can preserve extant permissions of a source location.\n BytesPerSecond (integer) --A value that limits the bandwidth used by AWS DataSync. 
For example, if you want AWS DataSync to use a maximum of 1 MB, set this value to 1048576 (=1024*1024 ).\n \n\n :rtype: dict\n :return: {\n 'TaskExecutionArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(ResourceArn=None, Tags=None):\n \"\"\"\n Applies a key-value pair to an AWS resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n ResourceArn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource to apply the tag to.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n The tags to apply.\n (dict) --Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.\n Key (string) --The key for an AWS resource tag.\n Value (string) --The value for an AWS resource tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(ResourceArn=None, Keys=None):\n \"\"\"\n Removes a tag from an AWS resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n ResourceArn='string',\n Keys=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource to remove the tag from.\n \n\n :type Keys: list\n :param Keys: [REQUIRED]\n The keys in the key-value pair in the tag to remove.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_agent(AgentArn=None, Name=None):\n \"\"\"\n Updates the name of an agent.\n See also: AWS API Documentation\n \n \n :example: response = client.update_agent(\n AgentArn='string',\n Name='string'\n )\n \n \n :type AgentArn: string\n :param AgentArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the agent to update.\n \n\n :type Name: string\n :param Name: The name that you want to use to configure the agent.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_task(TaskArn=None, Options=None, Name=None):\n \"\"\"\n Updates the metadata associated with a task.\n See also: AWS API Documentation\n \n \n :example: response = client.update_task(\n TaskArn='string',\n Options={\n 'VerifyMode': 'POINT_IN_TIME_CONSISTENT'|'NONE',\n 'Atime': 'NONE'|'BEST_EFFORT',\n 'Mtime': 'NONE'|'PRESERVE',\n 'Uid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'Gid': 'NONE'|'INT_VALUE'|'NAME'|'BOTH',\n 'PreserveDeletedFiles': 'PRESERVE'|'REMOVE',\n 'PreserveDevices': 'NONE'|'PRESERVE',\n 'PosixPermissions': 'NONE'|'BEST_EFFORT'|'PRESERVE',\n 'BytesPerSecond': 123\n },\n Name='string'\n )\n \n \n :type TaskArn: string\n :param TaskArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the task to update.\n \n\n :type Options: dict\n :param Options: Represents the options that are available to control the behavior of a StartTaskExecution operation. Behavior includes preserving metadata such as user ID (UID), group ID (GID), and file permissions, and also overwriting files in the destination, data integrity verification, and so on.\n A task has a set of default options associated with it. If you don't specify an option in StartTaskExecution , the default value is used. 
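As a sketch (boto3 credentials assumed; the ARN is a placeholder), a task's stored defaults can be changed so that future executions skip verification:\n import boto3\n client = boto3.client('datasync')\n # Persistently change the task's default options; later executions inherit them.\n client.update_task(\n     TaskArn='arn:aws:datasync:us-east-1:111122223333:task/task-0123456789abcdef0',  # placeholder ARN\n     Options={'VerifyMode': 'NONE'}\n )\n 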
You can override the default options on each task execution by specifying an overriding Options value to StartTaskExecution .\n VerifyMode (string) --A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred.\n Default value: POINT_IN_TIME_CONSISTENT.\n POINT_IN_TIME_CONSISTENT: Perform verification (recommended).\n NONE: Skip verification.\n Atime (string) --A file metadata value that shows the last time a file was accessed (that is, when the file was read or written to). If you set Atime to BEST_EFFORT, DataSync attempts to preserve the original Atime attribute on all source files (that is, the version before the PREPARING phase). However, Atime 's behavior is not fully standard across platforms, so AWS DataSync can only do this on a best-effort basis.\n Default value: BEST_EFFORT.\n BEST_EFFORT: Attempt to preserve the per-file Atime value (recommended).\n NONE: Ignore Atime .\n Note\n If Atime is set to BEST_EFFORT, Mtime must be set to PRESERVE.\n If Atime is set to NONE, Mtime must also be NONE.\n Mtime (string) --A value that indicates the last time that a file was modified (that is, a file was written to) before the PREPARING phase.\n Default value: PRESERVE.\n PRESERVE: Preserve original Mtime (recommended)\n NONE: Ignore Mtime .\n Note\n If Mtime is set to PRESERVE, Atime must be set to BEST_EFFORT.\n If Mtime is set to NONE, Atime must also be set to NONE.\n Uid (string) --The user ID (UID) of the file's owner.\n Default value: INT_VALUE. This preserves the integer value of the ID.\n INT_VALUE: Preserve the integer value of UID and group ID (GID) (recommended).\n NONE: Ignore UID and GID.\n Gid (string) --The group ID (GID) of the file's owners.\n Default value: INT_VALUE. This preserves the integer value of the ID.\n INT_VALUE: Preserve the integer value of user ID (UID) and GID (recommended).\n NONE: Ignore UID and GID.\n PreserveDeletedFiles (string) --A value that specifies whether files in the destination that don't exist in the source file system should be preserved.\n Default value: PRESERVE.\n PRESERVE: Ignore such destination files (recommended).\n REMOVE: Delete destination files that aren't present in the source.\n PreserveDevices (string) --A value that determines whether AWS DataSync should preserve the metadata of block and character devices in the source file system, and recreate the files with that device name and metadata on the destination.\n Note\n AWS DataSync can't sync the actual contents of such devices, because they are nonterminal and don't return an end-of-file (EOF) marker.\n Default value: NONE.\n NONE: Ignore special devices (recommended).\n PRESERVE: Preserve character and block device metadata. This option isn't currently supported for Amazon EFS.\n PosixPermissions (string) --A value that determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file.\n Default value: PRESERVE.\n PRESERVE: Preserve POSIX-style permissions (recommended).\n NONE: Ignore permissions.\n Note\n AWS DataSync can preserve extant permissions of a source location.\n BytesPerSecond (integer) --A value that limits the bandwidth used by AWS DataSync. 
For example, if you want AWS DataSync to use a maximum of 1 MB, set this value to 1048576 (=1024*1024 ).\n \n\n :type Name: string\n :param Name: The name of the task to update.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6252545118331909, "alphanum_fraction": 0.6283086538314819, "avg_line_length": 31.779056549072266, "blob_id": "c7e16ae8e1d91ed1990e7a83ca20733b27e1e23c", "content_id": "1b269c1425b2760e07606324cb9fe50cbe8b8c22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28486, "license_type": "permissive", "max_line_length": 365, "num_lines": 869, "path": "/pyboto3/quicksight.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_group(GroupName=None, Description=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Creates an Amazon QuickSight group.\n The permissions resource is ``arn:aws:quicksight:us-east-1:<relevant-aws-account-id> :group/default/<group-name> `` .\n The response is a group object.\n See also: AWS API Documentation\n \n \n :example: response = client.create_group(\n GroupName='string',\n Description='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n A name for the group that you want to create.\n \n\n :type Description: string\n :param Description: A description for the group that you want to create.\n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. 
Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'Group': {\n 'Arn': 'string',\n 'GroupName': 'string',\n 'Description': 'string'\n },\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef create_group_membership(MemberName=None, GroupName=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Adds an Amazon QuickSight user to an Amazon QuickSight group.\n The permissions resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :group/default/<group-name> `` .\n The condition resource is the user name.\n The condition key is quicksight:UserName .\n The response is the group member object.\n See also: AWS API Documentation\n \n \n :example: response = client.create_group_membership(\n MemberName='string',\n GroupName='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type MemberName: string\n :param MemberName: [REQUIRED]\n The name of the user that you want to add to the group membership.\n \n\n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the group that you want to add the user to.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'GroupMember': {\n 'Arn': 'string',\n 'MemberName': 'string'\n },\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_group(GroupName=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Removes a user group from Amazon QuickSight.\n The permissions resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :group/default/<group-name> `` .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_group(\n GroupName='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the group that you want to delete.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_group_membership(MemberName=None, GroupName=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Removes a user from a group so that the user is no longer a member of the group.\n The permissions resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :group/default/<group-name> `` .\n The condition resource is the user name.\n The condition key is quicksight:UserName .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_group_membership(\n MemberName='string',\n GroupName='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type MemberName: string\n :param MemberName: [REQUIRED]\n The name of the user that you want to delete from the group membership.\n \n\n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the group that you want to delete the user from.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. 
Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_user(UserName=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.\n The permission resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :user/default/<user-name> `` .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user(\n UserName='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The name of the user that you want to delete.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_group(GroupName=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Returns an Amazon QuickSight group's description and Amazon Resource Name (ARN).\n The permissions resource is ``arn:aws:quicksight:us-east-1:<relevant-aws-account-id> :group/default/<group-name> `` .\n The response is the group object.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_group(\n GroupName='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the group that you want to describe.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'Group': {\n 'Arn': 'string',\n 'GroupName': 'string',\n 'Description': 'string'\n },\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_user(UserName=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Returns information about a user, given the user name.\n The permission resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :user/default/<user-name> `` .\n The response is a user object that contains the user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user(\n UserName='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The name of the user that you want to describe.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. 
Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'User': {\n 'Arn': 'string',\n 'UserName': 'string',\n 'Email': 'string',\n 'Role': 'ADMIN'|'AUTHOR'|'READER'|'RESTRICTED_AUTHOR'|'RESTRICTED_READER',\n 'IdentityType': 'IAM'|'QUICKSIGHT',\n 'Active': True|False\n },\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_dashboard_embed_url(AwsAccountId=None, DashboardId=None, IdentityType=None, SessionLifetimeInMinutes=None, UndoRedoDisabled=None, ResetDisabled=None):\n \"\"\"\n Generates a server-side embeddable URL and authorization code. Before this can work properly, first you need to configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards .\n Currently, you can use GetDashboardEmbedURL only from the server, not from the user's browser.\n Assume the role with permissions enabled for actions: quickSight:RegisterUser and quicksight:GetDashboardEmbedURL . You can use assume-role, assume-role-with-web-identity, or assume-role-with-saml.\n If the user does not exist in QuickSight, register the user.\n Then get the URL for the embedded dashboard.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dashboard_embed_url(\n AwsAccountId='string',\n DashboardId='string',\n IdentityType='IAM'|'QUICKSIGHT',\n SessionLifetimeInMinutes=123,\n UndoRedoDisabled=True|False,\n ResetDisabled=True|False\n )\n \n \n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n AWS account ID that contains the dashboard you are embedding.\n \n\n :type DashboardId: string\n :param DashboardId: [REQUIRED]\n The ID for the dashboard, which is also added to the IAM policy.\n \n\n :type IdentityType: string\n :param IdentityType: [REQUIRED]\n The authentication method the user uses to sign in (IAM only).\n \n\n :type SessionLifetimeInMinutes: integer\n :param SessionLifetimeInMinutes: How many minutes the session is valid. The session lifetime must be between 15 and 600 minutes.\n\n :type UndoRedoDisabled: boolean\n :param UndoRedoDisabled: Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.\n\n :type ResetDisabled: boolean\n :param ResetDisabled: Remove the reset button on the embedded dashboard. The default is FALSE, which allows the reset button.\n\n :rtype: dict\n :return: {\n 'EmbedUrl': 'string',\n 'Status': 123,\n 'RequestId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_group_memberships(GroupName=None, NextToken=None, MaxResults=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Lists member users in a group.\n The permissions resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :group/default/<group-name> `` .\n The response is a list of group member objects.\n See also: AWS API Documentation\n \n \n :example: response = client.list_group_memberships(\n GroupName='string',\n NextToken='string',\n MaxResults=123,\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the group that you want to see a membership list of.\n \n\n :type NextToken: string\n :param NextToken: A pagination token that can be used in a subsequent request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return from this request.\n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'GroupMemberList': [\n {\n 'Arn': 'string',\n 'MemberName': 'string'\n },\n ],\n 'NextToken': 'string',\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef list_groups(AwsAccountId=None, NextToken=None, MaxResults=None, Namespace=None):\n \"\"\"\n Lists all user groups in Amazon QuickSight.\n The permissions resource is arn:aws:quicksight:us-east-1:*<aws-account-id>* :group/default/* .\n The response is a list of group objects.\n See also: AWS API Documentation\n \n \n :example: response = client.list_groups(\n AwsAccountId='string',\n NextToken='string',\n MaxResults=123,\n Namespace='string'\n )\n \n \n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type NextToken: string\n :param NextToken: A pagination token that can be used in a subsequent request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return.\n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. 
Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'GroupList': [\n {\n 'Arn': 'string',\n 'GroupName': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string',\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef list_user_groups(UserName=None, AwsAccountId=None, Namespace=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.\n The permission resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :user/default/<user-name> `` .\n The response is one or more group objects.\n See also: AWS API Documentation\n \n \n :example: response = client.list_user_groups(\n UserName='string',\n AwsAccountId='string',\n Namespace='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The Amazon QuickSight user name that you want to list group memberships for.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The AWS Account ID that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :type NextToken: string\n :param NextToken: A pagination token that can be used in a subsequent request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return from this request.\n\n :rtype: dict\n :return: {\n 'GroupList': [\n {\n 'Arn': 'string',\n 'GroupName': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string',\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef list_users(AwsAccountId=None, NextToken=None, MaxResults=None, Namespace=None):\n \"\"\"\n Returns a list of all of the Amazon QuickSight users belonging to this account.\n The permission resource is arn:aws:quicksight:us-east-1:*<aws-account-id>* :user/default/* .\n The response is a list of user objects, containing each user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.\n See also: AWS API Documentation\n \n \n :example: response = client.list_users(\n AwsAccountId='string',\n NextToken='string',\n MaxResults=123,\n Namespace='string'\n )\n \n \n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type NextToken: string\n :param NextToken: A pagination token that can be used in a subsequent request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return from this request.\n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. 
Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'UserList': [\n {\n 'Arn': 'string',\n 'UserName': 'string',\n 'Email': 'string',\n 'Role': 'ADMIN'|'AUTHOR'|'READER'|'RESTRICTED_AUTHOR'|'RESTRICTED_READER',\n 'IdentityType': 'IAM'|'QUICKSIGHT',\n 'Active': True|False\n },\n ],\n 'NextToken': 'string',\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef register_user(IdentityType=None, Email=None, UserRole=None, IamArn=None, SessionName=None, AwsAccountId=None, Namespace=None, UserName=None):\n \"\"\"\n Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request.\n The permission resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :user/default/<user-name> `` .\n The condition resource is the Amazon Resource Name (ARN) for the IAM user or role, and the session name.\n The condition keys are quicksight:IamArn and quicksight:SessionName .\n See also: AWS API Documentation\n \n \n :example: response = client.register_user(\n IdentityType='IAM'|'QUICKSIGHT',\n Email='string',\n UserRole='ADMIN'|'AUTHOR'|'READER'|'RESTRICTED_AUTHOR'|'RESTRICTED_READER',\n IamArn='string',\n SessionName='string',\n AwsAccountId='string',\n Namespace='string',\n UserName='string'\n )\n \n \n :type IdentityType: string\n :param IdentityType: [REQUIRED]\n Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:\n IAM : A user whose identity maps to an existing IAM user or role.\n QUICKSIGHT : A user whose identity is owned and managed internally by Amazon QuickSight.\n \n\n :type Email: string\n :param Email: [REQUIRED]\n The email address of the user that you want to register.\n \n\n :type UserRole: string\n :param UserRole: [REQUIRED]\n The Amazon QuickSight role of the user. The user role can be one of the following:\n READER : A user who has read-only access to dashboards.\n AUTHOR : A user who can create data sources, data sets, analyses, and dashboards.\n ADMIN : A user who is an author, who can also manage Amazon QuickSight settings.\n \n\n :type IamArn: string\n :param IamArn: The ARN of the IAM user or role that you are registering with Amazon QuickSight.\n\n :type SessionName: string\n :param SessionName: The name of the session with the assumed IAM role. By using this parameter, you can register multiple users with the same IAM role, provided that each has a different session name. For more information on assuming IAM roles, see assume-role (https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role.html) in the AWS CLI Reference.\n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. 
Currently, you should set this to default .\n \n\n :type UserName: string\n :param UserName: The Amazon QuickSight user name that you want to create for the user you are registering.\n\n :rtype: dict\n :return: {\n 'User': {\n 'Arn': 'string',\n 'UserName': 'string',\n 'Email': 'string',\n 'Role': 'ADMIN'|'AUTHOR'|'READER'|'RESTRICTED_AUTHOR'|'RESTRICTED_READER',\n 'IdentityType': 'IAM'|'QUICKSIGHT',\n 'Active': True|False\n },\n 'UserInvitationUrl': 'string',\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef update_group(GroupName=None, Description=None, AwsAccountId=None, Namespace=None):\n \"\"\"\n Changes a group description.\n The permissions resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :group/default/<group-name> `` .\n The response is a group object.\n See also: AWS API Documentation\n \n \n :example: response = client.update_group(\n GroupName='string',\n Description='string',\n AwsAccountId='string',\n Namespace='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the group that you want to update.\n \n\n :type Description: string\n :param Description: The description for the group that you want to update.\n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :rtype: dict\n :return: {\n 'Group': {\n 'Arn': 'string',\n 'GroupName': 'string',\n 'Description': 'string'\n },\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef update_user(UserName=None, AwsAccountId=None, Namespace=None, Email=None, Role=None):\n \"\"\"\n Updates an Amazon QuickSight user.\n The permission resource is ``arn:aws:quicksight:us-east-1:<aws-account-id> :user/default/<user-name> `` .\n The response is a user object that contains the user's Amazon QuickSight user name, email address, active or inactive status in Amazon QuickSight, Amazon QuickSight role, and Amazon Resource Name (ARN).\n See also: AWS API Documentation\n \n \n :example: response = client.update_user(\n UserName='string',\n AwsAccountId='string',\n Namespace='string',\n Email='string',\n Role='ADMIN'|'AUTHOR'|'READER'|'RESTRICTED_AUTHOR'|'RESTRICTED_READER'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The Amazon QuickSight user name that you want to update.\n \n\n :type AwsAccountId: string\n :param AwsAccountId: [REQUIRED]\n The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace. Currently, you should set this to default .\n \n\n :type Email: string\n :param Email: [REQUIRED]\n The email address of the user that you want to update.\n \n\n :type Role: string\n :param Role: [REQUIRED]\n The Amazon QuickSight role of the user. 
The user role can be one of the following:\n READER : A user who has read-only access to dashboards.\n AUTHOR : A user who can create data sources, data sets, analyses, and dashboards.\n ADMIN : A user who is an author, who can also manage Amazon QuickSight settings.\n \n\n :rtype: dict\n :return: {\n 'User': {\n 'Arn': 'string',\n 'UserName': 'string',\n 'Email': 'string',\n 'Role': 'ADMIN'|'AUTHOR'|'READER'|'RESTRICTED_AUTHOR'|'RESTRICTED_READER',\n 'IdentityType': 'IAM'|'QUICKSIGHT',\n 'Active': True|False\n },\n 'RequestId': 'string',\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5986538529396057, "alphanum_fraction": 0.6032211780548096, "avg_line_length": 33.435428619384766, "blob_id": "bdee1243bb5f58f78692fbe4cf5ca5bf5c9d4bb6", "content_id": "2536ffaa09cf6ddb9671270a1010c5a54389d50b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20800, "license_type": "permissive", "max_line_length": 520, "num_lines": 604, "path": "/pyboto3/signer.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_signing_profile(profileName=None):\n \"\"\"\n Changes the state of an ACTIVE signing profile to CANCELED . A canceled profile is still viewable with the ListSigningProfiles operation, but it cannot perform new signing jobs, and is deleted two years after cancelation.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_signing_profile(\n profileName='string'\n )\n \n \n :type profileName: string\n :param profileName: [REQUIRED]\n The name of the signing profile to be canceled.\n \n\n \"\"\"\n pass\n\ndef describe_signing_job(jobId=None):\n \"\"\"\n Returns information about a specific code signing job. 
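A minimal sketch (boto3 credentials assumed; the job ID shown is a placeholder) that checks on a job's progress:\n import boto3\n client = boto3.client('signer')\n # Fetch the current status of a previously started signing job.\n job = client.describe_signing_job(jobId='my-job-id')  # placeholder job ID\n print(job['status'], job.get('statusReason', ''))\n 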
You specify the job by using the jobId value that is returned by the StartSigningJob operation.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_signing_job(\n jobId='string'\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The ID of the signing job on input.\n \n\n :rtype: dict\n :return: {\n 'jobId': 'string',\n 'source': {\n 's3': {\n 'bucketName': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n 'signingMaterial': {\n 'certificateArn': 'string'\n },\n 'platformId': 'string',\n 'profileName': 'string',\n 'overrides': {\n 'signingConfiguration': {\n 'encryptionAlgorithm': 'RSA'|'ECDSA',\n 'hashAlgorithm': 'SHA1'|'SHA256'\n }\n },\n 'signingParameters': {\n 'string': 'string'\n },\n 'createdAt': datetime(2015, 1, 1),\n 'completedAt': datetime(2015, 1, 1),\n 'requestedBy': 'string',\n 'status': 'InProgress'|'Failed'|'Succeeded',\n 'statusReason': 'string',\n 'signedObject': {\n 's3': {\n 'bucketName': 'string',\n 'key': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_signing_platform(platformId=None):\n \"\"\"\n Returns information on a specific signing platform.\n See also: AWS API Documentation\n \n \n :example: response = client.get_signing_platform(\n platformId='string'\n )\n \n \n :type platformId: string\n :param platformId: [REQUIRED]\n The ID of the target signing platform.\n \n\n :rtype: dict\n :return: {\n 'platformId': 'string',\n 'displayName': 'string',\n 'partner': 'string',\n 'target': 'string',\n 'category': 'AWSIoT',\n 'signingConfiguration': {\n 'encryptionAlgorithmOptions': {\n 'allowedValues': [\n 'RSA'|'ECDSA',\n ],\n 'defaultValue': 'RSA'|'ECDSA'\n },\n 'hashAlgorithmOptions': {\n 'allowedValues': [\n 'SHA1'|'SHA256',\n ],\n 'defaultValue': 'SHA1'|'SHA256'\n }\n },\n 'signingImageFormat': {\n 'supportedFormats': [\n 'JSON',\n ],\n 'defaultFormat': 'JSON'\n },\n 'maxSizeInMB': 123\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_signing_profile(profileName=None):\n \"\"\"\n Returns information on a specific signing profile.\n See also: AWS API Documentation\n \n \n :example: response = client.get_signing_profile(\n profileName='string'\n )\n \n \n :type profileName: string\n :param profileName: [REQUIRED]\n The name of the target signing profile.\n \n\n :rtype: dict\n :return: {\n 'profileName': 'string',\n 'signingMaterial': {\n 'certificateArn': 'string'\n },\n 'platformId': 'string',\n 'overrides': {\n 'signingConfiguration': {\n 'encryptionAlgorithm': 'RSA'|'ECDSA',\n 'hashAlgorithm': 'SHA1'|'SHA256'\n }\n },\n 'signingParameters': {\n 'string': 'string'\n },\n 'status': 'Active'|'Canceled'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_signing_jobs(status=None, platformId=None, requestedBy=None, maxResults=None, nextToken=None):\n \"\"\"\n Lists all your signing jobs. You can use the maxResults parameter to limit the number of signing jobs that are returned in the response. If additional jobs remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to fetch the remaining values. You can continue calling ListSigningJobs with your maxResults parameter and with new values that AWS Signer returns in the nextToken parameter until all of your signing jobs have been returned.\n See also: AWS API Documentation\n \n \n :example: response = client.list_signing_jobs(\n status='InProgress'|'Failed'|'Succeeded',\n platformId='string',\n requestedBy='string',\n maxResults=123,\n nextToken='string'\n )\n \n \n :type status: string\n :param status: A status value with which to filter your results.\n\n :type platformId: string\n :param platformId: The ID of microcontroller platform that you specified for the distribution of your code image.\n\n :type requestedBy: string\n :param requestedBy: The IAM principal that requested the signing job.\n\n :type maxResults: integer\n :param maxResults: Specifies the maximum number of items to return in the response. 
Use this parameter when paginating results. If additional items exist beyond the number you specify, the nextToken element is set in the response. Use the nextToken value in a subsequent request to retrieve additional items.\n\n :type nextToken: string\n :param nextToken: String for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.\n\n :rtype: dict\n :return: {\n 'jobs': [\n {\n 'jobId': 'string',\n 'source': {\n 's3': {\n 'bucketName': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n 'signedObject': {\n 's3': {\n 'bucketName': 'string',\n 'key': 'string'\n }\n },\n 'signingMaterial': {\n 'certificateArn': 'string'\n },\n 'createdAt': datetime(2015, 1, 1),\n 'status': 'InProgress'|'Failed'|'Succeeded'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_signing_platforms(category=None, partner=None, target=None, maxResults=None, nextToken=None):\n \"\"\"\n Lists all signing platforms available in AWS Signer that match the request parameters. If additional jobs remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to fetch the remaining values. You can continue calling ListSigningJobs with your maxResults parameter and with new values that AWS Signer returns in the nextToken parameter until all of your signing jobs have been returned.\n See also: AWS API Documentation\n \n \n :example: response = client.list_signing_platforms(\n category='string',\n partner='string',\n target='string',\n maxResults=123,\n nextToken='string'\n )\n \n \n :type category: string\n :param category: The category type of a signing platform.\n\n :type partner: string\n :param partner: Any partner entities connected to a signing platform.\n\n :type target: string\n :param target: The validation template that is used by the target signing platform.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to be returned by this operation.\n\n :type nextToken: string\n :param nextToken: Value for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.\n\n :rtype: dict\n :return: {\n 'platforms': [\n {\n 'platformId': 'string',\n 'displayName': 'string',\n 'partner': 'string',\n 'target': 'string',\n 'category': 'AWSIoT',\n 'signingConfiguration': {\n 'encryptionAlgorithmOptions': {\n 'allowedValues': [\n 'RSA'|'ECDSA',\n ],\n 'defaultValue': 'RSA'|'ECDSA'\n },\n 'hashAlgorithmOptions': {\n 'allowedValues': [\n 'SHA1'|'SHA256',\n ],\n 'defaultValue': 'SHA1'|'SHA256'\n }\n },\n 'signingImageFormat': {\n 'supportedFormats': [\n 'JSON',\n ],\n 'defaultFormat': 'JSON'\n },\n 'maxSizeInMB': 123\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_signing_profiles(includeCanceled=None, maxResults=None, nextToken=None):\n \"\"\"\n Lists all available signing profiles in your AWS account. Returns only profiles with an ACTIVE status unless the includeCanceled request field is set to true . If additional jobs remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to fetch the remaining values. 
You can continue calling ListSigningJobs with your maxResults parameter and with new values that AWS Signer returns in the nextToken parameter until all of your signing jobs have been returned.\n See also: AWS API Documentation\n \n \n :example: response = client.list_signing_profiles(\n includeCanceled=True|False,\n maxResults=123,\n nextToken='string'\n )\n \n \n :type includeCanceled: boolean\n :param includeCanceled: Designates whether to include profiles with the status of CANCELED .\n\n :type maxResults: integer\n :param maxResults: The maximum number of profiles to be returned.\n\n :type nextToken: string\n :param nextToken: Value for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.\n\n :rtype: dict\n :return: {\n 'profiles': [\n {\n 'profileName': 'string',\n 'signingMaterial': {\n 'certificateArn': 'string'\n },\n 'platformId': 'string',\n 'signingParameters': {\n 'string': 'string'\n },\n 'status': 'Active'|'Canceled'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef put_signing_profile(profileName=None, signingMaterial=None, platformId=None, overrides=None, signingParameters=None):\n \"\"\"\n Creates a signing profile. A signing profile is an AWS Signer template that can be used to carry out a pre-defined signing job. For more information, see http://docs.aws.amazon.com/signer/latest/developerguide/gs-profile.html\n See also: AWS API Documentation\n \n \n :example: response = client.put_signing_profile(\n profileName='string',\n signingMaterial={\n 'certificateArn': 'string'\n },\n platformId='string',\n overrides={\n 'signingConfiguration': {\n 'encryptionAlgorithm': 'RSA'|'ECDSA',\n 'hashAlgorithm': 'SHA1'|'SHA256'\n }\n },\n signingParameters={\n 'string': 'string'\n }\n )\n \n \n :type profileName: string\n :param profileName: [REQUIRED]\n The name of the signing profile to be created.\n \n\n :type signingMaterial: dict\n :param signingMaterial: [REQUIRED]\n The AWS Certificate Manager certificate that will be used to sign code with the new signing profile.\n certificateArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the certificates that is used to sign your code.\n \n\n :type platformId: string\n :param platformId: [REQUIRED]\n The ID of the signing profile to be created.\n \n\n :type overrides: dict\n :param overrides: A subfield of platform . This specifies any different configuration options that you want to apply to the chosen platform (such as a different hash-algorithm or signing-algorithm ).\n signingConfiguration (dict) --A signing configuration that overrides the default encryption or hash algorithm of a signing job.\n encryptionAlgorithm (string) --A specified override of the default encryption algorithm that is used in an AWS Signer job.\n hashAlgorithm (string) --A specified override of the default hash algorithm that is used in an AWS Signer job.\n \n \n\n :type signingParameters: dict\n :param signingParameters: Map of key-value pairs for signing. These can include any information that you want to use during signing.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'arn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_signing_job(source=None, destination=None, profileName=None, clientRequestToken=None):\n \"\"\"\n Initiates a signing job to be performed on the code provided. 
Signing jobs are viewable by the ListSigningJobs operation for two years after they are performed. Note the following requirements:\n You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob .\n For a Java example that shows how to use this action, see http://docs.aws.amazon.com/acm/latest/userguide/\n See also: AWS API Documentation\n \n \n :example: response = client.start_signing_job(\n source={\n 's3': {\n 'bucketName': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n destination={\n 's3': {\n 'bucketName': 'string',\n 'prefix': 'string'\n }\n },\n profileName='string',\n clientRequestToken='string'\n )\n \n \n :type source: dict\n :param source: [REQUIRED]\n The S3 bucket that contains the object to sign or a BLOB that contains your raw code.\n s3 (dict) --The S3Source object.\n bucketName (string) -- [REQUIRED]Name of the S3 bucket.\n key (string) -- [REQUIRED]Key name of the bucket object that contains your unsigned code.\n version (string) -- [REQUIRED]Version of your source image in your version enabled S3 bucket.\n \n \n\n :type destination: dict\n :param destination: [REQUIRED]\n The S3 bucket in which to save your signed object. The destination contains the name of your bucket and an optional prefix.\n s3 (dict) --The S3Destination object.\n bucketName (string) --Name of the S3 bucket.\n prefix (string) --An Amazon S3 prefix that you can use to limit responses to those that begin with the specified prefix.\n \n \n\n :type profileName: string\n :param profileName: The name of the signing profile.\n\n :type clientRequestToken: string\n :param clientRequestToken: [REQUIRED]\n String that identifies the signing request. All calls after the first that use this token return the same response as the first call.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'jobId': 'string'\n }\n \n \n :returns: \n source (dict) -- [REQUIRED]\n The S3 bucket that contains the object to sign or a BLOB that contains your raw code.\n \n s3 (dict) --The S3Source object.\n \n bucketName (string) -- [REQUIRED]Name of the S3 bucket.\n \n key (string) -- [REQUIRED]Key name of the bucket object that contains your unsigned code.\n \n version (string) -- [REQUIRED]Version of your source image in your version enabled S3 bucket.\n \n \n \n \n \n destination (dict) -- [REQUIRED]\n The S3 bucket in which to save your signed object. The destination contains the name of your bucket and an optional prefix.\n \n s3 (dict) --The S3Destination object.\n \n bucketName (string) --Name of the S3 bucket.\n \n prefix (string) --An Amazon S3 prefix that you can use to limit responses to those that begin with the specified prefix.\n \n \n \n \n \n profileName (string) -- The name of the signing profile.\n clientRequestToken (string) -- [REQUIRED]\n String that identifies the signing request. 
All calls after the first that use this token return the same response as the first call.\n This field is autopopulated if not provided.\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5391294956207275, "alphanum_fraction": 0.5460977554321289, "avg_line_length": 39.907894134521484, "blob_id": "d3c08defdec0bbf8fd2c1b7c2478158b66d8096d", "content_id": "e2d3a785d692490566320c77559ee17da4d95c9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9328, "license_type": "permissive", "max_line_length": 353, "num_lines": 228, "path": "/pyboto3/comprehendmedical.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef detect_entities(Text=None):\n \"\"\"\n Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information .\n See also: AWS API Documentation\n \n \n :example: response = client.detect_entities(\n Text='string'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 text string containing the clinical content being examined for entities. 
Each string must contain fewer than 20,000 bytes of characters.\n \n\n :rtype: dict\n :return: {\n 'Entities': [\n {\n 'Id': 123,\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'Score': ...,\n 'Text': 'string',\n 'Category': 'MEDICATION'|'MEDICAL_CONDITION'|'PROTECTED_HEALTH_INFORMATION'|'TEST_TREATMENT_PROCEDURE'|'ANATOMY',\n 'Type': 'NAME'|'DOSAGE'|'ROUTE_OR_MODE'|'FORM'|'FREQUENCY'|'DURATION'|'GENERIC_NAME'|'BRAND_NAME'|'STRENGTH'|'RATE'|'ACUITY'|'TEST_NAME'|'TEST_VALUE'|'TEST_UNITS'|'PROCEDURE_NAME'|'TREATMENT_NAME'|'DATE'|'AGE'|'CONTACT_POINT'|'EMAIL'|'IDENTIFIER'|'URL'|'ADDRESS'|'PROFESSION'|'SYSTEM_ORGAN_SITE'|'DIRECTION'|'QUALITY'|'QUANTITY',\n 'Traits': [\n {\n 'Name': 'SIGN'|'SYMPTOM'|'DIAGNOSIS'|'NEGATION',\n 'Score': ...\n },\n ],\n 'Attributes': [\n {\n 'Type': 'NAME'|'DOSAGE'|'ROUTE_OR_MODE'|'FORM'|'FREQUENCY'|'DURATION'|'GENERIC_NAME'|'BRAND_NAME'|'STRENGTH'|'RATE'|'ACUITY'|'TEST_NAME'|'TEST_VALUE'|'TEST_UNITS'|'PROCEDURE_NAME'|'TREATMENT_NAME'|'DATE'|'AGE'|'CONTACT_POINT'|'EMAIL'|'IDENTIFIER'|'URL'|'ADDRESS'|'PROFESSION'|'SYSTEM_ORGAN_SITE'|'DIRECTION'|'QUALITY'|'QUANTITY',\n 'Score': ...,\n 'RelationshipScore': ...,\n 'Id': 123,\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'Text': 'string',\n 'Traits': [\n {\n 'Name': 'SIGN'|'SYMPTOM'|'DIAGNOSIS'|'NEGATION',\n 'Score': ...\n },\n ]\n },\n ]\n },\n ],\n 'UnmappedAttributes': [\n {\n 'Type': 'MEDICATION'|'MEDICAL_CONDITION'|'PROTECTED_HEALTH_INFORMATION'|'TEST_TREATMENT_PROCEDURE'|'ANATOMY',\n 'Attribute': {\n 'Type': 'NAME'|'DOSAGE'|'ROUTE_OR_MODE'|'FORM'|'FREQUENCY'|'DURATION'|'GENERIC_NAME'|'BRAND_NAME'|'STRENGTH'|'RATE'|'ACUITY'|'TEST_NAME'|'TEST_VALUE'|'TEST_UNITS'|'PROCEDURE_NAME'|'TREATMENT_NAME'|'DATE'|'AGE'|'CONTACT_POINT'|'EMAIL'|'IDENTIFIER'|'URL'|'ADDRESS'|'PROFESSION'|'SYSTEM_ORGAN_SITE'|'DIRECTION'|'QUALITY'|'QUANTITY',\n 'Score': ...,\n 'RelationshipScore': ...,\n 'Id': 123,\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'Text': 'string',\n 'Traits': [\n {\n 'Name': 'SIGN'|'SYMPTOM'|'DIAGNOSIS'|'NEGATION',\n 'Score': ...\n },\n ]\n }\n },\n ],\n 'PaginationToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef detect_phi(Text=None):\n \"\"\"\n Inspects the clinical text for personal health information (PHI) entities and entity category, location, and confidence score on that information.\n See also: AWS API Documentation\n \n \n :example: response = client.detect_phi(\n Text='string'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 text string containing the clinical content being examined for PHI entities. 
Each string must contain fewer than 20,000 bytes of characters.\n \n\n :rtype: dict\n :return: {\n 'Entities': [\n {\n 'Id': 123,\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'Score': ...,\n 'Text': 'string',\n 'Category': 'MEDICATION'|'MEDICAL_CONDITION'|'PROTECTED_HEALTH_INFORMATION'|'TEST_TREATMENT_PROCEDURE'|'ANATOMY',\n 'Type': 'NAME'|'DOSAGE'|'ROUTE_OR_MODE'|'FORM'|'FREQUENCY'|'DURATION'|'GENERIC_NAME'|'BRAND_NAME'|'STRENGTH'|'RATE'|'ACUITY'|'TEST_NAME'|'TEST_VALUE'|'TEST_UNITS'|'PROCEDURE_NAME'|'TREATMENT_NAME'|'DATE'|'AGE'|'CONTACT_POINT'|'EMAIL'|'IDENTIFIER'|'URL'|'ADDRESS'|'PROFESSION'|'SYSTEM_ORGAN_SITE'|'DIRECTION'|'QUALITY'|'QUANTITY',\n 'Traits': [\n {\n 'Name': 'SIGN'|'SYMPTOM'|'DIAGNOSIS'|'NEGATION',\n 'Score': ...\n },\n ],\n 'Attributes': [\n {\n 'Type': 'NAME'|'DOSAGE'|'ROUTE_OR_MODE'|'FORM'|'FREQUENCY'|'DURATION'|'GENERIC_NAME'|'BRAND_NAME'|'STRENGTH'|'RATE'|'ACUITY'|'TEST_NAME'|'TEST_VALUE'|'TEST_UNITS'|'PROCEDURE_NAME'|'TREATMENT_NAME'|'DATE'|'AGE'|'CONTACT_POINT'|'EMAIL'|'IDENTIFIER'|'URL'|'ADDRESS'|'PROFESSION'|'SYSTEM_ORGAN_SITE'|'DIRECTION'|'QUALITY'|'QUANTITY',\n 'Score': ...,\n 'RelationshipScore': ...,\n 'Id': 123,\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'Text': 'string',\n 'Traits': [\n {\n 'Name': 'SIGN'|'SYMPTOM'|'DIAGNOSIS'|'NEGATION',\n 'Score': ...\n },\n ]\n },\n ]\n },\n ],\n 'PaginationToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.7221415638923645, "alphanum_fraction": 0.7240099906921387, "avg_line_length": 69.31474304199219, "blob_id": "2732bcb4ebbe364f640703e5f8fccf49dc31f2e2", "content_id": "5cd6b04056cf86ee15c78eaf18e320507b20085b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74394, "license_type": "permissive", "max_line_length": 814, "num_lines": 1058, "path": "/pyboto3/secretsmanager.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_rotate_secret(SecretId=None):\n \"\"\"\n Disables automatic scheduled rotation and cancels the rotation of a secret if one is currently in progress.\n To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays set to a value greater than 0. This will immediately rotate your secret and then enable the automatic schedule.\n To successfully start a rotation, the staging label AWSPENDING must be in one of the following states:\n If the staging label AWSPENDING is attached to a different version than the version with AWSCURRENT then the attempt to rotate fails.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_rotate_secret(\n SecretId='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret for which you want to cancel a rotation request. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. 
A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'VersionId': 'string'\n }\n \n \n :returns: \n secretsmanager:CancelRotateSecret\n \n \"\"\"\n pass\n\ndef create_secret(Name=None, ClientRequestToken=None, Description=None, KmsKeyId=None, SecretBinary=None, SecretString=None, Tags=None):\n \"\"\"\n Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.\n Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and are not included in the list.\n You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.create_secret(\n Name='string',\n ClientRequestToken='string',\n Description='string',\n KmsKeyId='string',\n SecretBinary=b'bytes',\n SecretString='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Specifies the friendly name of the new secret.\n The secret name must be ASCII letters, digits, or the following characters : /_+=.@-\n Note\n Don't end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. This is because Secrets Manager automatically adds a hyphen and six random characters at the end of the ARN.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) If you include SecretString or SecretBinary , then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.\n Note\n If you use the AWS CLI or one of the AWS SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include that value in the request.\n This value helps ensure idempotency. 
Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.\n If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.\n If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request, then the request is ignored (the operation is idempotent).\n If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.\n This value becomes the VersionId of the new version.\n This field is autopopulated if not provided.\n \n\n :type Description: string\n :param Description: (Optional) Specifies a user-provided description of the secret.\n\n :type KmsKeyId: string\n :param KmsKeyId: (Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.\n You can specify any of the supported ways to identify a AWS KMS key ID. If you need to reference a CMK in a different account, you can use only the key ARN or the alias ARN.\n If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named aws/secretsmanager ). If a AWS KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.\n Warning\n You can use the account's default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and specify the ARN in this field.\n \n\n :type SecretBinary: bytes\n :param SecretBinary: (Optional) Specifies binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter.\n Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.\n This parameter is not available using the Secrets Manager console. It can be accessed only by using the AWS CLI or one of the AWS SDKs.\n \n\n :type SecretString: string\n :param SecretString: (Optional) Specifies text data that you want to encrypt and store in this new version of the secret.\n Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.\n If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.\n For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide . 
For example:\n [{'username':'bob'},{'password':'abc123xyz456'}]\n If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.\n \n\n :type Tags: list\n :param Tags: (Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a 'Key' and 'Value' pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource .\n Warning\n Secrets Manager tag key names are case sensitive. A tag with the key 'ABC' is a different tag from one with key 'abc'.\n If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.\n This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide . For example:\n [{'Key':'CostCenter','Value':'12345'},{'Key':'environment','Value':'production'}]\n If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.\n The following basic restrictions apply to tags:\n Maximum number of tags per secret: 50\n Maximum key length: 127 Unicode characters in UTF-8\n Maximum value length: 255 Unicode characters in UTF-8\n Tag keys and values are case sensitive.\n Do not use the aws: prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.\n If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.\n (dict) --A structure that contains information about a tag.\n Key (string) --The key identifier, or name, of the tag.\n Value (string) --The string value that's associated with the key of the tag.\n \n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'VersionId': 'string'\n }\n \n \n :returns: \n secretsmanager:CreateSecret\n kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.\n kms:Decrypt - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.\n secretsmanager:TagResource - needed only if you include the Tags parameter.\n \n \"\"\"\n pass\n\n
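# The generated stub above documents only the request and response shapes. The\n# following commented sketch shows one plausible call against a live client; it\n# assumes boto3 is installed with credentials configured, and the secret name\n# and values are purely illustrative.\n#\n# import json\n# import boto3\n#\n# client = boto3.client('secretsmanager')\n# response = client.create_secret(\n#     Name='demo/app/credentials',\n#     Description='Example credentials for a demo application',\n#     SecretString=json.dumps({'username': 'bob', 'password': 'abc123xyz456'}),\n# )\n# print(response['ARN'], response['VersionId'])\n\n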
def delete_resource_policy(SecretId=None):\n \"\"\"\n Deletes the resource-based permission policy that's attached to the secret.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.delete_resource_policy(\n SecretId='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to delete the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string'\n }\n \n \n :returns: \n To attach a resource policy to a secret, use PutResourcePolicy .\n To retrieve the current resource-based policy that's attached to a secret, use GetResourcePolicy .\n To list all of the currently available secrets, use ListSecrets .\n \n \"\"\"\n pass\n\n
def delete_secret(SecretId=None, RecoveryWindowInDays=None, ForceDeleteWithoutRecovery=None):\n \"\"\"\n Deletes an entire secret and all of its versions. You can optionally include a recovery window during which you can restore the secret. If you don't specify a recovery window value, the operation defaults to 30 days. Secrets Manager attaches a DeletionDate stamp to the secret that specifies the end of the recovery window. At the end of the recovery window, Secrets Manager deletes the secret permanently.\n At any time before the recovery window ends, you can use RestoreSecret to remove the DeletionDate and cancel the deletion of the secret.\n You cannot access the encrypted secret information in any secret that is scheduled for deletion. If you need to access that information, you must cancel the deletion with RestoreSecret and then retrieve the information.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.delete_secret(\n SecretId='string',\n RecoveryWindowInDays=123,\n ForceDeleteWithoutRecovery=True|False\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type RecoveryWindowInDays: integer\n :param RecoveryWindowInDays: (Optional) Specifies the number of days that Secrets Manager waits before it can delete the secret. You can't use both this parameter and the ForceDeleteWithoutRecovery parameter in the same API call.\n This value can range from 7 to 30 days. The default value is 30.\n \n\n :type ForceDeleteWithoutRecovery: boolean\n :param ForceDeleteWithoutRecovery: (Optional) Specifies that the secret is to be deleted without any recovery window. You can't use both this parameter and the RecoveryWindowInDays parameter in the same API call.\n An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.\n Warning\n Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover the secret. It is permanently lost.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'DeletionDate': datetime(2015, 1, 1)\n }\n \n \n :returns: \n secretsmanager:DeleteSecret\n \n \"\"\"\n pass\n\n
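# Commented usage sketch (illustrative only; the secret name is hypothetical).\n# It schedules deletion with a recovery window, during which restore_secret()\n# can still cancel the deletion, as the docstring above describes.\n#\n# import boto3\n#\n# client = boto3.client('secretsmanager')\n# response = client.delete_secret(\n#     SecretId='demo/app/credentials',\n#     RecoveryWindowInDays=7,\n# )\n# print('Permanent deletion on:', response['DeletionDate'])\n# # Changed your mind? client.restore_secret(SecretId='demo/app/credentials')\n\n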
def describe_secret(SecretId=None):\n \"\"\"\n Retrieves the details of a secret. It does not include the encrypted fields. Only those fields that are populated with a value are returned in the response.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.describe_secret(\n SecretId='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n The identifier of the secret whose details you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'KmsKeyId': 'string',\n 'RotationEnabled': True|False,\n 'RotationLambdaARN': 'string',\n 'RotationRules': {\n 'AutomaticallyAfterDays': 123\n },\n 'LastRotatedDate': datetime(2015, 1, 1),\n 'LastChangedDate': datetime(2015, 1, 1),\n 'LastAccessedDate': datetime(2015, 1, 1),\n 'DeletedDate': datetime(2015, 1, 1),\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'VersionIdsToStages': {\n 'string': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n To create a secret, use CreateSecret .\n To modify a secret, use UpdateSecret .\n To retrieve the encrypted secret information in a version of the secret, use GetSecretValue .\n To list all of the secrets in the AWS account, use ListSecrets .\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\n
def get_random_password(PasswordLength=None, ExcludeCharacters=None, ExcludeNumbers=None, ExcludePunctuation=None, ExcludeUppercase=None, ExcludeLowercase=None, IncludeSpace=None, RequireEachIncludedType=None):\n \"\"\"\n Generates a random password of the specified complexity. This operation is intended for use in the Lambda rotation function. Per best practice, we recommend that you specify the maximum length and include every character type that the system you are generating a password for can support.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.get_random_password(\n PasswordLength=123,\n ExcludeCharacters='string',\n ExcludeNumbers=True|False,\n ExcludePunctuation=True|False,\n ExcludeUppercase=True|False,\n ExcludeLowercase=True|False,\n IncludeSpace=True|False,\n RequireEachIncludedType=True|False\n )\n \n \n :type PasswordLength: integer\n :param PasswordLength: The desired length of the generated password. The default value if you do not include this parameter is 32 characters.\n\n :type ExcludeCharacters: string\n :param ExcludeCharacters: A string that includes characters that should not be included in the generated password. The default is that all characters from the included sets can be used.\n\n :type ExcludeNumbers: boolean\n :param ExcludeNumbers: Specifies that the generated password should not include digits. The default if you do not include this switch parameter is that digits can be included.\n\n :type ExcludePunctuation: boolean\n :param ExcludePunctuation: Specifies that the generated password should not include punctuation characters. The default if you do not include this switch parameter is that punctuation characters can be included.\n The following are the punctuation characters that can be included in the generated password if you don't explicitly exclude them with ExcludeCharacters or ExcludePunctuation :\n ! \" # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \\ ] ^ _ ` { | } ~\n \n\n :type ExcludeUppercase: boolean\n :param ExcludeUppercase: Specifies that the generated password should not include uppercase letters. The default if you do not include this switch parameter is that uppercase letters can be included.\n\n :type ExcludeLowercase: boolean\n :param ExcludeLowercase: Specifies that the generated password should not include lowercase letters. The default if you do not include this switch parameter is that lowercase letters can be included.\n\n :type IncludeSpace: boolean\n :param IncludeSpace: Specifies that the generated password can include the space character. The default if you do not include this switch parameter is that the space character is not included.\n\n :type RequireEachIncludedType: boolean\n :param RequireEachIncludedType: A boolean value that specifies whether the generated password must include at least one of every allowed character type. The default value is True and the operation requires at least one of every character type.\n\n :rtype: dict\n :return: {\n 'RandomPassword': 'string'\n }\n \n \n :returns: \n PasswordLength (integer) -- The desired length of the generated password. The default value if you do not include this parameter is 32 characters.\n ExcludeCharacters (string) -- A string that includes characters that should not be included in the generated password. The default is that all characters from the included sets can be used.\n ExcludeNumbers (boolean) -- Specifies that the generated password should not include digits. The default if you do not include this switch parameter is that digits can be included.\n ExcludePunctuation (boolean) -- Specifies that the generated password should not include punctuation characters. The default if you do not include this switch parameter is that punctuation characters can be included.\n The following are the punctuation characters that can be included in the generated password if you don't explicitly exclude them with ExcludeCharacters or ExcludePunctuation :\n \n ! \" # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \\ ] ^ _ ` { | } ~\n \n ExcludeUppercase (boolean) -- Specifies that the generated password should not include uppercase letters. The default if you do not include this switch parameter is that uppercase letters can be included.\n ExcludeLowercase (boolean) -- Specifies that the generated password should not include lowercase letters. The default if you do not include this switch parameter is that lowercase letters can be included.\n IncludeSpace (boolean) -- Specifies that the generated password can include the space character. The default if you do not include this switch parameter is that the space character is not included.\n RequireEachIncludedType (boolean) -- A boolean value that specifies whether the generated password must include at least one of every allowed character type. The default value is True and the operation requires at least one of every character type.\n \n \"\"\"\n pass\n\n
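# Illustrative sketch: generate a 24-character password for a rotation\n# function. The parameter choices here are an assumption for the example, not\n# a recommendation from the docs above.\n#\n# import boto3\n#\n# client = boto3.client('secretsmanager')\n# response = client.get_random_password(\n#     PasswordLength=24,\n#     ExcludePunctuation=True,\n#     RequireEachIncludedType=True,\n# )\n# password = response['RandomPassword']\n\n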
def get_resource_policy(SecretId=None):\n \"\"\"\n Retrieves the JSON text of the resource-based policy document that's attached to the specified secret. The JSON request string input and response output are shown formatted with white space and line breaks for better readability. Submit your input as a single line JSON string.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_policy(\n SecretId='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to retrieve the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'ResourcePolicy': 'string'\n }\n \n \n :returns: \n To attach a resource policy to a secret, use PutResourcePolicy .\n To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy .\n To list all of the currently available secrets, use ListSecrets .\n \n \"\"\"\n pass\n\n
def get_secret_value(SecretId=None, VersionId=None, VersionStage=None):\n \"\"\"\n Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.get_secret_value(\n SecretId='string',\n VersionId='string',\n VersionStage='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret containing the version that you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type VersionId: string\n :param VersionId: Specifies the unique identifier of the version of the secret that you want to retrieve. If you specify this parameter then don't specify VersionStage . If you don't specify either a VersionStage or VersionId then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT .\n This value is typically a UUID-type value with 32 hexadecimal digits.\n \n\n :type VersionStage: string\n :param VersionStage: Specifies the secret version that you want to retrieve by the staging label attached to the version.\n Staging labels are used to keep track of different versions during the rotation process. If you use this parameter then don't specify VersionId . If you don't specify either a VersionStage or VersionId , then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT .\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'VersionId': 'string',\n 'SecretBinary': b'bytes',\n 'SecretString': 'string',\n 'VersionStages': [\n 'string',\n ],\n 'CreatedDate': datetime(2015, 1, 1)\n }\n \n \n :returns: \n To create a new version of the secret with different encrypted information, use PutSecretValue .\n To retrieve the non-encrypted details for the secret, use DescribeSecret .\n \n \"\"\"\n pass\n\n
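# Illustrative sketch for retrieving a secret value (the secret name is\n# hypothetical). A stored secret populates either SecretString or\n# SecretBinary, so both branches are handled; boto3 returns SecretBinary as\n# raw bytes.\n#\n# import boto3\n#\n# client = boto3.client('secretsmanager')\n# response = client.get_secret_value(SecretId='demo/app/credentials')\n# if 'SecretString' in response:\n#     secret = response['SecretString']\n# else:\n#     secret = response['SecretBinary']  # bytes\n\n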
def get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_secret_version_ids(SecretId=None, MaxResults=None, NextToken=None, IncludeDeprecated=None):\n \"\"\"\n Lists all of the versions attached to the specified secret. The output does not include the SecretString or SecretBinary fields. By default, the list includes only versions that have at least one staging label in VersionStage attached.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.list_secret_version_ids(\n SecretId='string',\n MaxResults=123,\n NextToken='string',\n IncludeDeprecated=True|False\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n The identifier for the secret containing the versions you want to list. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too - for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type MaxResults: integer\n :param MaxResults: (Optional) Limits the number of results that you want to include in the response. If you don't include this parameter, it defaults to a value that's specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (isn't null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Secrets Manager might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.\n\n :type NextToken: string\n :param NextToken: (Optional) Use this parameter in a request if you receive a NextToken response in a previous request that indicates that there's more output available. In a subsequent call, set it to the value of the previous call's NextToken response to indicate where the output should continue from.\n\n :type IncludeDeprecated: boolean\n :param IncludeDeprecated: (Optional) Specifies that you want the results to include versions that do not have any staging labels attached to them. Such versions are considered deprecated and are subject to deletion by Secrets Manager as needed.\n\n :rtype: dict\n :return: {\n 'Versions': [\n {\n 'VersionId': 'string',\n 'VersionStages': [\n 'string',\n ],\n 'LastAccessedDate': datetime(2015, 1, 1),\n 'CreatedDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string',\n 'ARN': 'string',\n 'Name': 'string'\n }\n \n \n :returns: \n To list the secrets in an account, use ListSecrets .\n \n \"\"\"\n pass\n\n
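# Illustrative sketch: inspect which staging labels are attached to each\n# stored version (the secret name is hypothetical).\n#\n# import boto3\n#\n# client = boto3.client('secretsmanager')\n# response = client.list_secret_version_ids(SecretId='demo/app/credentials')\n# for version in response['Versions']:\n#     print(version['VersionId'], version.get('VersionStages', []))\n\n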
def list_secrets(MaxResults=None, NextToken=None):\n \"\"\"\n Lists all of the secrets that are stored by Secrets Manager in the AWS account. To list the versions currently stored for a specific secret, use ListSecretVersionIds . The encrypted fields SecretString and SecretBinary are not included in the output. To get that information, call the GetSecretValue operation.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.list_secrets(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: (Optional) Limits the number of results that you want to include in the response. If you don't include this parameter, it defaults to a value that's specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (isn't null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Secrets Manager might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.\n\n :type NextToken: string\n :param NextToken: (Optional) Use this parameter in a request if you receive a NextToken response in a previous request that indicates that there's more output available. In a subsequent call, set it to the value of the previous call's NextToken response to indicate where the output should continue from.\n\n :rtype: dict\n :return: {\n 'SecretList': [\n {\n 'ARN': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'KmsKeyId': 'string',\n 'RotationEnabled': True|False,\n 'RotationLambdaARN': 'string',\n 'RotationRules': {\n 'AutomaticallyAfterDays': 123\n },\n 'LastRotatedDate': datetime(2015, 1, 1),\n 'LastChangedDate': datetime(2015, 1, 1),\n 'LastAccessedDate': datetime(2015, 1, 1),\n 'DeletedDate': datetime(2015, 1, 1),\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'SecretVersionsToStages': {\n 'string': [\n 'string',\n ]\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n To list the versions attached to a secret, use ListSecretVersionIds .\n \n \"\"\"\n pass\n\n
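# Illustrative pagination sketch, following the NextToken contract described\n# in the docstring above: keep calling until NextToken is absent from the\n# response.\n#\n# import boto3\n#\n# client = boto3.client('secretsmanager')\n# kwargs = {'MaxResults': 20}\n# while True:\n#     page = client.list_secrets(**kwargs)\n#     for secret in page['SecretList']:\n#         print(secret['Name'])\n#     if 'NextToken' not in page:\n#         break\n#     kwargs['NextToken'] = page['NextToken']\n\n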
def put_resource_policy(SecretId=None, ResourcePolicy=None):\n \"\"\"\n Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager . For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide .\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.put_resource_policy(\n SecretId='string',\n ResourcePolicy='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type ResourcePolicy: string\n :param ResourcePolicy: [REQUIRED]\n A JSON-formatted string that's constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide .\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string'\n }\n \n \n :returns: \n To retrieve the resource policy that's attached to a secret, use GetResourcePolicy .\n To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy .\n To list all of the currently available secrets, use ListSecrets .\n \n \"\"\"\n pass\n\n
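# Illustrative sketch, not part of the generated stubs: attaching a minimal\n# resource-based policy. The account ID, role, and secret name below are\n# placeholder assumptions; ResourcePolicy must be a JSON-formatted string.\ndef _example_put_resource_policy():\n    import boto3\n    import json\n    client = boto3.client('secretsmanager')\n    policy = {\n        'Version': '2012-10-17',\n        'Statement': [{\n            'Effect': 'Allow',\n            'Principal': {'AWS': 'arn:aws:iam::111122223333:role/ExampleRole'},\n            'Action': 'secretsmanager:GetSecretValue',\n            # '*' here scopes the statement to the secret the policy is attached to.\n            'Resource': '*'\n        }]\n    }\n    return client.put_resource_policy(SecretId='MySecret', ResourcePolicy=json.dumps(policy))\n\n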
def put_secret_value(SecretId=None, ClientRequestToken=None, SecretBinary=None, SecretString=None, VersionStages=None):\n \"\"\"\n Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.put_secret_value(\n SecretId='string',\n ClientRequestToken='string',\n SecretBinary=b'bytes',\n SecretString='string',\n VersionStages=[\n 'string',\n ]\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) Specifies a unique identifier for the new version of the secret.\n Note\n If you use the AWS CLI or one of the AWS SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.\n This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.\n If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.\n If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).\n If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.\n This value becomes the VersionId of the new version.\n This field is autopopulated if not provided.\n \n\n :type SecretBinary: bytes\n :param SecretBinary: (Optional) Specifies binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.\n This parameter is not accessible using the Secrets Manager console.\n \n\n :type SecretString: string\n :param SecretString: (Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.\n If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.\n For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide .\n For example:\n [{'username':'bob'},{'password':'abc123xyz456'}]\n If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.\n \n\n :type VersionStages: list\n :param VersionStages: (Optional) Specifies a list of staging labels that are attached to this version of the secret. These staging labels are used to track the versions through the rotation process by the Lambda rotation function.\n A staging label must be unique to a single version of the secret. If you specify a staging label that's already associated with a different version of the same secret then that staging label is automatically removed from the other version and attached to this version.\n If you do not specify a value for VersionStages then Secrets Manager automatically moves the staging label AWSCURRENT to this new version.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'VersionId': 'string',\n 'VersionStages': [\n 'string',\n ]\n }\n \n \n :returns: \n If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager . If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.\n If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId . If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.\n \n \"\"\"\n pass\n\n
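# Illustrative sketch, not part of the generated stubs: storing a JSON\n# key/value SecretString, which is the shape the default rotation functions\n# expect. The secret name and credentials below are placeholders.\ndef _example_put_secret_value():\n    import boto3\n    import json\n    client = boto3.client('secretsmanager')\n    return client.put_secret_value(\n        SecretId='MySecret',\n        SecretString=json.dumps({'username': 'bob', 'password': 'abc123xyz456'})\n        # Omitting VersionStages lets Secrets Manager move AWSCURRENT to this version.\n    )\n\n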
def restore_secret(SecretId=None):\n \"\"\"\n Cancels the scheduled deletion of a secret by removing the DeletedDate time stamp. This makes the secret accessible to query once again.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.restore_secret(\n SecretId='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to restore from a previously scheduled deletion. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string'\n }\n \n \n :returns: \n To delete a secret, use DeleteSecret .\n \n \"\"\"\n pass\n\n
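# Illustrative sketch, not part of the generated stubs: cancelling a scheduled\n# deletion. The secret name is a placeholder.\ndef _example_restore_secret():\n    import boto3\n    client = boto3.client('secretsmanager')\n    # Succeeds only while the secret is still within its recovery window.\n    return client.restore_secret(SecretId='MySecret')\n\n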
def rotate_secret(SecretId=None, ClientRequestToken=None, RotationLambdaARN=None, RotationRules=None):\n \"\"\"\n Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.\n This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide .\n Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.\n The rotation function must end with the versions of the secret in one of two states:\n If instead the AWSPENDING staging label is present but is not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.rotate_secret(\n SecretId='string',\n ClientRequestToken='string',\n RotationLambdaARN='string',\n RotationRules={\n 'AutomaticallyAfterDays': 123\n }\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to rotate. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.\n If you use the AWS CLI or one of the AWS SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.\n You only need to specify your own value if you are implementing your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.\n Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing. This value becomes the VersionId of the new version.\n This field is autopopulated if not provided.\n \n\n :type RotationLambdaARN: string\n :param RotationLambdaARN: (Optional) Specifies the ARN of the Lambda function that can rotate the secret.\n\n :type RotationRules: dict\n :param RotationRules: A structure that defines the rotation configuration for this secret.\n AutomaticallyAfterDays (integer) --Specifies the number of days between automatic scheduled rotations of the secret.\n Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'VersionId': 'string'\n }\n \n \n :returns: \n secretsmanager:RotateSecret\n lambda:InvokeFunction (on the function specified in the secret's metadata)\n \n \"\"\"\n pass\n\n
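# Illustrative sketch, not part of the generated stubs: enabling 30-day\n# rotation with an existing rotation Lambda. Both ARNs are placeholder\n# assumptions; the Lambda must already implement the rotation steps above.\ndef _example_rotate_secret():\n    import boto3\n    client = boto3.client('secretsmanager')\n    return client.rotate_secret(\n        SecretId='MySecret',\n        RotationLambdaARN='arn:aws:lambda:us-east-1:111122223333:function:MyRotationFn',\n        RotationRules={'AutomaticallyAfterDays': 30}\n    )\n\n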
def tag_resource(SecretId=None, Tags=None):\n \"\"\"\n Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource .\n The following basic restrictions apply to tags:\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n SecretId='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n The tags to attach to the secret. Each element in the list consists of a Key and a Value .\n This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide . For the AWS CLI, you can also use the syntax: --Tags Key='Key1',Value='Value1',Key='Key2',Value='Value2'[, ...]\n (dict) --A structure that contains information about a tag.\n Key (string) --The key identifier, or name, of the tag.\n Value (string) --The string value that's associated with the key of the tag.\n \n \n\n :returns: \n secretsmanager:TagResource\n \n \"\"\"\n pass\n\ndef untag_resource(SecretId=None, TagKeys=None):\n \"\"\"\n Removes one or more tags from the specified secret.\n This operation is idempotent. If a requested tag is not attached to the secret, no error is returned and the secret metadata is unchanged.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n SecretId='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n The identifier for the secret that you want to remove tags from. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.\n This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide .\n (string) --\n \n\n :returns: \n To add one or more tags to the collection attached to a secret, use TagResource .\n To view the list of tags attached to a secret, use DescribeSecret .\n \n \"\"\"\n pass\n\n
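# Illustrative sketch, not part of the generated stubs: adding and then\n# removing a tag. The key/value pair and secret name are placeholders.\ndef _example_tagging():\n    import boto3\n    client = boto3.client('secretsmanager')\n    client.tag_resource(SecretId='MySecret', Tags=[{'Key': 'Env', 'Value': 'prod'}])\n    # UntagResource takes only the key names; values are removed along with them.\n    client.untag_resource(SecretId='MySecret', TagKeys=['Env'])\n\n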
def update_secret(SecretId=None, ClientRequestToken=None, Description=None, KmsKeyId=None, SecretBinary=None, SecretString=None):\n \"\"\"\n Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.\n To modify the rotation configuration of a secret, use RotateSecret instead.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.update_secret(\n SecretId='string',\n ClientRequestToken='string',\n Description='string',\n KmsKeyId='string',\n SecretBinary=b'bytes',\n SecretString='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret that you want to modify or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) If you want to add a new version to the secret, this parameter specifies a unique identifier for the new version that helps ensure idempotency.\n If you use the AWS CLI or one of the AWS SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.\n You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given secret is not created twice. 
We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.\n Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing.\n If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.\n If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).\n If a version with this value already exists and that version's SecretString and SecretBinary values are different from the request then an error occurs because you cannot modify an existing secret value.\n This value becomes the VersionId of the new version.\n This field is autopopulated if not provided.\n \n\n :type Description: string\n :param Description: (Optional) Specifies an updated user-provided description of the secret.\n\n :type KmsKeyId: string\n :param KmsKeyId: (Optional) Specifies an updated ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the protected text in new versions of this secret.\n Warning\n You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN of that CMK in this field. The user making the call must have permissions to both the secret and the CMK in their respective accounts.\n \n\n :type SecretBinary: bytes\n :param SecretBinary: (Optional) Specifies updated binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.\n This parameter is not accessible using the Secrets Manager console.\n \n\n :type SecretString: string\n :param SecretString: (Optional) Specifies updated text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.\n If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.\n For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide . For example:\n [{'username':'bob'},{'password':'abc123xyz456'}]\n If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. You can also 'escape' the double quote character in the embedded JSON text by prefacing each with a backslash. For example, the following string is surrounded by double-quotes. 
All of the embedded double quotes are escaped:\n '[{\\'username\\':\\'bob\\'},{\\'password\\':\\'abc123xyz456\\'}]'\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string',\n 'VersionId': 'string'\n }\n \n \n :returns: \n If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager . If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.\n If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId . If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.\n \n \"\"\"\n pass\n\n
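# Illustrative sketch, not part of the generated stubs: updating the\n# description and storing a new SecretString version in one call. All names\n# and values are placeholders.\ndef _example_update_secret():\n    import boto3\n    import json\n    client = boto3.client('secretsmanager')\n    return client.update_secret(\n        SecretId='MySecret',\n        Description='Rotated database credentials',\n        SecretString=json.dumps({'username': 'bob', 'password': 'abc123xyz456'})\n    )\n\n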
def update_secret_version_stage(SecretId=None, VersionStage=None, RemoveFromVersionId=None, MoveToVersionId=None):\n \"\"\"\n Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide .\n The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.\n You can move the AWSCURRENT staging label to this version by including it in this call.\n If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.\n To run this command, you must have the following permissions:\n See also: AWS API Documentation\n \n \n :example: response = client.update_secret_version_stage(\n SecretId='string',\n VersionStage='string',\n RemoveFromVersionId='string',\n MoveToVersionId='string'\n )\n \n \n :type SecretId: string\n :param SecretId: [REQUIRED]\n Specifies the secret with the version whose list of staging labels you want to modify. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.\n Note\n If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too; for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.\n \n\n :type VersionStage: string\n :param VersionStage: [REQUIRED]\n The staging label to add to this version.\n \n\n :type RemoveFromVersionId: string\n :param RemoveFromVersionId: Specifies the secret version ID of the version that the staging label is to be removed from. If the staging label you are trying to attach to one version is already attached to a different version, then you must include this parameter and specify the version that the label is to be removed from. If the label is attached and you either do not specify this parameter, or the version ID does not match, then the operation fails.\n\n :type MoveToVersionId: string\n :param MoveToVersionId: (Optional) The secret version ID that you want to add the staging label to. If you want to remove a label from a version, then do not specify this parameter.\n If the staging label is already attached to a different version of the secret, then you must also specify the RemoveFromVersionId parameter.\n \n\n :rtype: dict\n :return: {\n 'ARN': 'string',\n 'Name': 'string'\n }\n \n \n :returns: \n To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5216872692108154, "alphanum_fraction": 0.5306518077850342, "avg_line_length": 33.15418243408203, "blob_id": "bb1a3305f582dd9838a0fcdd232503eb9ce40228", "content_id": "e9ae3360c80716f6802f81d68d8a4da3f7eecbe9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46963, "license_type": "permissive", "max_line_length": 530, "num_lines": 1375, "path": "/pyboto3/appmesh.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. 
This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_mesh(clientToken=None, meshName=None):\n \"\"\"\n Creates a new service mesh. A service mesh is a logical boundary for network traffic between the services that reside within it.\n After you create your service mesh, you can create virtual nodes, virtual routers, and routes to distribute traffic between the applications in your mesh.\n See also: AWS API Documentation\n \n \n :example: response = client.create_mesh(\n clientToken='string',\n meshName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name to use for the service mesh.\n \n\n :rtype: dict\n :return: {\n 'mesh': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_route(clientToken=None, meshName=None, routeName=None, spec=None, virtualRouterName=None):\n \"\"\"\n Creates a new route that is associated with a virtual router.\n You can use the prefix parameter in your route specification for path-based routing of requests. For example, if your virtual router service name is my-service.local , and you want the route to match requests to my-service.local/metrics , then your prefix should be /metrics .\n If your route matches a request, you can distribute traffic to one or more target virtual nodes with relative weighting.\n See also: AWS API Documentation\n \n \n :example: response = client.create_route(\n clientToken='string',\n meshName='string',\n routeName='string',\n spec={\n 'httpRoute': {\n 'action': {\n 'weightedTargets': [\n {\n 'virtualNode': 'string',\n 'weight': 123\n },\n ]\n },\n 'match': {\n 'prefix': 'string'\n }\n }\n },\n virtualRouterName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to create the route.\n \n\n :type routeName: string\n :param routeName: [REQUIRED]\n The name to use for the route.\n \n\n :type spec: dict\n :param spec: [REQUIRED]\n The route specification to apply.\n httpRoute (dict) --The HTTP routing information for the route.\n action (dict) --The action to take if a match is determined.\n weightedTargets (list) --The targets that traffic is routed to when a request matches the route. You can specify one or more targets and their relative weights with which to distribute traffic.\n (dict) --An object representing a target and its relative weight. Traffic is distributed across targets according to their relative weight. 
For example, a weighted target with a relative weight of 50 receives five times as much traffic as one with a relative weight of 10.\n virtualNode (string) --The virtual node to associate with the weighted target.\n weight (integer) --The relative weight of the weighted target.\n \n match (dict) --The criteria for determining an HTTP request match.\n prefix (string) --Specifies the path with which to match requests. This parameter must always start with / , which by itself matches all requests to the virtual router service name. You can also match for path-based routing of requests. For example, if your virtual router service name is my-service.local , and you want the route to match requests to my-service.local/metrics , then your prefix should be /metrics .\n \n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router in which to create the route.\n \n\n :rtype: dict\n :return: {\n 'route': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'routeName': 'string',\n 'spec': {\n 'httpRoute': {\n 'action': {\n 'weightedTargets': [\n {\n 'virtualNode': 'string',\n 'weight': 123\n },\n ]\n },\n 'match': {\n 'prefix': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_virtual_node(clientToken=None, meshName=None, spec=None, virtualNodeName=None):\n \"\"\"\n Creates a new virtual node within a service mesh.\n A virtual node acts as logical pointer to a particular task group, such as an Amazon ECS service or a Kubernetes deployment. When you create a virtual node, you must specify the DNS service discovery name for your task group.\n Any inbound traffic that your virtual node expects should be specified as a listener . Any outbound traffic that your virtual node expects to reach should be specified as a backend .\n The response metadata for your new virtual node contains the arn that is associated with the virtual node. Set this value (either the full ARN or the truncated resource name, for example, mesh/default/virtualNode/simpleapp , as the APPMESH_VIRTUAL_NODE_NAME environment variable for your task group's Envoy proxy container in your task definition or pod spec. This is then mapped to the node.id and node.cluster Envoy parameters.\n See also: AWS API Documentation\n \n \n :example: response = client.create_virtual_node(\n clientToken='string',\n meshName='string',\n spec={\n 'backends': [\n 'string',\n ],\n 'listeners': [\n {\n 'healthCheck': {\n 'healthyThreshold': 123,\n 'intervalMillis': 123,\n 'path': 'string',\n 'port': 123,\n 'protocol': 'http'|'tcp',\n 'timeoutMillis': 123,\n 'unhealthyThreshold': 123\n },\n 'portMapping': {\n 'port': 123,\n 'protocol': 'http'|'tcp'\n }\n },\n ],\n 'serviceDiscovery': {\n 'dns': {\n 'serviceName': 'string'\n }\n }\n },\n virtualNodeName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. 
Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to create the virtual node.\n \n\n :type spec: dict\n :param spec: [REQUIRED]\n The virtual node specification to apply.\n backends (list) --The backends to which the virtual node is expected to send outbound traffic.\n (string) --\n listeners (list) --The listeners from which the virtual node is expected to receive inbound traffic.\n (dict) --An object representing a listener for a virtual node.\n healthCheck (dict) --The health check information for the listener.\n Note\n Listener health checks are not available during the App Mesh preview.\n healthyThreshold (integer) --The number of consecutive successful health checks that must occur before declaring listener healthy.\n intervalMillis (integer) --The time period in milliseconds between each health check execution.\n path (string) --The destination path for the health check request.\n port (integer) --The destination port for the health check request.\n protocol (string) --The protocol for the health check request.\n timeoutMillis (integer) --The amount of time to wait when receiving a response from the health check, in milliseconds.\n unhealthyThreshold (integer) --The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.\n portMapping (dict) --The port mapping information for the listener.\n port (integer) --The port used for the port mapping.\n protocol (string) --The protocol used for the port mapping.\n \n serviceDiscovery (dict) --The service discovery information for the virtual node.\n dns (dict) --Specifies the DNS service name for the virtual node.\n serviceName (string) --The DNS service name for your virtual node.\n \n \n\n :type virtualNodeName: string\n :param virtualNodeName: [REQUIRED]\n The name to use for the virtual node.\n \n\n :rtype: dict\n :return: {\n 'virtualNode': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'backends': [\n 'string',\n ],\n 'listeners': [\n {\n 'healthCheck': {\n 'healthyThreshold': 123,\n 'intervalMillis': 123,\n 'path': 'string',\n 'port': 123,\n 'protocol': 'http'|'tcp',\n 'timeoutMillis': 123,\n 'unhealthyThreshold': 123\n },\n 'portMapping': {\n 'port': 123,\n 'protocol': 'http'|'tcp'\n }\n },\n ],\n 'serviceDiscovery': {\n 'dns': {\n 'serviceName': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualNodeName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_virtual_router(clientToken=None, meshName=None, spec=None, virtualRouterName=None):\n \"\"\"\n Creates a new virtual router within a service mesh.\n Virtual routers handle traffic for one or more service names within your mesh. After you create your virtual router, create and associate routes for your virtual router that direct incoming requests to different virtual nodes.\n See also: AWS API Documentation\n \n \n :example: response = client.create_virtual_router(\n clientToken='string',\n meshName='string',\n spec={\n 'serviceNames': [\n 'string',\n ]\n },\n virtualRouterName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. 
Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to create the virtual router.\n \n\n :type spec: dict\n :param spec: [REQUIRED]\n The virtual router specification to apply.\n serviceNames (list) --The service mesh service names to associate with the virtual router.\n (string) --\n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name to use for the virtual router.\n \n\n :rtype: dict\n :return: {\n 'virtualRouter': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'serviceNames': [\n 'string',\n ]\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_mesh(meshName=None):\n \"\"\"\n Deletes an existing service mesh.\n You must delete all resources (routes, virtual routers, virtual nodes) in the service mesh before you can delete the mesh itself.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_mesh(\n meshName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh to delete.\n \n\n :rtype: dict\n :return: {\n 'mesh': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_route(meshName=None, routeName=None, virtualRouterName=None):\n \"\"\"\n Deletes an existing route.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_route(\n meshName='string',\n routeName='string',\n virtualRouterName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to delete the route.\n \n\n :type routeName: string\n :param routeName: [REQUIRED]\n The name of the route to delete.\n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router in which to delete the route.\n \n\n :rtype: dict\n :return: {\n 'route': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'routeName': 'string',\n 'spec': {\n 'httpRoute': {\n 'action': {\n 'weightedTargets': [\n {\n 'virtualNode': 'string',\n 'weight': 123\n },\n ]\n },\n 'match': {\n 'prefix': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_virtual_node(meshName=None, virtualNodeName=None):\n \"\"\"\n Deletes an existing virtual node.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_virtual_node(\n meshName='string',\n virtualNodeName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to delete the virtual node.\n \n\n :type virtualNodeName: string\n :param virtualNodeName: [REQUIRED]\n The name of the virtual node to delete.\n \n\n :rtype: dict\n :return: {\n 'virtualNode': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': 
datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'backends': [\n 'string',\n ],\n 'listeners': [\n {\n 'healthCheck': {\n 'healthyThreshold': 123,\n 'intervalMillis': 123,\n 'path': 'string',\n 'port': 123,\n 'protocol': 'http'|'tcp',\n 'timeoutMillis': 123,\n 'unhealthyThreshold': 123\n },\n 'portMapping': {\n 'port': 123,\n 'protocol': 'http'|'tcp'\n }\n },\n ],\n 'serviceDiscovery': {\n 'dns': {\n 'serviceName': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualNodeName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_virtual_router(meshName=None, virtualRouterName=None):\n \"\"\"\n Deletes an existing virtual router.\n You must delete any routes associated with the virtual router before you can delete the router itself.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_virtual_router(\n meshName='string',\n virtualRouterName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to delete the virtual router.\n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router to delete.\n \n\n :rtype: dict\n :return: {\n 'virtualRouter': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'serviceNames': [\n 'string',\n ]\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_mesh(meshName=None):\n \"\"\"\n Describes an existing service mesh.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_mesh(\n meshName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh to describe.\n \n\n :rtype: dict\n :return: {\n 'mesh': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_route(meshName=None, routeName=None, virtualRouterName=None):\n \"\"\"\n Describes an existing route.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_route(\n meshName='string',\n routeName='string',\n virtualRouterName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which the route resides.\n \n\n :type routeName: string\n :param routeName: [REQUIRED]\n The name of the route to describe.\n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router with which the route is associated.\n \n\n :rtype: dict\n :return: {\n 'route': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'routeName': 'string',\n 'spec': {\n 'httpRoute': {\n 'action': {\n 'weightedTargets': [\n {\n 'virtualNode': 'string',\n 'weight': 123\n },\n ]\n },\n 'match': {\n 'prefix': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\n
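# Illustrative sketch, not part of the generated stubs: describing a mesh and\n# one of its routes with the App Mesh client. All names are placeholders.\ndef _example_describe_mesh_and_route():\n    import boto3\n    client = boto3.client('appmesh')\n    mesh = client.describe_mesh(meshName='simpleapp')\n    route = client.describe_route(\n        meshName='simpleapp',\n        routeName='route-A',\n        virtualRouterName='serviceB-router'\n    )\n    # The route spec carries the weighted targets and prefix match shown above.\n    return mesh['mesh']['status'], route['route']['spec']\n\n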
def describe_virtual_node(meshName=None, virtualNodeName=None):\n \"\"\"\n Describes an existing virtual node.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_virtual_node(\n meshName='string',\n virtualNodeName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which the virtual node resides.\n \n\n :type virtualNodeName: string\n :param virtualNodeName: [REQUIRED]\n The name of the virtual node to describe.\n \n\n :rtype: dict\n :return: {\n 'virtualNode': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'backends': [\n 'string',\n ],\n 'listeners': [\n {\n 'healthCheck': {\n 'healthyThreshold': 123,\n 'intervalMillis': 123,\n 'path': 'string',\n 'port': 123,\n 'protocol': 'http'|'tcp',\n 'timeoutMillis': 123,\n 'unhealthyThreshold': 123\n },\n 'portMapping': {\n 'port': 123,\n 'protocol': 'http'|'tcp'\n }\n },\n ],\n 'serviceDiscovery': {\n 'dns': {\n 'serviceName': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualNodeName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_virtual_router(meshName=None, virtualRouterName=None):\n \"\"\"\n Describes an existing virtual router.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_virtual_router(\n meshName='string',\n virtualRouterName='string'\n )\n \n \n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which the virtual router resides.\n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router to describe.\n \n\n :rtype: dict\n :return: {\n 'virtualRouter': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'serviceNames': [\n 'string',\n ]\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_meshes(limit=None, nextToken=None):\n \"\"\"\n Returns a list of existing service meshes.\n See also: AWS API Documentation\n \n \n :example: response = client.list_meshes(\n limit=123,\n nextToken='string'\n )\n \n \n :type limit: integer\n :param limit: The maximum number of mesh results returned by ListMeshes in paginated output. When this parameter is used, ListMeshes only returns limit results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListMeshes request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListMeshes returns up to 100 results and a nextToken value if applicable.\n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListMeshes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :rtype: dict\n :return: {\n 'meshes': [\n {\n 'arn': 'string',\n 'meshName': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_routes(limit=None, meshName=None, nextToken=None, virtualRouterName=None):\n \"\"\"\n Returns a list of existing routes in a service mesh.\n See also: AWS API Documentation\n \n \n :example: response = client.list_routes(\n limit=123,\n meshName='string',\n nextToken='string',\n virtualRouterName='string'\n )\n \n \n :type limit: integer\n :param limit: The maximum number of mesh results returned by ListRoutes in paginated output. When this parameter is used, ListRoutes only returns limit results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRoutes request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRoutes returns up to 100 results and a nextToken value if applicable.\n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to list routes.\n \n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListRoutes request where limit was used and the results exceeded the value of that parameter. 
Pagination continues from the end of the previous results that returned the nextToken value.\n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router in which to list routes.\n \n\n :rtype: dict\n :return: {\n 'nextToken': 'string',\n 'routes': [\n {\n 'arn': 'string',\n 'meshName': 'string',\n 'routeName': 'string',\n 'virtualRouterName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_virtual_nodes(limit=None, meshName=None, nextToken=None):\n \"\"\"\n Returns a list of existing virtual nodes.\n See also: AWS API Documentation\n \n \n :example: response = client.list_virtual_nodes(\n limit=123,\n meshName='string',\n nextToken='string'\n )\n \n \n :type limit: integer\n :param limit: The maximum number of mesh results returned by ListVirtualNodes in paginated output. When this parameter is used, ListVirtualNodes only returns limit results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListVirtualNodes request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListVirtualNodes returns up to 100 results and a nextToken value if applicable.\n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to list virtual nodes.\n \n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListVirtualNodes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n\n :rtype: dict\n :return: {\n 'nextToken': 'string',\n 'virtualNodes': [\n {\n 'arn': 'string',\n 'meshName': 'string',\n 'virtualNodeName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_virtual_routers(limit=None, meshName=None, nextToken=None):\n \"\"\"\n Returns a list of existing virtual routers in a service mesh.\n See also: AWS API Documentation\n \n \n :example: response = client.list_virtual_routers(\n limit=123,\n meshName='string',\n nextToken='string'\n )\n \n \n :type limit: integer\n :param limit: The maximum number of mesh results returned by ListVirtualRouters in paginated output. When this parameter is used, ListVirtualRouters only returns limit results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListVirtualRouters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListVirtualRouters returns up to 100 results and a nextToken value if applicable.\n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which to list virtual routers.\n \n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListVirtualRouters request where limit was used and the results exceeded the value of that parameter. 
Pagination continues from the end of the previous results that returned the nextToken value.\n\n :rtype: dict\n :return: {\n 'nextToken': 'string',\n 'virtualRouters': [\n {\n 'arn': 'string',\n 'meshName': 'string',\n 'virtualRouterName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef update_route(clientToken=None, meshName=None, routeName=None, spec=None, virtualRouterName=None):\n \"\"\"\n Updates an existing route for a specified service mesh and virtual router.\n See also: AWS API Documentation\n \n \n :example: response = client.update_route(\n clientToken='string',\n meshName='string',\n routeName='string',\n spec={\n 'httpRoute': {\n 'action': {\n 'weightedTargets': [\n {\n 'virtualNode': 'string',\n 'weight': 123\n },\n ]\n },\n 'match': {\n 'prefix': 'string'\n }\n }\n },\n virtualRouterName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which the route resides.\n \n\n :type routeName: string\n :param routeName: [REQUIRED]\n The name of the route to update.\n \n\n :type spec: dict\n :param spec: [REQUIRED]\n The new route specification to apply. This overwrites the existing data.\n httpRoute (dict) --The HTTP routing information for the route.\n action (dict) --The action to take if a match is determined.\n weightedTargets (list) --The targets that traffic is routed to when a request matches the route. You can specify one or more targets and their relative weights with which to distribute traffic.\n (dict) --An object representing a target and its relative weight. Traffic is distributed across targets according to their relative weight. For example, a weighted target with a relative weight of 50 receives five times as much traffic as one with a relative weight of 10.\n virtualNode (string) --The virtual node to associate with the weighted target.\n weight (integer) --The relative weight of the weighted target.\n \n match (dict) --The criteria for determining an HTTP request match.\n prefix (string) --Specifies the path with which to match requests. This parameter must always start with / , which by itself matches all requests to the virtual router service name. You can also match for path-based routing of requests. 
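As an illustrative sketch (not an authoritative recipe), a prefix match combined with weighted targets might look like this; every name below is a placeholder:

response = client.update_route(
    meshName='my-mesh',
    virtualRouterName='my-router',
    routeName='metrics-route',
    spec={
        'httpRoute': {
            'match': {'prefix': '/metrics'},  # path-based match
            'action': {
                'weightedTargets': [
                    {'virtualNode': 'stable-node', 'weight': 90},  # ~90% of matched traffic
                    {'virtualNode': 'canary-node', 'weight': 10},  # ~10% canary share
                ]
            },
        }
    },
)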
For example, if your virtual router service name is my-service.local , and you want the route to match requests to my-service.local/metrics , then your prefix should be /metrics .\n \n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router with which the route is associated.\n \n\n :rtype: dict\n :return: {\n 'route': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'routeName': 'string',\n 'spec': {\n 'httpRoute': {\n 'action': {\n 'weightedTargets': [\n {\n 'virtualNode': 'string',\n 'weight': 123\n },\n ]\n },\n 'match': {\n 'prefix': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_virtual_node(clientToken=None, meshName=None, spec=None, virtualNodeName=None):\n \"\"\"\n Updates an existing virtual node in a specified service mesh.\n See also: AWS API Documentation\n \n \n :example: response = client.update_virtual_node(\n clientToken='string',\n meshName='string',\n spec={\n 'backends': [\n 'string',\n ],\n 'listeners': [\n {\n 'healthCheck': {\n 'healthyThreshold': 123,\n 'intervalMillis': 123,\n 'path': 'string',\n 'port': 123,\n 'protocol': 'http'|'tcp',\n 'timeoutMillis': 123,\n 'unhealthyThreshold': 123\n },\n 'portMapping': {\n 'port': 123,\n 'protocol': 'http'|'tcp'\n }\n },\n ],\n 'serviceDiscovery': {\n 'dns': {\n 'serviceName': 'string'\n }\n }\n },\n virtualNodeName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which the virtual node resides.\n \n\n :type spec: dict\n :param spec: [REQUIRED]\n The new virtual node specification to apply. 
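A hedged sketch of such a spec (mesh, node, and DNS names are invented; port 8080 over HTTP is an arbitrary choice):

response = client.update_virtual_node(
    meshName='my-mesh',
    virtualNodeName='my-node',
    spec={
        'backends': ['backend-service.local'],  # services this node calls
        'listeners': [
            {'portMapping': {'port': 8080, 'protocol': 'http'}}
        ],
        'serviceDiscovery': {'dns': {'serviceName': 'my-node.local'}},
    },
)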
This overwrites the existing data.\n backends (list) --The backends to which the virtual node is expected to send outbound traffic.\n (string) --\n listeners (list) --The listeners from which the virtual node is expected to receive inbound traffic.\n (dict) --An object representing a listener for a virtual node.\n healthCheck (dict) --The health check information for the listener.\n Note\n Listener health checks are not available during the App Mesh preview.\n healthyThreshold (integer) --The number of consecutive successful health checks that must occur before declaring the listener healthy.\n intervalMillis (integer) --The time period in milliseconds between each health check execution.\n path (string) --The destination path for the health check request.\n port (integer) --The destination port for the health check request.\n protocol (string) --The protocol for the health check request.\n timeoutMillis (integer) --The amount of time to wait when receiving a response from the health check, in milliseconds.\n unhealthyThreshold (integer) --The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.\n portMapping (dict) --The port mapping information for the listener.\n port (integer) --The port used for the port mapping.\n protocol (string) --The protocol used for the port mapping.\n \n serviceDiscovery (dict) --The service discovery information for the virtual node.\n dns (dict) --Specifies the DNS service name for the virtual node.\n serviceName (string) --The DNS service name for your virtual node.\n \n \n\n :type virtualNodeName: string\n :param virtualNodeName: [REQUIRED]\n The name of the virtual node to update.\n \n\n :rtype: dict\n :return: {\n 'virtualNode': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'backends': [\n 'string',\n ],\n 'listeners': [\n {\n 'healthCheck': {\n 'healthyThreshold': 123,\n 'intervalMillis': 123,\n 'path': 'string',\n 'port': 123,\n 'protocol': 'http'|'tcp',\n 'timeoutMillis': 123,\n 'unhealthyThreshold': 123\n },\n 'portMapping': {\n 'port': 123,\n 'protocol': 'http'|'tcp'\n }\n },\n ],\n 'serviceDiscovery': {\n 'dns': {\n 'serviceName': 'string'\n }\n }\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualNodeName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_virtual_router(clientToken=None, meshName=None, spec=None, virtualRouterName=None):\n \"\"\"\n Updates an existing virtual router in a specified service mesh.\n See also: AWS API Documentation\n \n \n :example: response = client.update_virtual_router(\n clientToken='string',\n meshName='string',\n spec={\n 'serviceNames': [\n 'string',\n ]\n },\n virtualRouterName='string'\n )\n \n \n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.\n This field is autopopulated if not provided.\n \n\n :type meshName: string\n :param meshName: [REQUIRED]\n The name of the service mesh in which the virtual router resides.\n \n\n :type spec: dict\n :param spec: [REQUIRED]\n The new virtual router specification to apply. 
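A one-call sketch that swaps in a new service-name list (all identifiers are placeholders):

response = client.update_virtual_router(
    meshName='my-mesh',
    virtualRouterName='my-router',
    spec={'serviceNames': ['my-service.local']},
)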
This overwrites the existing data.\n serviceNames (list) --The service mesh service names to associate with the virtual router.\n (string) --\n \n\n :type virtualRouterName: string\n :param virtualRouterName: [REQUIRED]\n The name of the virtual router to update.\n \n\n :rtype: dict\n :return: {\n 'virtualRouter': {\n 'meshName': 'string',\n 'metadata': {\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'uid': 'string',\n 'version': 123\n },\n 'spec': {\n 'serviceNames': [\n 'string',\n ]\n },\n 'status': {\n 'status': 'ACTIVE'|'DELETED'|'INACTIVE'\n },\n 'virtualRouterName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6444395184516907, "alphanum_fraction": 0.6507339477539062, "avg_line_length": 68.62749481201172, "blob_id": "211bd2461db0190ed6d6b92c9630808465dfa116", "content_id": "98e8fe97324b46d0c87470b7f76a336998f1f7be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199066, "license_type": "permissive", "max_line_length": 837, "num_lines": 2859, "path": "/pyboto3/firehose.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_delivery_stream(DeliveryStreamName=None, DeliveryStreamType=None, KinesisStreamSourceConfiguration=None, S3DestinationConfiguration=None, ExtendedS3DestinationConfiguration=None, RedshiftDestinationConfiguration=None, ElasticsearchDestinationConfiguration=None, SplunkDestinationConfiguration=None, Tags=None):\n \"\"\"\n Creates a Kinesis Data Firehose delivery stream.\n By default, you can create up to 50 delivery streams per AWS Region.\n This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING . After the delivery stream is created, its status is ACTIVE and it now accepts data. 
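A hedged polling sketch for that state check (the stream name, attempt count, and sleep interval are illustrative choices, not service-mandated values):

import time
import boto3

firehose = boto3.client('firehose')
for _ in range(30):
    desc = firehose.describe_delivery_stream(DeliveryStreamName='my-stream')
    if desc['DeliveryStreamDescription']['DeliveryStreamStatus'] == 'ACTIVE':
        break          # the stream now accepts records
    time.sleep(10)     # still CREATING; wait and re-check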
Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream .\n A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch , or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource , and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.\n A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration , S3DestinationConfiguration , ElasticsearchDestinationConfiguration , RedshiftDestinationConfiguration , or SplunkDestinationConfiguration .\n When you specify S3DestinationConfiguration , you can also provide the following optional values: BufferingHints, EncryptionConfiguration , and CompressionFormat . By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.\n A few notes about Amazon Redshift as a destination:\n Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. 
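A minimal creation sketch under those rules (assuming a Kinesis source and an extended S3 destination; every ARN below is a placeholder, and GZIP is just one of the documented formats):

response = firehose.create_delivery_stream(
    DeliveryStreamName='my-stream',
    DeliveryStreamType='KinesisStreamAsSource',
    KinesisStreamSourceConfiguration={
        'KinesisStreamARN': 'arn:aws:kinesis:us-east-1:111122223333:stream/my-source',
        'RoleARN': 'arn:aws:iam::111122223333:role/firehose-source-role',
    },
    ExtendedS3DestinationConfiguration={
        'RoleARN': 'arn:aws:iam::111122223333:role/firehose-delivery-role',
        'BucketARN': 'arn:aws:s3:::my-bucket',
        'BufferingHints': {'SizeInMBs': 5, 'IntervalInSeconds': 300},  # the documented defaults
        'CompressionFormat': 'GZIP',
    },
)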
For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_delivery_stream(\n DeliveryStreamName='string',\n DeliveryStreamType='DirectPut'|'KinesisStreamAsSource',\n KinesisStreamSourceConfiguration={\n 'KinesisStreamARN': 'string',\n 'RoleARN': 'string'\n },\n S3DestinationConfiguration={\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n ExtendedS3DestinationConfiguration={\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'S3BackupMode': 'Disabled'|'Enabled',\n 'S3BackupConfiguration': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'DataFormatConversionConfiguration': {\n 'SchemaConfiguration': {\n 'RoleARN': 'string',\n 'CatalogId': 'string',\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'Region': 'string',\n 'VersionId': 'string'\n },\n 'InputFormatConfiguration': {\n 'Deserializer': {\n 'OpenXJsonSerDe': {\n 'ConvertDotsInJsonKeysToUnderscores': True|False,\n 'CaseInsensitive': True|False,\n 'ColumnToJsonKeyMappings': {\n 'string': 'string'\n }\n },\n 'HiveJsonSerDe': {\n 'TimestampFormats': [\n 'string',\n ]\n }\n }\n },\n 'OutputFormatConfiguration': {\n 'Serializer': {\n 'ParquetSerDe': {\n 'BlockSizeBytes': 123,\n 'PageSizeBytes': 123,\n 'Compression': 'UNCOMPRESSED'|'GZIP'|'SNAPPY',\n 'EnableDictionaryCompression': True|False,\n 'MaxPaddingBytes': 123,\n 'WriterVersion': 'V1'|'V2'\n },\n 'OrcSerDe': {\n 'StripeSizeBytes': 123,\n 'BlockSizeBytes': 123,\n 'RowIndexStride': 123,\n 'EnablePadding': True|False,\n 'PaddingTolerance': 123.0,\n 'Compression': 'NONE'|'ZLIB'|'SNAPPY',\n 'BloomFilterColumns': [\n 'string',\n ],\n 'BloomFilterFalsePositiveProbability': 123.0,\n 'DictionaryKeyThreshold': 123.0,\n 'FormatVersion': 'V0_11'|'V0_12'\n }\n }\n },\n 'Enabled': True|False\n }\n },\n RedshiftDestinationConfiguration={\n 'RoleARN': 
'string',\n 'ClusterJDBCURL': 'string',\n 'CopyCommand': {\n 'DataTableName': 'string',\n 'DataTableColumns': 'string',\n 'CopyOptions': 'string'\n },\n 'Username': 'string',\n 'Password': 'string',\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3Configuration': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'S3BackupMode': 'Disabled'|'Enabled',\n 'S3BackupConfiguration': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n ElasticsearchDestinationConfiguration={\n 'RoleARN': 'string',\n 'DomainARN': 'string',\n 'IndexName': 'string',\n 'TypeName': 'string',\n 'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',\n 'BufferingHints': {\n 'IntervalInSeconds': 123,\n 'SizeInMBs': 123\n },\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3BackupMode': 'FailedDocumentsOnly'|'AllDocuments',\n 'S3Configuration': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n SplunkDestinationConfiguration={\n 'HECEndpoint': 'string',\n 'HECEndpointType': 'Raw'|'Event',\n 'HECToken': 'string',\n 'HECAcknowledgmentTimeoutInSeconds': 123,\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3BackupMode': 'FailedEventsOnly'|'AllEvents',\n 'S3Configuration': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 
'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.\n \n\n :type DeliveryStreamType: string\n :param DeliveryStreamType: The delivery stream type. This parameter can be one of the following values:\n DirectPut : Provider applications access the delivery stream directly.\n KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.\n \n\n :type KinesisStreamSourceConfiguration: dict\n :param KinesisStreamSourceConfiguration: When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.\n KinesisStreamARN (string) -- [REQUIRED]The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format .\n RoleARN (string) -- [REQUIRED]The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format .\n \n\n :type S3DestinationConfiguration: dict\n :param S3DestinationConfiguration: [Deprecated] The destination in Amazon S3. You can specify only one destination.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. 
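Written out as a tiny hedged calculation (the ingest rate is a made-up figure):

ingest_rate_mb_per_sec = 1                          # assumed steady-state throughput
size_in_mbs = max(5, ingest_rate_mb_per_sec * 10)   # 10 seconds of data, never below the 5 MB default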
For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n\n :type ExtendedS3DestinationConfiguration: dict\n :param ExtendedS3DestinationConfiguration: The destination in Amazon S3. You can specify only one destination.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED.\n EncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n S3BackupMode (string) --The Amazon S3 backup mode.\n S3BackupConfiguration (dict) --The configuration for backup in Amazon S3.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n DataFormatConversionConfiguration (dict) --The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.\n SchemaConfiguration (dict) --Specifies the AWS Glue Data Catalog table that contains the column information.\n RoleARN (string) --The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.\n CatalogId (string) --The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.\n DatabaseName (string) --Specifies the name of the AWS Glue database that contains the schema for the output data.\n TableName (string) --Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n Region (string) --If you don't specify an AWS Region, the default is the current Region.\n VersionId (string) --Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.\n InputFormatConfiguration (dict) --Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.\n Deserializer (dict) --Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.\n OpenXJsonSerDe (dict) --The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.\n ConvertDotsInJsonKeysToUnderscores (boolean) --When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is 'a.b', you can define the column name to be 'a_b' when using this option.\n The default is false .\n CaseInsensitive (boolean) --When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.\n ColumnToJsonKeyMappings (dict) --Maps column names to JSON keys that aren't identical to the column names. 
This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {'ts': 'timestamp'} to map this key to a column named ts .\n (string) --\n (string) --\n \n HiveJsonSerDe (dict) --The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.\n TimestampFormats (list) --Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.\n (string) --\n \n \n OutputFormatConfiguration (dict) --Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.\n Serializer (dict) --Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.\n ParquetSerDe (dict) --A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .\n BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n PageSizeBytes (integer) --The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.\n Compression (string) --The compression code to use over data blocks. The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.\n EnableDictionaryCompression (boolean) --Indicates whether to enable dictionary compression.\n MaxPaddingBytes (integer) --The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.\n WriterVersion (string) --Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .\n OrcSerDe (dict) --A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .\n StripeSizeBytes (integer) --The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.\n BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n RowIndexStride (integer) --The number of rows between index entries. 
The default is 10,000 and the minimum is 1,000.\n EnablePadding (boolean) --Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .\n PaddingTolerance (float) --A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.\n For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.\n Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .\n Compression (string) --The compression code to use over data blocks. The default is SNAPPY .\n BloomFilterColumns (list) --The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .\n (string) --\n BloomFilterFalsePositiveProbability (float) --The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.\n DictionaryKeyThreshold (float) --Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.\n FormatVersion (string) --The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .\n \n Enabled (boolean) --Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.\n \n \n\n :type RedshiftDestinationConfiguration: dict\n :param RedshiftDestinationConfiguration: The destination in Amazon Redshift. You can specify only one destination.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n ClusterJDBCURL (string) -- [REQUIRED]The database connection string.\n CopyCommand (dict) -- [REQUIRED]The COPY command.\n DataTableName (string) -- [REQUIRED]The name of the target table. The table must already exist in the database.\n DataTableColumns (string) --A comma-separated list of column names.\n CopyOptions (string) --Optional parameters to use with the Amazon Redshift COPY command. For more information, see the 'Optional Parameters' section of Amazon Redshift COPY command . 
Some possible examples that would apply to Kinesis Data Firehose are as follows:\n delimiter '\\t' lzop; - fields are delimited with '\\t' (TAB character) and compressed using lzop.\n delimiter '|' - fields are delimited with '|' (this is the default delimiter).\n delimiter '|' escape - the delimiter should be escaped.\n fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.\n JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.\n For more examples, see Amazon Redshift COPY command examples .\n Username (string) -- [REQUIRED]The name of the user.\n Password (string) -- [REQUIRED]The user password.\n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).\n DurationInSeconds (integer) --The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.\n S3Configuration (dict) -- [REQUIRED]The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream .\n The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. 
If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n S3BackupMode (string) --The Amazon S3 backup mode.\n S3BackupConfiguration (dict) --The configuration for backup in Amazon S3.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. 
If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n\n :type ElasticsearchDestinationConfiguration: dict\n :param ElasticsearchDestinationConfiguration: The destination in Amazon ES. You can specify only one destination.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces .\n DomainARN (string) -- [REQUIRED]The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain , DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN . For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n IndexName (string) -- [REQUIRED]The Elasticsearch index name.\n TypeName (string) -- [REQUIRED]The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.\n IndexRotationPeriod (string) --The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination . The default value is OneDay .\n BufferingHints (dict) --The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints are used.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. 
The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).\n DurationInSeconds (integer) --After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.\n S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly , Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination . Default value is FailedDocumentsOnly .\n S3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n\n :type SplunkDestinationConfiguration: dict\n :param SplunkDestinationConfiguration: The destination in Splunk. You can specify only one destination.\n HECEndpoint (string) -- [REQUIRED]The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.\n HECEndpointType (string) -- [REQUIRED]This type can be either 'Raw' or 'Event.'\n HECToken (string) -- [REQUIRED]This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.\n HECAcknowledgmentTimeoutInSeconds (integer) --The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.\n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.\n DurationInSeconds (integer) --The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.\n S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. 
Default value is FailedDocumentsOnly .\n S3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.\n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n \n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n\n :type Tags: list\n :param Tags: A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.\n You can specify up to 50 tags when creating a delivery stream.\n (dict) --Metadata that you can assign to a delivery stream, consisting of a key-value pair.\n Key (string) -- [REQUIRED]A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n Value (string) --An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n \n \n\n :rtype: dict\n :return: {\n 'DeliveryStreamARN': 'string'\n }\n \n \n :returns: \n DeliveryStreamName (string) -- [REQUIRED]\n The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.\n \n DeliveryStreamType (string) -- The delivery stream type. This parameter can be one of the following values:\n \n DirectPut : Provider applications access the delivery stream directly.\n KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.\n \n \n KinesisStreamSourceConfiguration (dict) -- When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.\n \n KinesisStreamARN (string) -- [REQUIRED]The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format .\n \n RoleARN (string) -- [REQUIRED]The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format .\n \n \n \n S3DestinationConfiguration (dict) -- [Deprecated] The destination in Amazon S3. You can specify only one destination.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n ExtendedS3DestinationConfiguration (dict) -- The destination in Amazon S3. You can specify only one destination.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. 
For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n ProcessingConfiguration (dict) --The data processing configuration.\n \n Enabled (boolean) --Enables or disables data processing.\n \n Processors (list) --The data processors.\n \n (dict) --Describes a data processor.\n \n Type (string) -- [REQUIRED]The type of processor.\n \n Parameters (list) --The processor parameters.\n \n (dict) --Describes the processor parameter.\n \n ParameterName (string) -- [REQUIRED]The name of the parameter.\n \n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n \n \n \n \n \n \n \n \n \n S3BackupMode (string) --The Amazon S3 backup mode.\n \n S3BackupConfiguration (dict) --The configuration for backup in Amazon S3.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. 
This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n DataFormatConversionConfiguration (dict) --The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.\n \n SchemaConfiguration (dict) --Specifies the AWS Glue Data Catalog table that contains the column information.\n \n RoleARN (string) --The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.\n \n CatalogId (string) --The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.\n \n DatabaseName (string) --Specifies the name of the AWS Glue database that contains the schema for the output data.\n \n TableName (string) --Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n \n Region (string) --If you don't specify an AWS Region, the default is the current Region.\n \n VersionId (string) --Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.\n \n \n \n InputFormatConfiguration (dict) --Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.\n \n Deserializer (dict) --Specifies which deserializer to use. 
You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.\n \n OpenXJsonSerDe (dict) --The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.\n \n ConvertDotsInJsonKeysToUnderscores (boolean) --When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n The default is false .\n \n CaseInsensitive (boolean) --When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.\n \n ColumnToJsonKeyMappings (dict) --Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {\"ts\": \"timestamp\"} to map this key to a column named ts .\n \n (string) --\n (string) --\n \n \n \n \n \n \n HiveJsonSerDe (dict) --The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.\n \n TimestampFormats (list) --Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.\n \n (string) --\n \n \n \n \n \n \n \n \n OutputFormatConfiguration (dict) --Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.\n \n Serializer (dict) --Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.\n \n ParquetSerDe (dict) --A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .\n \n BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n \n PageSizeBytes (integer) --The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.\n \n Compression (string) --The compression code to use over data blocks. 
The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.\n \n EnableDictionaryCompression (boolean) --Indicates whether to enable dictionary compression.\n \n MaxPaddingBytes (integer) --The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.\n \n WriterVersion (string) --Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .\n \n \n \n OrcSerDe (dict) --A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .\n \n StripeSizeBytes (integer) --The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.\n \n BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n \n RowIndexStride (integer) --The number of rows between index entries. The default is 10,000 and the minimum is 1,000.\n \n EnablePadding (boolean) --Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .\n \n PaddingTolerance (float) --A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.\n For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.\n Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .\n \n Compression (string) --The compression code to use over data blocks. The default is SNAPPY .\n \n BloomFilterColumns (list) --The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .\n \n (string) --\n \n \n BloomFilterFalsePositiveProbability (float) --The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.\n \n DictionaryKeyThreshold (float) --Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.\n \n FormatVersion (string) --The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .\n \n \n \n \n \n \n \n Enabled (boolean) --Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.\n \n \n \n \n \n RedshiftDestinationConfiguration (dict) -- The destination in Amazon Redshift. You can specify only one destination.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials.
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n ClusterJDBCURL (string) -- [REQUIRED]The database connection string.\n \n CopyCommand (dict) -- [REQUIRED]The COPY command.\n \n DataTableName (string) -- [REQUIRED]The name of the target table. The table must already exist in the database.\n \n DataTableColumns (string) --A comma-separated list of column names.\n \n CopyOptions (string) --Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command . Some possible examples that would apply to Kinesis Data Firehose are as follows:\n \n delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.\n delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).\n delimiter '|' escape - the delimiter should be escaped.\n fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.\n JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.\n \n For more examples, see Amazon Redshift COPY command examples .\n \n \n \n Username (string) -- [REQUIRED]The name of the user.\n \n Password (string) -- [REQUIRED]The user password.\n \n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).\n \n DurationInSeconds (integer) --The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.\n \n \n \n S3Configuration (dict) -- [REQUIRED]The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream .\n The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination.
The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n ProcessingConfiguration (dict) --The data processing configuration.\n \n Enabled (boolean) --Enables or disables data processing.\n \n Processors (list) --The data processors.\n \n (dict) --Describes a data processor.\n \n Type (string) -- [REQUIRED]The type of processor.\n \n Parameters (list) --The processor parameters.\n \n (dict) --Describes the processor parameter.\n \n ParameterName (string) -- [REQUIRED]The name of the parameter.\n \n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n \n \n \n \n \n \n \n \n \n S3BackupMode (string) --The Amazon S3 backup mode.\n \n S3BackupConfiguration (dict) --The configuration for backup in Amazon S3.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. 
The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n ElasticsearchDestinationConfiguration (dict) -- The destination in Amazon ES. You can specify only one destination.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n DomainARN (string) -- [REQUIRED]The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain , DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN . For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n IndexName (string) -- [REQUIRED]The Elasticsearch index name.\n \n TypeName (string) -- [REQUIRED]The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.\n \n IndexRotationPeriod (string) --The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination . 
The default value is OneDay .\n \n BufferingHints (dict) --The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints are used.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n \n \n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).\n \n DurationInSeconds (integer) --After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.\n \n \n \n S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly , Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination . Default value is FailedDocumentsOnly .\n \n S3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. 
The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n ProcessingConfiguration (dict) --The data processing configuration.\n \n Enabled (boolean) --Enables or disables data processing.\n \n Processors (list) --The data processors.\n \n (dict) --Describes a data processor.\n \n Type (string) -- [REQUIRED]The type of processor.\n \n Parameters (list) --The processor parameters.\n \n (dict) --Describes the processor parameter.\n \n ParameterName (string) -- [REQUIRED]The name of the parameter.\n \n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n \n \n \n \n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n SplunkDestinationConfiguration (dict) -- The destination in Splunk. You can specify only one destination.\n \n HECEndpoint (string) -- [REQUIRED]The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.\n \n HECEndpointType (string) -- [REQUIRED]This type can be either \"Raw\" or \"Event.\"\n \n HECToken (string) -- [REQUIRED]This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.\n \n HECAcknowledgmentTimeoutInSeconds (integer) --The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.\n \n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.\n \n DurationInSeconds (integer) --The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. 
It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.\n \n \n \n S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly .\n \n S3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.\n \n RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n Prefix (string) --The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n \n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n \n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n \n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n \n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n \n \n \n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n \n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n \n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n \n KMSEncryptionConfig (dict) --The encryption key.\n \n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n ProcessingConfiguration (dict) --The data processing configuration.\n \n Enabled (boolean) --Enables or disables data processing.\n \n Processors (list) --The data processors.\n \n (dict) --Describes a data processor.\n \n Type (string) -- [REQUIRED]The type of processor.\n \n Parameters (list) --The processor parameters.\n \n (dict) --Describes the processor parameter.\n \n ParameterName (string) -- [REQUIRED]The name of the parameter.\n \n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n \n \n \n \n \n \n \n \n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n \n Enabled (boolean) --Enables or disables CloudWatch logging.\n \n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n \n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n \n \n \n Tags (list) -- A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.\n You can specify up to 50 tags when creating a delivery stream.\n \n (dict) --Metadata that you can assign to a delivery stream, consisting of a key-value pair.\n \n Key (string) -- [REQUIRED]A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n \n Value (string) --An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n \n \n \n \n \n \n \"\"\"\n pass\n\ndef delete_delivery_stream(DeliveryStreamName=None):\n \"\"\"\n Deletes a delivery stream and its data.\n You can delete a delivery stream only if it is in ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.\n To check the state of a delivery stream, use DescribeDeliveryStream .\n While the delivery stream is DELETING state, the service might continue to accept the records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_delivery_stream(\n DeliveryStreamName='string'\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_delivery_stream(DeliveryStreamName=None, Limit=None, ExclusiveStartDestinationId=None):\n \"\"\"\n Describes the specified delivery stream and gets the status. 
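A minimal readiness-polling sketch (hedged: the stream name 'example-stream' and the 10-second pause are illustrative assumptions, not values from this reference):

    import time
    import boto3

    client = boto3.client('firehose')
    # Poll until DeliveryStreamStatus inside DeliveryStreamDescription reaches ACTIVE.
    while True:
        response = client.describe_delivery_stream(DeliveryStreamName='example-stream')
        if response['DeliveryStreamDescription']['DeliveryStreamStatus'] == 'ACTIVE':
            break
        time.sleep(10)  # illustrative pause between polls
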
For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_delivery_stream(\n DeliveryStreamName='string',\n Limit=123,\n ExclusiveStartDestinationId='string'\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream.\n \n\n :type Limit: integer\n :param Limit: The limit on the number of destinations to return. You can have one destination per delivery stream.\n\n :type ExclusiveStartDestinationId: string\n :param ExclusiveStartDestinationId: The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.\n\n :rtype: dict\n :return: {\n 'DeliveryStreamDescription': {\n 'DeliveryStreamName': 'string',\n 'DeliveryStreamARN': 'string',\n 'DeliveryStreamStatus': 'CREATING'|'DELETING'|'ACTIVE',\n 'DeliveryStreamEncryptionConfiguration': {\n 'Status': 'ENABLED'|'ENABLING'|'DISABLED'|'DISABLING'\n },\n 'DeliveryStreamType': 'DirectPut'|'KinesisStreamAsSource',\n 'VersionId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'LastUpdateTimestamp': datetime(2015, 1, 1),\n 'Source': {\n 'KinesisStreamSourceDescription': {\n 'KinesisStreamARN': 'string',\n 'RoleARN': 'string',\n 'DeliveryStartTimestamp': datetime(2015, 1, 1)\n }\n },\n 'Destinations': [\n {\n 'DestinationId': 'string',\n 'S3DestinationDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ExtendedS3DestinationDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'S3BackupMode': 'Disabled'|'Enabled',\n 'S3BackupDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'DataFormatConversionConfiguration': {\n 'SchemaConfiguration': {\n 'RoleARN': 'string',\n 'CatalogId': 'string',\n 
'DatabaseName': 'string',\n 'TableName': 'string',\n 'Region': 'string',\n 'VersionId': 'string'\n },\n 'InputFormatConfiguration': {\n 'Deserializer': {\n 'OpenXJsonSerDe': {\n 'ConvertDotsInJsonKeysToUnderscores': True|False,\n 'CaseInsensitive': True|False,\n 'ColumnToJsonKeyMappings': {\n 'string': 'string'\n }\n },\n 'HiveJsonSerDe': {\n 'TimestampFormats': [\n 'string',\n ]\n }\n }\n },\n 'OutputFormatConfiguration': {\n 'Serializer': {\n 'ParquetSerDe': {\n 'BlockSizeBytes': 123,\n 'PageSizeBytes': 123,\n 'Compression': 'UNCOMPRESSED'|'GZIP'|'SNAPPY',\n 'EnableDictionaryCompression': True|False,\n 'MaxPaddingBytes': 123,\n 'WriterVersion': 'V1'|'V2'\n },\n 'OrcSerDe': {\n 'StripeSizeBytes': 123,\n 'BlockSizeBytes': 123,\n 'RowIndexStride': 123,\n 'EnablePadding': True|False,\n 'PaddingTolerance': 123.0,\n 'Compression': 'NONE'|'ZLIB'|'SNAPPY',\n 'BloomFilterColumns': [\n 'string',\n ],\n 'BloomFilterFalsePositiveProbability': 123.0,\n 'DictionaryKeyThreshold': 123.0,\n 'FormatVersion': 'V0_11'|'V0_12'\n }\n }\n },\n 'Enabled': True|False\n }\n },\n 'RedshiftDestinationDescription': {\n 'RoleARN': 'string',\n 'ClusterJDBCURL': 'string',\n 'CopyCommand': {\n 'DataTableName': 'string',\n 'DataTableColumns': 'string',\n 'CopyOptions': 'string'\n },\n 'Username': 'string',\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3DestinationDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'S3BackupMode': 'Disabled'|'Enabled',\n 'S3BackupDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ElasticsearchDestinationDescription': {\n 'RoleARN': 'string',\n 'DomainARN': 'string',\n 'IndexName': 'string',\n 'TypeName': 'string',\n 'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',\n 'BufferingHints': {\n 'IntervalInSeconds': 123,\n 'SizeInMBs': 123\n },\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3BackupMode': 'FailedDocumentsOnly'|'AllDocuments',\n 'S3DestinationDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 
'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'SplunkDestinationDescription': {\n 'HECEndpoint': 'string',\n 'HECEndpointType': 'Raw'|'Event',\n 'HECToken': 'string',\n 'HECAcknowledgmentTimeoutInSeconds': 123,\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3BackupMode': 'FailedEventsOnly'|'AllEvents',\n 'S3DestinationDescription': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n }\n },\n ],\n 'HasMoreDestinations': True|False\n }\n }\n \n \n :returns: \n DirectPut : Provider applications access the delivery stream directly.\n KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_delivery_streams(Limit=None, DeliveryStreamType=None, ExclusiveStartDeliveryStreamName=None):\n \"\"\"\n Lists your delivery streams in alphabetical order of their names.\n The number of delivery streams might be too large to return using a single call to ListDeliveryStreams . You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.\n See also: AWS API Documentation\n \n \n :example: response = client.list_delivery_streams(\n Limit=123,\n DeliveryStreamType='DirectPut'|'KinesisStreamAsSource',\n ExclusiveStartDeliveryStreamName='string'\n )\n \n \n :type Limit: integer\n :param Limit: The maximum number of delivery streams to list. The default value is 10.\n\n :type DeliveryStreamType: string\n :param DeliveryStreamType: The delivery stream type. This can be one of the following values:\n DirectPut : Provider applications access the delivery stream directly.\n KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.\n This parameter is optional. If this parameter is omitted, delivery streams of all types are returned.\n \n\n :type ExclusiveStartDeliveryStreamName: string\n :param ExclusiveStartDeliveryStreamName: The list of delivery streams returned by this call to ListDeliveryStreams will start with the delivery stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName .\n\n :rtype: dict\n :return: {\n 'DeliveryStreamNames': [\n 'string',\n ],\n 'HasMoreDeliveryStreams': True|False\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_tags_for_delivery_stream(DeliveryStreamName=None, ExclusiveStartTagKey=None, Limit=None):\n \"\"\"\n Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_delivery_stream(\n DeliveryStreamName='string',\n ExclusiveStartTagKey='string',\n Limit=123\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream whose tags you want to list.\n \n\n :type ExclusiveStartTagKey: string\n :param ExclusiveStartTagKey: The key to use as the starting point for the list of tags. If you set this parameter, ListTagsForDeliveryStream gets all tags that occur after ExclusiveStartTagKey .\n\n :type Limit: integer\n :param Limit: The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'HasMoreTags': True|False\n }\n \n \n \"\"\"\n pass\n\ndef put_record(DeliveryStreamName=None, Record=None):\n \"\"\"\n Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch . 
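As an illustrative sketch (not part of the official AWS documentation), a minimal single-record write might look like the following; the stream name and payload are hypothetical, and client is assumed to be a configured boto3 Firehose client:

        import json
        import boto3

        client = boto3.client('firehose')
        event = {'user': 'u-123', 'action': 'click'}  # hypothetical payload
        # A trailing newline lets the consumer split individual records at the
        # destination, as described below.
        response = client.put_record(
            DeliveryStreamName='example-stream',
            Record={'Data': (json.dumps(event) + '\n').encode('utf-8')}
        )
        print(response['RecordId'])
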
Applications using these operations are referred to as producers.\n By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch , the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits .\n You must specify the name of the delivery stream and the data record when using PutRecord . The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.\n Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n ) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.\n The PutRecord operation returns a RecordId , which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.\n If the PutRecord operation throws a ServiceUnavailableException , back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.\n Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.\n See also: AWS API Documentation\n \n \n :example: response = client.put_record(\n DeliveryStreamName='string',\n Record={\n 'Data': b'bytes'\n }\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream.\n \n\n :type Record: dict\n :param Record: [REQUIRED]\n The record.\n Data (bytes) -- [REQUIRED]The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KiB.\n \n\n :rtype: dict\n :return: {\n 'RecordId': 'string',\n 'Encrypted': True|False\n }\n \n \n \"\"\"\n pass\n\ndef put_record_batch(DeliveryStreamName=None, Records=None):\n \"\"\"\n Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord . Applications using these operations are referred to as producers.\n By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch , the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits .\n Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.\n You must specify the name of the delivery stream and the data record when using PutRecord . The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. 
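A hedged sketch of the retry pattern described below, which resends only the entries reported as failed via FailedPutCount and RequestResponses (all names are illustrative):

        import time

        records = [{'Data': b'line 1\n'}, {'Data': b'line 2\n'}]  # hypothetical batch
        while records:
            response = client.put_record_batch(
                DeliveryStreamName='example-stream',
                Records=records
            )
            if response['FailedPutCount'] == 0:
                break
            # Failed entries carry an ErrorCode; keep only those and retry.
            records = [rec for rec, res in zip(records, response['RequestResponses'])
                       if 'ErrorCode' in res]
            time.sleep(1)  # a real producer would back off more carefully
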
The data blob could be, for example, a segment from a log file, geographic location data, website clickstream data, and so on.\n Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n ) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.\n The PutRecordBatch response includes a count of failed records, FailedPutCount , and an array of responses, RequestResponses . Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.\n A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure . ErrorMessage provides more detailed information about the error.\n If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.\n If PutRecordBatch throws ServiceUnavailableException , back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.\n Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.\n See also: AWS API Documentation\n \n \n :example: response = client.put_record_batch(\n DeliveryStreamName='string',\n Records=[\n {\n 'Data': b'bytes'\n },\n ]\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream.\n \n\n :type Records: list\n :param Records: [REQUIRED]\n One or more records.\n (dict) --The unit of data in a delivery stream.\n Data (bytes) -- [REQUIRED]The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KiB.\n \n \n\n :rtype: dict\n :return: {\n 'FailedPutCount': 123,\n 'Encrypted': True|False,\n 'RequestResponses': [\n {\n 'RecordId': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef start_delivery_stream_encryption(DeliveryStreamName=None):\n \"\"\"\n Enables server-side encryption (SSE) for the delivery stream.\n This operation is asynchronous. It returns immediately. 
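Because the call returns before encryption is fully in effect, a caller might poll DescribeDeliveryStream until the stream reports ENABLED, as in this hypothetical sketch (the exact response field path is an assumption; verify it against the DescribeDeliveryStream output):

        import time

        client.start_delivery_stream_encryption(DeliveryStreamName='example-stream')
        while True:
            description = client.describe_delivery_stream(
                DeliveryStreamName='example-stream'
            )['DeliveryStreamDescription']
            # Assumed field path for the encryption status.
            status = description.get('DeliveryStreamEncryptionConfiguration', {}).get('Status')
            if status == 'ENABLED':
                break
            time.sleep(5)
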
When you invoke it, Kinesis Data Firehose first sets the status of the stream to ENABLING , and then to ENABLED . You can continue to read and write data to your stream while its status is ENABLING , but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted , respectively.\n To check the encryption state of a delivery stream, use DescribeDeliveryStream .\n You can only enable SSE for a delivery stream that uses DirectPut as its source.\n The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.\n See also: AWS API Documentation\n \n \n :example: response = client.start_delivery_stream_encryption(\n DeliveryStreamName='string'\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream for which you want to enable server-side encryption (SSE).\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef stop_delivery_stream_encryption(DeliveryStreamName=None):\n \"\"\"\n Disables server-side encryption (SSE) for the delivery stream.\n This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to DISABLING , and then to DISABLED . You can continue to read and write data to your stream while its status is DISABLING . It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted , respectively.\n To check the encryption state of a delivery stream, use DescribeDeliveryStream .\n The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_delivery_stream_encryption(\n DeliveryStreamName='string'\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream for which you want to disable server-side encryption (SSE).\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef tag_delivery_stream(DeliveryStreamName=None, Tags=None):\n \"\"\"\n Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. 
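For instance, a minimal hypothetical call that labels a stream with its environment might look like this:

        client.tag_delivery_stream(
            DeliveryStreamName='example-stream',
            Tags=[{'Key': 'environment', 'Value': 'staging'}]
        )
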
For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n Each delivery stream can have up to 50 tags.\n This operation has a limit of five transactions per second per account.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_delivery_stream(\n DeliveryStreamName='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream to which you want to add the tags.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n A set of key-value pairs to use to create the tags.\n (dict) --Metadata that you can assign to a delivery stream, consisting of a key-value pair.\n Key (string) -- [REQUIRED]A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n Value (string) --An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_delivery_stream(DeliveryStreamName=None, TagKeys=None):\n \"\"\"\n Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.\n If you specify a tag that doesn't exist, the operation ignores it.\n This operation has a limit of five transactions per second per account.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_delivery_stream(\n DeliveryStreamName='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n A list of tag keys. Each corresponding tag is removed from the delivery stream.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_destination(DeliveryStreamName=None, CurrentDeliveryStreamVersionId=None, DestinationId=None, S3DestinationUpdate=None, ExtendedS3DestinationUpdate=None, RedshiftDestinationUpdate=None, ElasticsearchDestinationUpdate=None, SplunkDestinationUpdate=None):\n \"\"\"\n Updates the specified destination of the specified delivery stream.\n Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.\n Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.\n If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. 
For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.\n If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.\n Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream . Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.\n See also: AWS API Documentation\n \n \n :example: response = client.update_destination(\n DeliveryStreamName='string',\n CurrentDeliveryStreamVersionId='string',\n DestinationId='string',\n S3DestinationUpdate={\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n ExtendedS3DestinationUpdate={\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'S3BackupMode': 'Disabled'|'Enabled',\n 'S3BackupUpdate': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'DataFormatConversionConfiguration': {\n 'SchemaConfiguration': {\n 'RoleARN': 'string',\n 'CatalogId': 'string',\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'Region': 'string',\n 'VersionId': 'string'\n },\n 'InputFormatConfiguration': {\n 'Deserializer': {\n 'OpenXJsonSerDe': {\n 'ConvertDotsInJsonKeysToUnderscores': True|False,\n 'CaseInsensitive': True|False,\n 'ColumnToJsonKeyMappings': {\n 'string': 'string'\n }\n },\n 'HiveJsonSerDe': {\n 'TimestampFormats': [\n 'string',\n ]\n }\n }\n },\n 'OutputFormatConfiguration': {\n 'Serializer': {\n 'ParquetSerDe': {\n 'BlockSizeBytes': 123,\n 'PageSizeBytes': 123,\n 'Compression': 
'UNCOMPRESSED'|'GZIP'|'SNAPPY',\n 'EnableDictionaryCompression': True|False,\n 'MaxPaddingBytes': 123,\n 'WriterVersion': 'V1'|'V2'\n },\n 'OrcSerDe': {\n 'StripeSizeBytes': 123,\n 'BlockSizeBytes': 123,\n 'RowIndexStride': 123,\n 'EnablePadding': True|False,\n 'PaddingTolerance': 123.0,\n 'Compression': 'NONE'|'ZLIB'|'SNAPPY',\n 'BloomFilterColumns': [\n 'string',\n ],\n 'BloomFilterFalsePositiveProbability': 123.0,\n 'DictionaryKeyThreshold': 123.0,\n 'FormatVersion': 'V0_11'|'V0_12'\n }\n }\n },\n 'Enabled': True|False\n }\n },\n RedshiftDestinationUpdate={\n 'RoleARN': 'string',\n 'ClusterJDBCURL': 'string',\n 'CopyCommand': {\n 'DataTableName': 'string',\n 'DataTableColumns': 'string',\n 'CopyOptions': 'string'\n },\n 'Username': 'string',\n 'Password': 'string',\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3Update': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'S3BackupMode': 'Disabled'|'Enabled',\n 'S3BackupUpdate': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n ElasticsearchDestinationUpdate={\n 'RoleARN': 'string',\n 'DomainARN': 'string',\n 'IndexName': 'string',\n 'TypeName': 'string',\n 'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',\n 'BufferingHints': {\n 'IntervalInSeconds': 123,\n 'SizeInMBs': 123\n },\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3Update': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 
SplunkDestinationUpdate={\n 'HECEndpoint': 'string',\n 'HECEndpointType': 'Raw'|'Event',\n 'HECToken': 'string',\n 'HECAcknowledgmentTimeoutInSeconds': 123,\n 'RetryOptions': {\n 'DurationInSeconds': 123\n },\n 'S3BackupMode': 'FailedEventsOnly'|'AllEvents',\n 'S3Update': {\n 'RoleARN': 'string',\n 'BucketARN': 'string',\n 'Prefix': 'string',\n 'ErrorOutputPrefix': 'string',\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 123\n },\n 'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy',\n 'EncryptionConfiguration': {\n 'NoEncryptionConfig': 'NoEncryption',\n 'KMSEncryptionConfig': {\n 'AWSKMSKeyARN': 'string'\n }\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n },\n 'ProcessingConfiguration': {\n 'Enabled': True|False,\n 'Processors': [\n {\n 'Type': 'Lambda',\n 'Parameters': [\n {\n 'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',\n 'ParameterValue': 'string'\n },\n ]\n },\n ]\n },\n 'CloudWatchLoggingOptions': {\n 'Enabled': True|False,\n 'LogGroupName': 'string',\n 'LogStreamName': 'string'\n }\n }\n )\n \n \n :type DeliveryStreamName: string\n :param DeliveryStreamName: [REQUIRED]\n The name of the delivery stream.\n \n\n :type CurrentDeliveryStreamVersionId: string\n :param CurrentDeliveryStreamVersionId: [REQUIRED]\n Obtain this value from the VersionId result of DeliveryStreamDescription . This value is required, and helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.\n \n\n :type DestinationId: string\n :param DestinationId: [REQUIRED]\n The ID of the destination.\n \n\n :type S3DestinationUpdate: dict\n :param S3DestinationUpdate: [Deprecated] Describes an update for a destination in Amazon S3.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. 
The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n\n :type ExtendedS3DestinationUpdate: dict\n :param ExtendedS3DestinationUpdate: Describes an update for a destination in Amazon S3.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n S3BackupMode (string) --Enables or disables Amazon S3 backup mode.\n S3BackupUpdate (dict) --The Amazon S3 destination for backup.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n DataFormatConversionConfiguration (dict) --The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.\n SchemaConfiguration (dict) --Specifies the AWS Glue Data Catalog table that contains the column information.\n RoleARN (string) --The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.\n CatalogId (string) --The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.\n DatabaseName (string) --Specifies the name of the AWS Glue database that contains the schema for the output data.\n TableName (string) --Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n Region (string) --If you don't specify an AWS Region, the default is the current Region.\n VersionId (string) --Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.\n InputFormatConfiguration (dict) --Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.\n Deserializer (dict) --Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.\n OpenXJsonSerDe (dict) --The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.\n ConvertDotsInJsonKeysToUnderscores (boolean) --When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is 'a.b', you can define the column name to be 'a_b' when using this option.\n The default is false .\n CaseInsensitive (boolean) --When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.\n ColumnToJsonKeyMappings (dict) --Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {'ts': 'timestamp'} to map this key to a column named ts .\n (string) --\n (string) --\n \n HiveJsonSerDe (dict) --The native Hive / HCatalog JsonSerDe. 
Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.\n TimestampFormats (list) --Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.\n (string) --\n \n \n OutputFormatConfiguration (dict) --Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.\n Serializer (dict) --Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.\n ParquetSerDe (dict) --A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .\n BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n PageSizeBytes (integer) --The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.\n Compression (string) --The compression code to use over data blocks. The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.\n EnableDictionaryCompression (boolean) --Indicates whether to enable dictionary compression.\n MaxPaddingBytes (integer) --The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.\n WriterVersion (string) --Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .\n OrcSerDe (dict) --A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .\n StripeSizeBytes (integer) --The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.\n BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n RowIndexStride (integer) --The number of rows between index entries. The default is 10,000 and the minimum is 1,000.\n EnablePadding (boolean) --Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .\n PaddingTolerance (float) --A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. 
The default value is 0.05, which means 5 percent of stripe size.\n For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.\n Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .\n Compression (string) --The compression code to use over data blocks. The default is SNAPPY .\n BloomFilterColumns (list) --The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .\n (string) --\n BloomFilterFalsePositiveProbability (float) --The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.\n DictionaryKeyThreshold (float) --Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.\n FormatVersion (string) --The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .\n \n Enabled (boolean) --Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.\n \n \n\n :type RedshiftDestinationUpdate: dict\n :param RedshiftDestinationUpdate: Describes an update for a destination in Amazon Redshift.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n ClusterJDBCURL (string) --The database connection string.\n CopyCommand (dict) --The COPY command.\n DataTableName (string) -- [REQUIRED]The name of the target table. The table must already exist in the database.\n DataTableColumns (string) --A comma-separated list of column names.\n CopyOptions (string) --Optional parameters to use with the Amazon Redshift COPY command. For more information, see the 'Optional Parameters' section of Amazon Redshift COPY command . Some possible examples that would apply to Kinesis Data Firehose are as follows:\n delimiter '\t' lzop; - fields are delimited with '\t' (TAB character) and compressed using lzop.\n delimiter '|' - fields are delimited with '|' (this is the default delimiter).\n delimiter '|' escape - the delimiter should be escaped.\n fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.\n JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.\n For more examples, see Amazon Redshift COPY command examples .\n Username (string) --The name of the user.\n Password (string) --The user password.\n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).\n DurationInSeconds (integer) --The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). 
Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.\n S3Update (dict) --The Amazon S3 destination.\n The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n \n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n S3BackupMode (string) --The Amazon S3 backup mode.\n S3BackupUpdate (dict) --The Amazon S3 destination for backup.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n \n\n :type ElasticsearchDestinationUpdate: dict\n :param ElasticsearchDestinationUpdate: Describes an update for a destination in Amazon ES.\n RoleARN (string) --The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces .\n DomainARN (string) --The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain , DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN . For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n IndexName (string) --The Elasticsearch index name.\n TypeName (string) --The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.\n IndexRotationPeriod (string) --The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination . Default value is OneDay .\n BufferingHints (dict) --The buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).\n DurationInSeconds (integer) --After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.\n S3Update (dict) --The Amazon S3 destination.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. 
You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n \n \n\n :type SplunkDestinationUpdate: dict\n :param SplunkDestinationUpdate: Describes an update for a destination in Splunk.\n HECEndpoint (string) --The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.\n HECEndpointType (string) --This type can be either 'Raw' or 'Event.'\n HECToken (string) --A GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.\n HECAcknowledgmentTimeoutInSeconds (integer) --The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.\n RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.\n DurationInSeconds (integer) --The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.\n S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly .\n S3Update (dict) --Your update to the configuration of the backup Amazon S3 location.\n RoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n BucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n Prefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide .\n ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name.\n BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.\n CompressionFormat (string) --The compression format. 
If no value is specified, the default is UNCOMPRESSED .\n The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n KMSEncryptionConfig (dict) --The encryption key.\n AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n \n CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n \n ProcessingConfiguration (dict) --The data processing configuration.\n Enabled (boolean) --Enables or disables data processing.\n Processors (list) --The data processors.\n (dict) --Describes a data processor.\n Type (string) -- [REQUIRED]The type of processor.\n Parameters (list) --The processor parameters.\n (dict) --Describes the processor parameter.\n ParameterName (string) -- [REQUIRED]The name of the parameter.\n ParameterValue (string) -- [REQUIRED]The parameter value.\n \n \n CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n Enabled (boolean) --Enables or disables CloudWatch logging.\n LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n LogStreamName (string) --The CloudWatch log stream name for logging. 
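For orientation, a hedged sketch of passing one of these update blocks to the operation; the stream name is a placeholder, and the version and destination IDs are read back from DescribeDeliveryStream:
    import boto3
    firehose = boto3.client('firehose')
    desc = firehose.describe_delivery_stream(DeliveryStreamName='my-stream')['DeliveryStreamDescription']
    firehose.update_destination(
        DeliveryStreamName='my-stream',                        # placeholder
        CurrentDeliveryStreamVersionId=desc['VersionId'],
        DestinationId=desc['Destinations'][0]['DestinationId'],
        SplunkDestinationUpdate={'HECEndpointType': 'Event'}   # minimal illustrative update
    )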
This value is required if CloudWatch logging is enabled.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6230798959732056, "alphanum_fraction": 0.6297954320907593, "avg_line_length": 34.49040985107422, "blob_id": "7d1cd122053dad74501cba056717394dcd00ce43", "content_id": "4083a6f97ce8445d7a9c0086935dca7f427e6662", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12955, "license_type": "permissive", "max_line_length": 398, "num_lines": 365, "path": "/pyboto3/translate.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_terminology(Name=None):\n \"\"\"\n A synchronous action that deletes a custom terminology.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_terminology(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the custom terminology being deleted.\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_terminology(Name=None, TerminologyDataFormat=None):\n \"\"\"\n Retrieves a custom terminology.\n See also: AWS API Documentation\n \n \n :example: response = client.get_terminology(\n Name='string',\n TerminologyDataFormat='CSV'|'TMX'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the custom terminology being retrieved.\n \n\n :type TerminologyDataFormat: string\n :param TerminologyDataFormat: [REQUIRED]\n The data format of the custom terminology being retrieved, either CSV or TMX.\n \n\n :rtype: dict\n :return: {\n 'TerminologyProperties': {\n 'Name': 'string',\n 'Description': 'string',\n 'Arn': 'string',\n 'SourceLanguageCode': 'string',\n 'TargetLanguageCodes': [\n 'string',\n ],\n 'EncryptionKey': {\n 'Type': 'KMS',\n 'Id': 'string'\n },\n 'SizeBytes': 123,\n 'TermCount': 123,\n 'CreatedAt': datetime(2015, 1, 1),\n 'LastUpdatedAt': datetime(2015, 1, 1)\n },\n 'TerminologyDataLocation': {\n 'RepositoryType': 'string',\n 'Location': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef import_terminology(Name=None, MergeStrategy=None, Description=None, TerminologyData=None, EncryptionKey=None):\n \"\"\"\n Creates or updates a custom terminology, depending on whether or not one already exists for the given terminology name. Importing a terminology with the same name as an existing one will merge the terminologies based on the chosen merge strategy. Currently, the only supported merge strategy is OVERWRITE, and so the imported terminology will overwrite an existing terminology of the same name.\n If you import a terminology that overwrites an existing one, the new terminology take up to 10 minutes to fully propagate and be available for use in a translation due to cache policies with the DataPlane service that performs the translations.\n See also: AWS API Documentation\n \n \n :example: response = client.import_terminology(\n Name='string',\n MergeStrategy='OVERWRITE',\n Description='string',\n TerminologyData={\n 'File': b'bytes',\n 'Format': 'CSV'|'TMX'\n },\n EncryptionKey={\n 'Type': 'KMS',\n 'Id': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the custom terminology being imported.\n \n\n :type MergeStrategy: string\n :param MergeStrategy: [REQUIRED]\n The merge strategy of the custom terminology being imported. Currently, only the OVERWRITE merge strategy is supported. In this case, the imported terminology will overwrite an existing terminology of the same name.\n \n\n :type Description: string\n :param Description: The description of the custom terminology being imported.\n\n :type TerminologyData: dict\n :param TerminologyData: [REQUIRED]\n The terminology data for the custom terminology being imported.\n File (bytes) -- [REQUIRED]The file containing the custom terminology data.\n Format (string) -- [REQUIRED]The data format of the custom terminology. 
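A hedged sketch of importing a small CSV terminology (the terminology name and rows are assumptions):
    import boto3
    translate = boto3.client('translate')
    rows = 'en,fr\nHello world,Bonjour tout le monde'.encode('utf-8')  # illustrative CSV rows
    translate.import_terminology(
        Name='my-terminology',      # assumed name
        MergeStrategy='OVERWRITE',  # currently the only supported strategy
        TerminologyData={'File': rows, 'Format': 'CSV'}
    )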
Either CSV or TMX.\n \n\n :type EncryptionKey: dict\n :param EncryptionKey: The encryption key for the custom terminology being imported.\n Type (string) -- [REQUIRED]The type of encryption key used by Amazon Translate to encrypt custom terminologies.\n Id (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key being used to encrypt the custom terminology.\n \n\n :rtype: dict\n :return: {\n 'TerminologyProperties': {\n 'Name': 'string',\n 'Description': 'string',\n 'Arn': 'string',\n 'SourceLanguageCode': 'string',\n 'TargetLanguageCodes': [\n 'string',\n ],\n 'EncryptionKey': {\n 'Type': 'KMS',\n 'Id': 'string'\n },\n 'SizeBytes': 123,\n 'TermCount': 123,\n 'CreatedAt': datetime(2015, 1, 1),\n 'LastUpdatedAt': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_terminologies(NextToken=None, MaxResults=None):\n \"\"\"\n Provides a list of custom terminologies associated with your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_terminologies(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: If the result of the request to ListTerminologies was truncated, include the NextToken to fetch the next group of custom terminologies.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of custom terminologies returned per list request.\n\n :rtype: dict\n :return: {\n 'TerminologyPropertiesList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'Arn': 'string',\n 'SourceLanguageCode': 'string',\n 'TargetLanguageCodes': [\n 'string',\n ],\n 'EncryptionKey': {\n 'Type': 'KMS',\n 'Id': 'string'\n },\n 'SizeBytes': 123,\n 'TermCount': 123,\n 'CreatedAt': datetime(2015, 1, 1),\n 'LastUpdatedAt': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef translate_text(Text=None, TerminologyNames=None, SourceLanguageCode=None, TargetLanguageCode=None):\n \"\"\"\n Translates input text from the source language to the target language. It is not necessary to use English (en) as either the source or the target language but not all language combinations are supported by Amazon Translate. For more information, see Supported Language Pairs .\n To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto , Amazon Translate will call Amazon Comprehend to determine the source language.\n See also: AWS API Documentation\n \n \n :example: response = client.translate_text(\n Text='string',\n TerminologyNames=[\n 'string',\n ],\n SourceLanguageCode='string',\n TargetLanguageCode='string'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n The text to translate. The text string can be a maximum of 5,000 bytes long. Depending on your character set, this may be fewer than 5,000 characters.\n \n\n :type TerminologyNames: list\n :param TerminologyNames: The TerminologyNames list that is taken as input to the TranslateText request. This has a minimum length of 0 and a maximum length of 1.\n (string) --\n \n\n :type SourceLanguageCode: string\n :param SourceLanguageCode: [REQUIRED]\n The language code for the language of the source text. The language must be a language supported by Amazon Translate.\n To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. 
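A minimal sketch of such a call (the target language and text are chosen for illustration):
    import boto3
    translate = boto3.client('translate')
    result = translate.translate_text(
        Text='Hello, world',        # any UTF-8 text up to 5,000 bytes
        SourceLanguageCode='auto',  # ask the service to detect the source language
        TargetLanguageCode='es'     # Spanish, assumed for the example
    )
    print(result['TranslatedText'])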
If you specify auto , Amazon Translate will call Amazon Comprehend to determine the source language.\n \n\n :type TargetLanguageCode: string\n :param TargetLanguageCode: [REQUIRED]\n The language code requested for the language of the target text. The language must be a language supported by Amazon Translate.\n \n\n :rtype: dict\n :return: {\n 'TranslatedText': 'string',\n 'SourceLanguageCode': 'string',\n 'TargetLanguageCode': 'string',\n 'AppliedTerminologies': [\n {\n 'Name': 'string',\n 'Terms': [\n {\n 'SourceText': 'string',\n 'TargetText': 'string'\n },\n ]\n },\n ]\n }\n \n \n :returns: \n Text (string) -- [REQUIRED]\n The text to translate. The text string can be a maximum of 5,000 bytes long. Depending on your character set, this may be fewer than 5,000 characters.\n \n TerminologyNames (list) -- The TerminologyNames list that is taken as input to the TranslateText request. This has a minimum length of 0 and a maximum length of 1.\n \n (string) --\n \n \n SourceLanguageCode (string) -- [REQUIRED]\n The language code for the language of the source text. The language must be a language supported by Amazon Translate.\n To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto , Amazon Translate will call Amazon Comprehend to determine the source language.\n \n TargetLanguageCode (string) -- [REQUIRED]\n The language code requested for the language of the target text. The language must be a language supported by Amazon Translate.\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5250222086906433, "alphanum_fraction": 0.5290502905845642, "avg_line_length": 31.40265464782715, "blob_id": "87a8502e7f62a521d7e47c88e554586e94575c54", "content_id": "c7ab4932349841402feb076212a5182e6be7ebcc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14647, "license_type": "permissive", "max_line_length": 150, "num_lines": 452, "path": "/pyboto3/dlm.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_lifecycle_policy(ExecutionRoleArn=None, Description=None, State=None, PolicyDetails=None):\n \"\"\"\n Creates a policy to manage the lifecycle of the specified AWS resources. You can create up to 100 lifecycle policies.\n See also: AWS API Documentation\n \n \n :example: response = client.create_lifecycle_policy(\n ExecutionRoleArn='string',\n Description='string',\n State='ENABLED'|'DISABLED',\n PolicyDetails={\n 'ResourceTypes': [\n 'VOLUME',\n ],\n 'TargetTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'Schedules': [\n {\n 'Name': 'string',\n 'CopyTags': True|False,\n 'TagsToAdd': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'CreateRule': {\n 'Interval': 123,\n 'IntervalUnit': 'HOURS',\n 'Times': [\n 'string',\n ]\n },\n 'RetainRule': {\n 'Count': 123\n }\n },\n ]\n }\n )\n \n \n :type ExecutionRoleArn: string\n :param ExecutionRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.\n \n\n :type Description: string\n :param Description: [REQUIRED]\n A description of the lifecycle policy. The characters ^[0-9A-Za-z _-]+$ are supported.\n \n\n :type State: string\n :param State: [REQUIRED]\n The desired activation state of the lifecycle policy after creation.\n \n\n :type PolicyDetails: dict\n :param PolicyDetails: [REQUIRED]\n The configuration details of the lifecycle policy.\n Target tags cannot be re-used across lifecycle policies.\n ResourceTypes (list) --The resource type.\n (string) --\n TargetTags (list) --The single tag that identifies targeted resources for this policy.\n (dict) --Specifies a tag for a resource.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n Schedules (list) --The schedule of policy-defined actions.\n (dict) --Specifies a schedule.\n Name (string) --The name of the schedule.\n CopyTags (boolean) --\n TagsToAdd (list) --The tags to apply to policy-created resources. These user-defined tags are in addition to the AWS-added lifecycle tags.\n (dict) --Specifies a tag for a resource.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n CreateRule (dict) --The create rule.\n Interval (integer) -- [REQUIRED]The interval. 
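For instance, a rule that creates a snapshot every 12 hours starting at 09:00 UTC could be written as (values illustrative):
    CreateRule={'Interval': 12, 'IntervalUnit': 'HOURS', 'Times': ['09:00']}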
The supported values are 12 and 24.\n IntervalUnit (string) -- [REQUIRED]The interval unit.\n Times (list) --The time, in UTC, to start the operation.\n The operation occurs within a one-hour window following the specified time.\n (string) --\n \n RetainRule (dict) --The retain rule.\n Count (integer) -- [REQUIRED]The number of snapshots to keep for each volume, up to a maximum of 1000.\n \n \n \n\n :rtype: dict\n :return: {\n 'PolicyId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_lifecycle_policy(PolicyId=None):\n \"\"\"\n Deletes the specified lifecycle policy and halts the automated operations that the policy specified.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_lifecycle_policy(\n PolicyId='string'\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The identifier of the lifecycle policy.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_lifecycle_policies(PolicyIds=None, State=None, ResourceTypes=None, TargetTags=None, TagsToAdd=None):\n \"\"\"\n Gets summary information about all or the specified data lifecycle policies.\n To get complete information about a policy, use GetLifecyclePolicy .\n See also: AWS API Documentation\n \n \n :example: response = client.get_lifecycle_policies(\n PolicyIds=[\n 'string',\n ],\n State='ENABLED'|'DISABLED'|'ERROR',\n ResourceTypes=[\n 'VOLUME',\n ],\n TargetTags=[\n 'string',\n ],\n TagsToAdd=[\n 'string',\n ]\n )\n \n \n :type PolicyIds: list\n :param PolicyIds: The identifiers of the data lifecycle policies.\n (string) --\n \n\n :type State: string\n :param State: The activation state.\n\n :type ResourceTypes: list\n :param ResourceTypes: The resource type.\n (string) --\n \n\n :type TargetTags: list\n :param TargetTags: The target tag for a policy.\n Tags are strings in the format key=value .\n (string) --\n \n\n :type TagsToAdd: list\n :param TagsToAdd: The tags to add to objects created by the policy.\n Tags are strings in the format key=value .\n These user-defined tags are added in addition to the AWS-added lifecycle tags.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Policies': [\n {\n 'PolicyId': 'string',\n 'Description': 'string',\n 'State': 'ENABLED'|'DISABLED'|'ERROR'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_lifecycle_policy(PolicyId=None):\n \"\"\"\n Gets detailed information about the specified lifecycle policy.\n See also: AWS API Documentation\n \n \n :example: response = client.get_lifecycle_policy(\n PolicyId='string'\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The identifier of the lifecycle policy.\n \n\n :rtype: dict\n :return: {\n 'Policy': {\n 'PolicyId': 'string',\n 'Description': 'string',\n 'State': 'ENABLED'|'DISABLED'|'ERROR',\n 'ExecutionRoleArn': 'string',\n 'DateCreated': datetime(2015, 1, 1),\n 'DateModified': datetime(2015, 1, 1),\n 
'PolicyDetails': {\n 'ResourceTypes': [\n 'VOLUME',\n ],\n 'TargetTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'Schedules': [\n {\n 'Name': 'string',\n 'CopyTags': True|False,\n 'TagsToAdd': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'CreateRule': {\n 'Interval': 123,\n 'IntervalUnit': 'HOURS',\n 'Times': [\n 'string',\n ]\n },\n 'RetainRule': {\n 'Count': 123\n }\n },\n ]\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef update_lifecycle_policy(PolicyId=None, ExecutionRoleArn=None, State=None, Description=None, PolicyDetails=None):\n \"\"\"\n Updates the specified lifecycle policy.\n See also: AWS API Documentation\n \n \n :example: response = client.update_lifecycle_policy(\n PolicyId='string',\n ExecutionRoleArn='string',\n State='ENABLED'|'DISABLED',\n Description='string',\n PolicyDetails={\n 'ResourceTypes': [\n 'VOLUME',\n ],\n 'TargetTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'Schedules': [\n {\n 'Name': 'string',\n 'CopyTags': True|False,\n 'TagsToAdd': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'CreateRule': {\n 'Interval': 123,\n 'IntervalUnit': 'HOURS',\n 'Times': [\n 'string',\n ]\n },\n 'RetainRule': {\n 'Count': 123\n }\n },\n ]\n }\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The identifier of the lifecycle policy.\n \n\n :type ExecutionRoleArn: string\n :param ExecutionRoleArn: The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.\n\n :type State: string\n :param State: The desired activation state of the lifecycle policy after creation.\n\n :type Description: string\n :param Description: A description of the lifecycle policy.\n\n :type PolicyDetails: dict\n :param PolicyDetails: The configuration of the lifecycle policy.\n Target tags cannot be re-used across policies.\n ResourceTypes (list) --The resource type.\n (string) --\n TargetTags (list) --The single tag that identifies targeted resources for this policy.\n (dict) --Specifies a tag for a resource.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n Schedules (list) --The schedule of policy-defined actions.\n (dict) --Specifies a schedule.\n Name (string) --The name of the schedule.\n CopyTags (boolean) --\n TagsToAdd (list) --The tags to apply to policy-created resources. These user-defined tags are in addition to the AWS-added lifecycle tags.\n (dict) --Specifies a tag for a resource.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n CreateRule (dict) --The create rule.\n Interval (integer) -- [REQUIRED]The interval. 
The supported values are 12 and 24.\n IntervalUnit (string) -- [REQUIRED]The interval unit.\n Times (list) --The time, in UTC, to start the operation.\n The operation occurs within a one-hour window following the specified time.\n (string) --\n \n RetainRule (dict) --The retain rule.\n Count (integer) -- [REQUIRED]The number of snapshots to keep for each volume, up to a maximum of 1000.\n \n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6237213015556335, "alphanum_fraction": 0.6302075386047363, "avg_line_length": 45.91999816894531, "blob_id": "37e4782de26d5bbddc02494e3f65d99db4349a72", "content_id": "8059481eacdec3232769d17aa4a74f86ce5bf166", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26980, "license_type": "permissive", "max_line_length": 512, "num_lines": 575, "path": "/pyboto3/polly.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_lexicon(Name=None):\n \"\"\"\n Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs.\n For more information, see Managing Lexicons .\n See also: AWS API Documentation\n \n Examples\n Deletes a specified pronunciation lexicon stored in an AWS Region.\n Expected Output:\n \n :example: response = client.delete_lexicon(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the lexicon to delete. Must be an existing lexicon in the region.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_voices(LanguageCode=None, IncludeAdditionalLanguageCodes=None, NextToken=None):\n \"\"\"\n Returns the list of voices that are available for use when requesting speech synthesis. 
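A hedged sketch that lists the available US English voices (the language code is chosen for illustration):
    import boto3
    polly = boto3.client('polly')
    for voice in polly.describe_voices(LanguageCode='en-US')['Voices']:
        print(voice['Id'], voice['Gender'], voice['LanguageName'])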
Each voice speaks a specified language, is either male or female, and is identified by an ID, which is the ASCII version of the voice name.\n When synthesizing speech ( SynthesizeSpeech ), you provide the voice ID for the voice you want from the list of voices returned by DescribeVoices .\n For example, you want your news reader application to read news in a specific language, but giving a user the option to choose the voice. Using the DescribeVoices operation you can provide the user with a list of available voices to select from.\n You can optionally specify a language code to filter the available voices. For example, if you specify en-US , the operation returns a list of all available US English voices.\n This operation requires permissions to perform the polly:DescribeVoices action.\n See also: AWS API Documentation\n \n Examples\n Returns the list of voices that are available for use when requesting speech synthesis. Displayed languages are those within the specified language code. If no language code is specified, voices for all available languages are displayed.\n Expected Output:\n \n :example: response = client.describe_voices(\n LanguageCode='cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',\n IncludeAdditionalLanguageCodes=True|False,\n NextToken='string'\n )\n \n \n :type LanguageCode: string\n :param LanguageCode: The language identification tag (ISO 639 code for the language name-ISO 3166 country code) for filtering the list of voices returned. If you don't specify this optional parameter, all available voices are returned.\n\n :type IncludeAdditionalLanguageCodes: boolean\n :param IncludeAdditionalLanguageCodes: Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (es-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no .\n\n :type NextToken: string\n :param NextToken: An opaque pagination token returned from the previous DescribeVoices operation. 
If present, this indicates where to continue the listing.\n\n :rtype: dict\n :return: {\n 'Voices': [\n {\n 'Gender': 'Female'|'Male',\n 'Id': 'Geraint'|'Gwyneth'|'Mads'|'Naja'|'Hans'|'Marlene'|'Nicole'|'Russell'|'Amy'|'Brian'|'Emma'|'Raveena'|'Ivy'|'Joanna'|'Joey'|'Justin'|'Kendra'|'Kimberly'|'Matthew'|'Salli'|'Conchita'|'Enrique'|'Miguel'|'Penelope'|'Chantal'|'Celine'|'Lea'|'Mathieu'|'Dora'|'Karl'|'Carla'|'Giorgio'|'Mizuki'|'Liv'|'Lotte'|'Ruben'|'Ewa'|'Jacek'|'Jan'|'Maja'|'Ricardo'|'Vitoria'|'Cristiano'|'Ines'|'Carmen'|'Maxim'|'Tatyana'|'Astrid'|'Filiz'|'Vicki'|'Takumi'|'Seoyeon'|'Aditi'|'Zhiyu'|'Bianca'|'Lucia'|'Mia',\n 'LanguageCode': 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',\n 'LanguageName': 'string',\n 'Name': 'string',\n 'AdditionalLanguageCodes': [\n 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_lexicon(Name=None):\n \"\"\"\n Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons .\n See also: AWS API Documentation\n \n Examples\n Returns the content of the specified pronunciation lexicon stored in an AWS Region.\n Expected Output:\n \n :example: response = client.get_lexicon(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the lexicon.\n \n\n :rtype: dict\n :return: {\n 'Lexicon': {\n 'Content': 'string',\n 'Name': 'string'\n },\n 'LexiconAttributes': {\n 'Alphabet': 'string',\n 'LanguageCode': 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',\n 'LastModified': datetime(2015, 1, 1),\n 'LexiconArn': 'string',\n 'LexemesCount': 123,\n 'Size': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_speech_synthesis_task(TaskId=None):\n \"\"\"\n Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task.\n See also: AWS API Documentation\n \n \n :example: response = client.get_speech_synthesis_task(\n TaskId='string'\n )\n \n \n :type TaskId: string\n :param TaskId: [REQUIRED]\n The Amazon Polly generated identifier for a speech synthesis task.\n \n\n :rtype: dict\n :return: {\n 'SynthesisTask': {\n 'TaskId': 'string',\n 'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',\n 'TaskStatusReason': 'string',\n 'OutputUri': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'RequestCharacters': 123,\n 'SnsTopicArn': 'string',\n 'LexiconNames': [\n 'string',\n ],\n 'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',\n 'SampleRate': 'string',\n 'SpeechMarkTypes': [\n 'sentence'|'ssml'|'viseme'|'word',\n ],\n 'TextType': 'ssml'|'text',\n 'VoiceId': 'Geraint'|'Gwyneth'|'Mads'|'Naja'|'Hans'|'Marlene'|'Nicole'|'Russell'|'Amy'|'Brian'|'Emma'|'Raveena'|'Ivy'|'Joanna'|'Joey'|'Justin'|'Kendra'|'Kimberly'|'Matthew'|'Salli'|'Conchita'|'Enrique'|'Miguel'|'Penelope'|'Chantal'|'Celine'|'Lea'|'Mathieu'|'Dora'|'Karl'|'Carla'|'Giorgio'|'Mizuki'|'Liv'|'Lotte'|'Ruben'|'Ewa'|'Jacek'|'Jan'|'Maja'|'Ricardo'|'Vitoria'|'Cristiano'|'Ines'|'Carmen'|'Maxim'|'Tatyana'|'Astrid'|'Filiz'|'Vicki'|'Takumi'|'Seoyeon'|'Aditi'|'Zhiyu'|'Bianca'|'Lucia'|'Mia',\n 'LanguageCode': 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_lexicons(NextToken=None):\n \"\"\"\n Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons .\n See also: AWS API Documentation\n \n Examples\n Returns a list of pronunciation lexicons stored in an AWS Region.\n Expected Output:\n \n :example: response = client.list_lexicons(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: An opaque pagination token returned from previous ListLexicons operation. 
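A hedged pagination sketch built around this token:
    import boto3
    polly = boto3.client('polly')
    lexicons, token = [], None
    while True:
        page = polly.list_lexicons(**({'NextToken': token} if token else {}))
        lexicons.extend(page['Lexicons'])
        token = page.get('NextToken')
        if not token:
            break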
If present, indicates where to continue the list of lexicons.\n\n :rtype: dict\n :return: {\n 'Lexicons': [\n {\n 'Name': 'string',\n 'Attributes': {\n 'Alphabet': 'string',\n 'LanguageCode': 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',\n 'LastModified': datetime(2015, 1, 1),\n 'LexiconArn': 'string',\n 'LexemesCount': 123,\n 'Size': 123\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_speech_synthesis_tasks(MaxResults=None, NextToken=None, Status=None):\n \"\"\"\n Returns a list of SpeechSynthesisTask objects ordered by their creation date. This operation can filter the tasks by their status, for example, allowing users to list only tasks that are completed.\n See also: AWS API Documentation\n \n \n :example: response = client.list_speech_synthesis_tasks(\n MaxResults=123,\n NextToken='string',\n Status='scheduled'|'inProgress'|'completed'|'failed'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: Maximum number of speech synthesis tasks returned in a List operation.\n\n :type NextToken: string\n :param NextToken: The pagination token to use in the next request to continue the listing of speech synthesis tasks.\n\n :type Status: string\n :param Status: Status of the speech synthesis tasks returned in a List operation\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'SynthesisTasks': [\n {\n 'TaskId': 'string',\n 'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',\n 'TaskStatusReason': 'string',\n 'OutputUri': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'RequestCharacters': 123,\n 'SnsTopicArn': 'string',\n 'LexiconNames': [\n 'string',\n ],\n 'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',\n 'SampleRate': 'string',\n 'SpeechMarkTypes': [\n 'sentence'|'ssml'|'viseme'|'word',\n ],\n 'TextType': 'ssml'|'text',\n 'VoiceId': 'Geraint'|'Gwyneth'|'Mads'|'Naja'|'Hans'|'Marlene'|'Nicole'|'Russell'|'Amy'|'Brian'|'Emma'|'Raveena'|'Ivy'|'Joanna'|'Joey'|'Justin'|'Kendra'|'Kimberly'|'Matthew'|'Salli'|'Conchita'|'Enrique'|'Miguel'|'Penelope'|'Chantal'|'Celine'|'Lea'|'Mathieu'|'Dora'|'Karl'|'Carla'|'Giorgio'|'Mizuki'|'Liv'|'Lotte'|'Ruben'|'Ewa'|'Jacek'|'Jan'|'Maja'|'Ricardo'|'Vitoria'|'Cristiano'|'Ines'|'Carmen'|'Maxim'|'Tatyana'|'Astrid'|'Filiz'|'Vicki'|'Takumi'|'Seoyeon'|'Aditi'|'Zhiyu'|'Bianca'|'Lucia'|'Mia',\n 'LanguageCode': 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef put_lexicon(Name=None, Content=None):\n \"\"\"\n Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency, therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.\n For more information, see Managing Lexicons .\n See also: AWS API Documentation\n \n Examples\n Stores a pronunciation lexicon in an AWS Region.\n Expected Output:\n \n :example: response = client.put_lexicon(\n Name='string',\n Content='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the lexicon. The name must follow the regular express format [0-9A-Za-z]{1,20}. 
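For example, 'W3C' and 'MyLexicon1' are valid names, while 'my-lexicon' is not, because hyphens fall outside the pattern.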
That is, the name is a case-sensitive alphanumeric string up to 20 characters long.\n \n\n :type Content: string\n :param Content: [REQUIRED]\n Content of the PLS lexicon as string data.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef start_speech_synthesis_task(LexiconNames=None, OutputFormat=None, OutputS3BucketName=None, OutputS3KeyPrefix=None, SampleRate=None, SnsTopicArn=None, SpeechMarkTypes=None, Text=None, TextType=None, VoiceId=None, LanguageCode=None):\n \"\"\"\n Allows the creation of an asynchronous synthesis task, by starting a new SpeechSynthesisTask . This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status.\n See also: AWS API Documentation\n \n \n :example: response = client.start_speech_synthesis_task(\n LexiconNames=[\n 'string',\n ],\n OutputFormat='json'|'mp3'|'ogg_vorbis'|'pcm',\n OutputS3BucketName='string',\n OutputS3KeyPrefix='string',\n SampleRate='string',\n SnsTopicArn='string',\n SpeechMarkTypes=[\n 'sentence'|'ssml'|'viseme'|'word',\n ],\n Text='string',\n TextType='ssml'|'text',\n VoiceId='Geraint'|'Gwyneth'|'Mads'|'Naja'|'Hans'|'Marlene'|'Nicole'|'Russell'|'Amy'|'Brian'|'Emma'|'Raveena'|'Ivy'|'Joanna'|'Joey'|'Justin'|'Kendra'|'Kimberly'|'Matthew'|'Salli'|'Conchita'|'Enrique'|'Miguel'|'Penelope'|'Chantal'|'Celine'|'Lea'|'Mathieu'|'Dora'|'Karl'|'Carla'|'Giorgio'|'Mizuki'|'Liv'|'Lotte'|'Ruben'|'Ewa'|'Jacek'|'Jan'|'Maja'|'Ricardo'|'Vitoria'|'Cristiano'|'Ines'|'Carmen'|'Maxim'|'Tatyana'|'Astrid'|'Filiz'|'Vicki'|'Takumi'|'Seoyeon'|'Aditi'|'Zhiyu'|'Bianca'|'Lucia'|'Mia',\n LanguageCode='cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'\n )\n \n \n :type LexiconNames: list\n :param LexiconNames: List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.\n (string) --\n \n\n :type OutputFormat: string\n :param OutputFormat: [REQUIRED]\n The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.\n \n\n :type OutputS3BucketName: string\n :param OutputS3BucketName: [REQUIRED]\n Amazon S3 bucket name to which the output file will be saved.\n \n\n :type OutputS3KeyPrefix: string\n :param OutputS3KeyPrefix: The Amazon S3 key prefix for the output speech file.\n\n :type SampleRate: string\n :param SampleRate: The audio frequency specified in Hz.\n The valid values for mp3 and ogg_vorbis are '8000', '16000', and '22050'. The default value is '22050'.\n Valid values for pcm are '8000' and '16000' The default value is '16000'.\n \n\n :type SnsTopicArn: string\n :param SnsTopicArn: ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.\n\n :type SpeechMarkTypes: list\n :param SpeechMarkTypes: The type of speech marks returned for the input text.\n (string) --\n \n\n :type Text: string\n :param Text: [REQUIRED]\n The input text to synthesize. 
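A hedged end-to-end sketch of the asynchronous flow (the bucket name and text are assumptions; the bucket must already exist):
    import boto3
    polly = boto3.client('polly')
    task = polly.start_speech_synthesis_task(
        OutputFormat='mp3',
        OutputS3BucketName='my-polly-output',  # assumed bucket name
        Text='Hello from an asynchronous synthesis task.',
        VoiceId='Joanna'
    )['SynthesisTask']
    # Poll for completion; production code would subscribe via SnsTopicArn instead.
    status = polly.get_speech_synthesis_task(TaskId=task['TaskId'])['SynthesisTask']['TaskStatus']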
If you specify ssml as the TextType, follow the SSML format for the input text.\n \n\n :type TextType: string\n :param TextType: Specifies whether the input text is plain text or SSML. The default value is plain text.\n\n :type VoiceId: string\n :param VoiceId: [REQUIRED]\n Voice ID to use for the synthesis.\n \n\n :type LanguageCode: string\n :param LanguageCode: Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).\n If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.\n \n\n :rtype: dict\n :return: {\n 'SynthesisTask': {\n 'TaskId': 'string',\n 'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',\n 'TaskStatusReason': 'string',\n 'OutputUri': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'RequestCharacters': 123,\n 'SnsTopicArn': 'string',\n 'LexiconNames': [\n 'string',\n ],\n 'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',\n 'SampleRate': 'string',\n 'SpeechMarkTypes': [\n 'sentence'|'ssml'|'viseme'|'word',\n ],\n 'TextType': 'ssml'|'text',\n 'VoiceId': 'Geraint'|'Gwyneth'|'Mads'|'Naja'|'Hans'|'Marlene'|'Nicole'|'Russell'|'Amy'|'Brian'|'Emma'|'Raveena'|'Ivy'|'Joanna'|'Joey'|'Justin'|'Kendra'|'Kimberly'|'Matthew'|'Salli'|'Conchita'|'Enrique'|'Miguel'|'Penelope'|'Chantal'|'Celine'|'Lea'|'Mathieu'|'Dora'|'Karl'|'Carla'|'Giorgio'|'Mizuki'|'Liv'|'Lotte'|'Ruben'|'Ewa'|'Jacek'|'Jan'|'Maja'|'Ricardo'|'Vitoria'|'Cristiano'|'Ines'|'Carmen'|'Maxim'|'Tatyana'|'Astrid'|'Filiz'|'Vicki'|'Takumi'|'Seoyeon'|'Aditi'|'Zhiyu'|'Bianca'|'Lucia'|'Mia',\n 'LanguageCode': 'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef synthesize_speech(LexiconNames=None, OutputFormat=None, SampleRate=None, SpeechMarkTypes=None, Text=None, TextType=None, VoiceId=None, LanguageCode=None):\n \"\"\"\n Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. 
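A minimal synchronous sketch (the voice, text, and output file are chosen for illustration):
    import boto3
    polly = boto3.client('polly')
    response = polly.synthesize_speech(
        OutputFormat='mp3',
        Text='Hello, world',
        VoiceId='Joanna'
    )
    with open('hello.mp3', 'wb') as f:
        f.write(response['AudioStream'].read())  # AudioStream is a StreamingBody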
For more information, see How it Works .\n See also: AWS API Documentation\n \n Examples\n Synthesizes plain text or SSML into a file of human-like speech.\n Expected Output:\n \n :example: response = client.synthesize_speech(\n LexiconNames=[\n 'string',\n ],\n OutputFormat='json'|'mp3'|'ogg_vorbis'|'pcm',\n SampleRate='string',\n SpeechMarkTypes=[\n 'sentence'|'ssml'|'viseme'|'word',\n ],\n Text='string',\n TextType='ssml'|'text',\n VoiceId='Geraint'|'Gwyneth'|'Mads'|'Naja'|'Hans'|'Marlene'|'Nicole'|'Russell'|'Amy'|'Brian'|'Emma'|'Raveena'|'Ivy'|'Joanna'|'Joey'|'Justin'|'Kendra'|'Kimberly'|'Matthew'|'Salli'|'Conchita'|'Enrique'|'Miguel'|'Penelope'|'Chantal'|'Celine'|'Lea'|'Mathieu'|'Dora'|'Karl'|'Carla'|'Giorgio'|'Mizuki'|'Liv'|'Lotte'|'Ruben'|'Ewa'|'Jacek'|'Jan'|'Maja'|'Ricardo'|'Vitoria'|'Cristiano'|'Ines'|'Carmen'|'Maxim'|'Tatyana'|'Astrid'|'Filiz'|'Vicki'|'Takumi'|'Seoyeon'|'Aditi'|'Zhiyu'|'Bianca'|'Lucia'|'Mia',\n LanguageCode='cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'\n )\n \n \n :type LexiconNames: list\n :param LexiconNames: List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice. For information about storing lexicons, see PutLexicon .\n (string) --\n \n\n :type OutputFormat: string\n :param OutputFormat: [REQUIRED]\n The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.\n When pcm is used, the content returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.\n \n\n :type SampleRate: string\n :param SampleRate: The audio frequency specified in Hz.\n The valid values for mp3 and ogg_vorbis are '8000', '16000', and '22050'. The default value is '22050'.\n Valid values for pcm are '8000' and '16000' The default value is '16000'.\n \n\n :type SpeechMarkTypes: list\n :param SpeechMarkTypes: The type of speech marks returned for the input text.\n (string) --\n \n\n :type Text: string\n :param Text: [REQUIRED]\n Input text to synthesize. If you specify ssml as the TextType , follow the SSML format for the input text.\n \n\n :type TextType: string\n :param TextType: Specifies whether the input text is plain text or SSML. The default value is plain text. For more information, see Using SSML .\n\n :type VoiceId: string\n :param VoiceId: [REQUIRED]\n Voice ID to use for the synthesis. You can get a list of available voice IDs by calling the DescribeVoices operation.\n \n\n :type LanguageCode: string\n :param LanguageCode: Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).\n If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. 
For example, if no language code is specified, Aditi will use Indian English rather than Hindi.\n \n\n :rtype: dict\n :return: {\n 'AudioStream': StreamingBody(),\n 'ContentType': 'string',\n 'RequestCharacters': 123\n }\n \n \n :returns: \n If you request mp3 as the OutputFormat , the ContentType returned is audio/mpeg.\n If you request ogg_vorbis as the OutputFormat , the ContentType returned is audio/ogg.\n If you request pcm as the OutputFormat , the ContentType returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.\n If you request json as the OutputFormat , the ContentType returned is audio/json.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6011859774589539, "alphanum_fraction": 0.6058326363563538, "avg_line_length": 29.99588394165039, "blob_id": "7b1fccfa189ff06d8b1bb8d8acfe9fc139c5c08a", "content_id": "94434f470cdc4b3bd221ac56450ecc24e041dc31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22597, "license_type": "permissive", "max_line_length": 331, "num_lines": 729, "path": "/pyboto3/chime.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_suspend_user(AccountId=None, UserIdList=None):\n \"\"\"\n Suspends up to 50 users from a Team or EnterpriseLWA Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide .\n Users suspended from a Team account are dissociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team account users, invite them to the Team account again. You can use the InviteUsers action to do so.\n Users suspended from an EnterpriseLWA account are immediately signed out of Amazon Chime and are no longer able to sign in. 
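A hedged sketch (the account ID and user IDs are placeholders):
    import boto3
    chime = boto3.client('chime')
    result = chime.batch_suspend_user(
        AccountId='12a3456b-7c89-012d-3456-78901e23fg45',  # placeholder
        UserIdList=['user-id-1', 'user-id-2']              # placeholders
    )
    # Users that could not be suspended are reported in UserErrors.
    for err in result['UserErrors']:
        print(err['UserId'], err['ErrorCode'], err['ErrorMessage'])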
To remove the suspension from suspended EnterpriseLWA account users, use the BatchUnsuspendUser action.\n To sign out users without suspending them, use the LogoutUser action.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_suspend_user(\n AccountId='string',\n UserIdList=[\n 'string',\n ]\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserIdList: list\n :param UserIdList: [REQUIRED]\n The request containing the user IDs to suspend.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UserErrors': [\n {\n 'UserId': 'string',\n 'ErrorCode': 'Unauthorized'|'Forbidden'|'NotFound'|'BadRequest'|'Conflict'|'ServiceFailure'|'ServiceUnavailable'|'Unprocessable'|'Throttled'|'PreconditionFailed',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_unsuspend_user(AccountId=None, UserIdList=None):\n \"\"\"\n Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA account. Only users on EnterpriseLWA accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide .\n Previously suspended users who are unsuspended using this action are returned to Registered status. Users who are not previously suspended are ignored.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_unsuspend_user(\n AccountId='string',\n UserIdList=[\n 'string',\n ]\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserIdList: list\n :param UserIdList: [REQUIRED]\n The request containing the user IDs to unsuspend.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UserErrors': [\n {\n 'UserId': 'string',\n 'ErrorCode': 'Unauthorized'|'Forbidden'|'NotFound'|'BadRequest'|'Conflict'|'ServiceFailure'|'ServiceUnavailable'|'Unprocessable'|'Throttled'|'PreconditionFailed',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_update_user(AccountId=None, UpdateUserRequestItems=None):\n \"\"\"\n Updates user details within the UpdateUserRequestItem object for up to 20 users for the specified Amazon Chime account. Currently, only LicenseType updates are supported for this action.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_update_user(\n AccountId='string',\n UpdateUserRequestItems=[\n {\n 'UserId': 'string',\n 'LicenseType': 'Basic'|'Plus'|'Pro'|'ProTrial'\n },\n ]\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UpdateUserRequestItems: list\n :param UpdateUserRequestItems: [REQUIRED]\n The request containing the user IDs and details to update.\n (dict) --The user ID and user fields to update, used with the BatchUpdateUser action.\n UserId (string) -- [REQUIRED]The user ID.\n LicenseType (string) --The user license type.\n \n \n\n :rtype: dict\n :return: {\n 'UserErrors': [\n {\n 'UserId': 'string',\n 'ErrorCode': 'Unauthorized'|'Forbidden'|'NotFound'|'BadRequest'|'Conflict'|'ServiceFailure'|'ServiceUnavailable'|'Unprocessable'|'Throttled'|'PreconditionFailed',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_account(Name=None):\n \"\"\"\n Creates an Amazon Chime account under the administrator's AWS account. Only Team account types are currently supported for this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_account(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the Amazon Chime account.\n \n\n :rtype: dict\n :return: {\n 'Account': {\n 'AwsAccountId': 'string',\n 'AccountId': 'string',\n 'Name': 'string',\n 'AccountType': 'Team'|'EnterpriseDirectory'|'EnterpriseLWA'|'EnterpriseOIDC',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'DefaultLicense': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'SupportedLicenses': [\n 'Basic'|'Plus'|'Pro'|'ProTrial',\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_account(AccountId=None):\n \"\"\"\n Deletes the specified Amazon Chime account. You must suspend all users before deleting a Team account. You can use the BatchSuspendUser action to do so.\n For EnterpriseLWA and EnterpriseAD accounts, you must release the claimed domains for your Amazon Chime account before deletion. As soon as you release the domain, all users under that account are suspended.\n Deleted accounts appear in your Disabled accounts list for 90 days. To restore a deleted account from your Disabled accounts list, you must contact AWS Support.\n After 90 days, deleted accounts are permanently removed from your Disabled accounts list.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_account(\n AccountId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_account(AccountId=None):\n \"\"\"\n Retrieves details for the specified Amazon Chime account, such as account type and supported licenses.\n See also: AWS API Documentation\n \n \n :example: response = client.get_account(\n AccountId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :rtype: dict\n :return: {\n 'Account': {\n 'AwsAccountId': 'string',\n 'AccountId': 'string',\n 'Name': 'string',\n 'AccountType': 'Team'|'EnterpriseDirectory'|'EnterpriseLWA'|'EnterpriseOIDC',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'DefaultLicense': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'SupportedLicenses': [\n 'Basic'|'Plus'|'Pro'|'ProTrial',\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_account_settings(AccountId=None):\n \"\"\"\n Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.get_account_settings(\n AccountId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :rtype: dict\n :return: {\n 'AccountSettings': {\n 'DisableRemoteControl': True|False,\n 'EnableDialOut': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_user(AccountId=None, UserId=None):\n \"\"\"\n Retrieves details for the specified user ID, such as primary email address, license type, and personal meeting PIN.\n To retrieve user details with an email address instead of a user ID, use the ListUsers action, and then filter by email address.\n See also: AWS API Documentation\n \n \n :example: response = client.get_user(\n AccountId='string',\n UserId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The user ID.\n \n\n :rtype: dict\n :return: {\n 'User': {\n 'UserId': 'string',\n 'AccountId': 'string',\n 'PrimaryEmail': 'string',\n 'DisplayName': 'string',\n 'LicenseType': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'UserRegistrationStatus': 'Unregistered'|'Registered'|'Suspended',\n 'UserInvitationStatus': 'Pending'|'Accepted'|'Failed',\n 'RegisteredOn': datetime(2015, 1, 1),\n 'InvitedOn': datetime(2015, 1, 1),\n 'PersonalPIN': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef invite_users(AccountId=None, UserEmailList=None):\n \"\"\"\n Sends email invites to as many as 50 users, inviting them to the specified Amazon Chime Team account. 
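A minimal sketch of the call shape (the account ID and email addresses are placeholders; a configured boto3 Chime client is assumed):

import boto3

chime = boto3.client('chime')
response = chime.invite_users(
    AccountId='EXAMPLE-ACCOUNT-ID',  # placeholder account ID
    UserEmailList=['[email protected]', '[email protected]'],  # placeholder addresses
)
# Each invite carries its own delivery status.
for invite in response['Invites']:
    print(invite['EmailAddress'], invite['Status'], invite['EmailStatus'])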
Only Team account types are currently supported for this action.\n See also: AWS API Documentation\n \n \n :example: response = client.invite_users(\n AccountId='string',\n UserEmailList=[\n 'string',\n ]\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserEmailList: list\n :param UserEmailList: [REQUIRED]\n The user email addresses to which to send the invite.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Invites': [\n {\n 'InviteId': 'string',\n 'Status': 'Pending'|'Accepted'|'Failed',\n 'EmailAddress': 'string',\n 'EmailStatus': 'NotSent'|'Sent'|'Failed'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_accounts(Name=None, UserEmail=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists the Amazon Chime accounts under the administrator's AWS account. You can filter accounts by account name prefix. To find out which Amazon Chime account a user belongs to, you can filter by the user's email address, which returns one account result.\n See also: AWS API Documentation\n \n \n :example: response = client.list_accounts(\n Name='string',\n UserEmail='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Name: string\n :param Name: Amazon Chime account name prefix with which to filter results.\n\n :type UserEmail: string\n :param UserEmail: User email address with which to filter results.\n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call. Defaults to 100.\n\n :rtype: dict\n :return: {\n 'Accounts': [\n {\n 'AwsAccountId': 'string',\n 'AccountId': 'string',\n 'Name': 'string',\n 'AccountType': 'Team'|'EnterpriseDirectory'|'EnterpriseLWA'|'EnterpriseOIDC',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'DefaultLicense': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'SupportedLicenses': [\n 'Basic'|'Plus'|'Pro'|'ProTrial',\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_users(AccountId=None, UserEmail=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the users that belong to the specified Amazon Chime account. You can specify an email address to list only the user that the email address belongs to.\n See also: AWS API Documentation\n \n \n :example: response = client.list_users(\n AccountId='string',\n UserEmail='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserEmail: string\n :param UserEmail: Optional. The user email address used to filter results. Maximum 1.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call. 
Defaults to 100.\n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results.\n\n :rtype: dict\n :return: {\n 'Users': [\n {\n 'UserId': 'string',\n 'AccountId': 'string',\n 'PrimaryEmail': 'string',\n 'DisplayName': 'string',\n 'LicenseType': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'UserRegistrationStatus': 'Unregistered'|'Registered'|'Suspended',\n 'UserInvitationStatus': 'Pending'|'Accepted'|'Failed',\n 'RegisteredOn': datetime(2015, 1, 1),\n 'InvitedOn': datetime(2015, 1, 1),\n 'PersonalPIN': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef logout_user(AccountId=None, UserId=None):\n \"\"\"\n Logs out the specified user from all of the devices they are currently logged into.\n See also: AWS API Documentation\n \n \n :example: response = client.logout_user(\n AccountId='string',\n UserId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The user ID.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef reset_personal_pin(AccountId=None, UserId=None):\n \"\"\"\n Resets the personal meeting PIN for the specified user on an Amazon Chime account. Returns the User object with the updated personal meeting PIN.\n See also: AWS API Documentation\n \n \n :example: response = client.reset_personal_pin(\n AccountId='string',\n UserId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The user ID.\n \n\n :rtype: dict\n :return: {\n 'User': {\n 'UserId': 'string',\n 'AccountId': 'string',\n 'PrimaryEmail': 'string',\n 'DisplayName': 'string',\n 'LicenseType': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'UserRegistrationStatus': 'Unregistered'|'Registered'|'Suspended',\n 'UserInvitationStatus': 'Pending'|'Accepted'|'Failed',\n 'RegisteredOn': datetime(2015, 1, 1),\n 'InvitedOn': datetime(2015, 1, 1),\n 'PersonalPIN': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_account(AccountId=None, Name=None):\n \"\"\"\n Updates account details for the specified Amazon Chime account. Currently, only account name updates are supported for this action.\n See also: AWS API Documentation\n \n \n :example: response = client.update_account(\n AccountId='string',\n Name='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type Name: string\n :param Name: The new name for the specified Amazon Chime account.\n\n :rtype: dict\n :return: {\n 'Account': {\n 'AwsAccountId': 'string',\n 'AccountId': 'string',\n 'Name': 'string',\n 'AccountType': 'Team'|'EnterpriseDirectory'|'EnterpriseLWA'|'EnterpriseOIDC',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'DefaultLicense': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'SupportedLicenses': [\n 'Basic'|'Plus'|'Pro'|'ProTrial',\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_account_settings(AccountId=None, AccountSettings=None):\n \"\"\"\n Updates the settings for the specified Amazon Chime account. You can update settings for remote control of shared screens, or for the dial-out option. 
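For instance, a hedged sketch that turns off remote control of shared screens and leaves dial-out disabled (the account ID is a placeholder):

import boto3

chime = boto3.client('chime')
chime.update_account_settings(
    AccountId='EXAMPLE-ACCOUNT-ID',  # placeholder account ID
    AccountSettings={
        'DisableRemoteControl': True,  # stop remote control of shared screens
        'EnableDialOut': False,        # keep the 'Call me at a phone number' option off
    },
)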
For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.update_account_settings(\n AccountId='string',\n AccountSettings={\n 'DisableRemoteControl': True|False,\n 'EnableDialOut': True|False\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type AccountSettings: dict\n :param AccountSettings: [REQUIRED]\n The Amazon Chime account settings to update.\n DisableRemoteControl (boolean) --Setting that stops or starts remote control of shared screens during meetings.\n EnableDialOut (boolean) --Setting that allows meeting participants to choose the Call me at a phone number option. For more information, see Join a Meeting without the Amazon Chime App .\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_user(AccountId=None, UserId=None, LicenseType=None):\n \"\"\"\n Updates user details for a specified user ID. Currently, only LicenseType updates are supported for this action.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user(\n AccountId='string',\n UserId='string',\n LicenseType='Basic'|'Plus'|'Pro'|'ProTrial'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Amazon Chime account ID.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The user ID.\n \n\n :type LicenseType: string\n :param LicenseType: The user license type to update. This must be a supported license type for the Amazon Chime account that the user belongs to.\n\n :rtype: dict\n :return: {\n 'User': {\n 'UserId': 'string',\n 'AccountId': 'string',\n 'PrimaryEmail': 'string',\n 'DisplayName': 'string',\n 'LicenseType': 'Basic'|'Plus'|'Pro'|'ProTrial',\n 'UserRegistrationStatus': 'Unregistered'|'Registered'|'Suspended',\n 'UserInvitationStatus': 'Pending'|'Accepted'|'Failed',\n 'RegisteredOn': datetime(2015, 1, 1),\n 'InvitedOn': datetime(2015, 1, 1),\n 'PersonalPIN': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6389126181602478, "alphanum_fraction": 0.6415506601333618, "avg_line_length": 47.537803649902344, "blob_id": "165409fd38029c428c3c50cddb53d53486858dba", "content_id": "76906f30ee0c70ba550d5a77d44037445bf41d6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39802, "license_type": "permissive", "max_line_length": 597, "num_lines": 820, "path": "/pyboto3/cloudwatchevents.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_rule(Name=None, Force=None):\n \"\"\"\n Deletes the specified rule.\n Before you can delete the rule, you must remove all targets, using RemoveTargets .\n When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.\n Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_rule(\n Name='string',\n Force=True|False\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the rule.\n \n\n :type Force: boolean\n :param Force: If this is a managed rule, created by an AWS service on your behalf, you must specify Force as True to delete the rule. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.\n\n \"\"\"\n pass\n\ndef describe_event_bus():\n \"\"\"\n Displays the external AWS accounts that are permitted to write events to your account using your account's event bus, and the associated policy. To enable your account to receive events from other accounts, use PutPermission .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_event_bus()\n \n \n :rtype: dict\n :return: {\n 'Name': 'string',\n 'Arn': 'string',\n 'Policy': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_rule(Name=None):\n \"\"\"\n Describes the specified rule.\n DescribeRule does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_rule(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the rule.\n \n\n :rtype: dict\n :return: {\n 'Name': 'string',\n 'Arn': 'string',\n 'EventPattern': 'string',\n 'ScheduleExpression': 'string',\n 'State': 'ENABLED'|'DISABLED',\n 'Description': 'string',\n 'RoleArn': 'string',\n 'ManagedBy': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef disable_rule(Name=None):\n \"\"\"\n Disables the specified rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.\n When you disable a rule, incoming events might continue to match to the disabled rule. 
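A small sketch, assuming a configured boto3 client and an existing rule (the rule name is a placeholder):

import boto3

events = boto3.client('events')
events.disable_rule(Name='my-example-rule')  # placeholder rule name
# DescribeRule can confirm the change once it propagates.
print(events.describe_rule(Name='my-example-rule')['State'])  # eventually 'DISABLED'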
Allow a short period of time for changes to take effect.\n See also: AWS API Documentation\n \n \n :example: response = client.disable_rule(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the rule.\n \n\n \"\"\"\n pass\n\ndef enable_rule(Name=None):\n \"\"\"\n Enables the specified rule. If the rule does not exist, the operation fails.\n When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_rule(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the rule.\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_rule_names_by_target(TargetArn=None, NextToken=None, Limit=None):\n \"\"\"\n Lists the rules for the specified target. You can see which of the rules in Amazon CloudWatch Events can invoke a specific target in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_rule_names_by_target(\n TargetArn='string',\n NextToken='string',\n Limit=123\n )\n \n \n :type TargetArn: string\n :param TargetArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target resource.\n \n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to retrieve the next set of results.\n\n :type Limit: integer\n :param Limit: The maximum number of results to return.\n\n :rtype: dict\n :return: {\n 'RuleNames': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_rules(NamePrefix=None, NextToken=None, Limit=None):\n \"\"\"\n Lists your Amazon CloudWatch Events rules. You can either list all the rules or you can provide a prefix to match to the rule names.\n ListRules does not list the targets of a rule. 
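As an illustrative sketch, a paginated listing loop might look like this (the name prefix is a placeholder; NextToken handling follows the response shape shown below):

import boto3

events = boto3.client('events')
kwargs = {'NamePrefix': 'my-'}  # placeholder prefix
while True:
    page = events.list_rules(**kwargs)
    for rule in page['Rules']:
        print(rule['Name'], rule['State'])
    if 'NextToken' not in page:
        break
    kwargs['NextToken'] = page['NextToken']  # fetch the next page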
To see the targets associated with a rule, use ListTargetsByRule .\n See also: AWS API Documentation\n \n \n :example: response = client.list_rules(\n NamePrefix='string',\n NextToken='string',\n Limit=123\n )\n \n \n :type NamePrefix: string\n :param NamePrefix: The prefix matching the rule name.\n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to retrieve the next set of results.\n\n :type Limit: integer\n :param Limit: The maximum number of results to return.\n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'Name': 'string',\n 'Arn': 'string',\n 'EventPattern': 'string',\n 'State': 'ENABLED'|'DISABLED',\n 'Description': 'string',\n 'ScheduleExpression': 'string',\n 'RoleArn': 'string',\n 'ManagedBy': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_targets_by_rule(Rule=None, NextToken=None, Limit=None):\n \"\"\"\n Lists the targets assigned to the specified rule.\n See also: AWS API Documentation\n \n \n :example: response = client.list_targets_by_rule(\n Rule='string',\n NextToken='string',\n Limit=123\n )\n \n \n :type Rule: string\n :param Rule: [REQUIRED]\n The name of the rule.\n \n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to retrieve the next set of results.\n\n :type Limit: integer\n :param Limit: The maximum number of results to return.\n\n :rtype: dict\n :return: {\n 'Targets': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'RoleArn': 'string',\n 'Input': 'string',\n 'InputPath': 'string',\n 'InputTransformer': {\n 'InputPathsMap': {\n 'string': 'string'\n },\n 'InputTemplate': 'string'\n },\n 'KinesisParameters': {\n 'PartitionKeyPath': 'string'\n },\n 'RunCommandParameters': {\n 'RunCommandTargets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'EcsParameters': {\n 'TaskDefinitionArn': 'string',\n 'TaskCount': 123,\n 'LaunchType': 'EC2'|'FARGATE',\n 'NetworkConfiguration': {\n 'awsvpcConfiguration': {\n 'Subnets': [\n 'string',\n ],\n 'SecurityGroups': [\n 'string',\n ],\n 'AssignPublicIp': 'ENABLED'|'DISABLED'\n }\n },\n 'PlatformVersion': 'string',\n 'Group': 'string'\n },\n 'BatchParameters': {\n 'JobDefinition': 'string',\n 'JobName': 'string',\n 'ArrayProperties': {\n 'Size': 123\n },\n 'RetryStrategy': {\n 'Attempts': 123\n }\n },\n 'SqsParameters': {\n 'MessageGroupId': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef put_events(Entries=None):\n \"\"\"\n Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.\n See also: AWS API Documentation\n \n \n :example: response = client.put_events(\n Entries=[\n {\n 'Time': datetime(2015, 1, 1),\n 'Source': 'string',\n 'Resources': [\n 'string',\n ],\n 'DetailType': 'string',\n 'Detail': 'string'\n },\n ]\n )\n \n \n :type Entries: list\n :param Entries: [REQUIRED]\n The entry that defines an event in your system. You can specify several parameters for the entry such as the source and type of the event, resources associated with the event, and so on.\n (dict) --Represents an event to be submitted.\n Time (datetime) --The time stamp of the event, per RFC3339 . If no time stamp is provided, the time stamp of the PutEvents call is used.\n Source (string) --The source of the event. This field is required.\n Resources (list) --AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. 
Any number, including zero, may be present.\n (string) --\n DetailType (string) --Free-form string used to decide what fields to expect in the event detail.\n Detail (string) --A valid JSON string. There is no other schema imposed. The JSON string may contain fields and nested subobjects.\n \n \n\n :rtype: dict\n :return: {\n 'FailedEntryCount': 123,\n 'Entries': [\n {\n 'EventId': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef put_permission(Action=None, Principal=None, StatementId=None, Condition=None):\n \"\"\"\n Running PutPermission permits the specified AWS account or AWS organization to put events to your account's default event bus . CloudWatch Events rules in your account are triggered by these events arriving to your default event bus.\n For another account to send events to your account, that external account must have a CloudWatch Events rule with your account's default event bus as a target.\n To enable multiple AWS accounts to put events to your default event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition , to grant permissions to all accounts in that organization.\n If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide .\n The permission policy on the default event bus cannot exceed 10 KB in size.\n See also: AWS API Documentation\n \n \n :example: response = client.put_permission(\n Action='string',\n Principal='string',\n StatementId='string',\n Condition={\n 'Type': 'string',\n 'Key': 'string',\n 'Value': 'string'\n }\n )\n \n \n :type Action: string\n :param Action: [REQUIRED]\n The action that you are enabling the other account to perform. Currently, this must be events:PutEvents .\n \n\n :type Principal: string\n :param Principal: [REQUIRED]\n The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify '*' to permit any account to put events to your default event bus.\n If you specify '*' without specifying Condition , avoid creating rules that may match undesirable events. To create more secure rules, make sure that the event pattern for each rule contains an account field with a specific account ID from which to receive events. Rules with an account field do not match any events sent from other accounts.\n \n\n :type StatementId: string\n :param StatementId: [REQUIRED]\n An identifier string for the external account that you are granting permissions to. If you later want to revoke the permission for this external account, specify this StatementId when you run RemovePermission .\n \n\n :type Condition: dict\n :param Condition: This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain AWS organization. 
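As an illustration, a Condition that limits the grant to members of a single organization could be passed as (the organization ID is a placeholder):

Condition={'Type': 'StringEquals', 'Key': 'aws:PrincipalOrgID', 'Value': 'o-exampleorgid'}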
For more information about AWS Organizations, see What Is AWS Organizations in the AWS Organizations User Guide .\n If you specify Condition with an AWS organization ID, and specify '*' as the value for Principal , you grant permission to all the accounts in the named organization.\n The Condition is a JSON string which must contain Type , Key , and Value fields.\n Type (string) -- [REQUIRED]Specifies the type of condition. Currently the only supported value is StringEquals .\n Key (string) -- [REQUIRED]Specifies the key for the condition. Currently the only supported key is aws:PrincipalOrgID .\n Value (string) -- [REQUIRED]Specifies the value for the key. Currently, this must be the ID of the organization.\n \n\n \"\"\"\n pass\n\ndef put_rule(Name=None, ScheduleExpression=None, EventPattern=None, State=None, Description=None, RoleArn=None):\n \"\"\"\n Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule .\n If you are updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule , the old values for those arguments are not kept. Instead, they are replaced with null values.\n When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.\n A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.\n Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.\n In CloudWatch Events, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.\n To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.\n An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets .\n See also: AWS API Documentation\n \n \n :example: response = client.put_rule(\n Name='string',\n ScheduleExpression='string',\n EventPattern='string',\n State='ENABLED'|'DISABLED',\n Description='string',\n RoleArn='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the rule that you are creating or updating.\n \n\n :type ScheduleExpression: string\n :param ScheduleExpression: The scheduling expression. For example, 'cron(0 20 * * ? *)' or 'rate(5 minutes)'.\n\n :type EventPattern: string\n :param EventPattern: The event pattern. 
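As an illustration, a pattern that matches EC2 instance state-change events could be passed as a JSON string (a sketch):

EventPattern='{"source": ["aws.ec2"], "detail-type": ["EC2 Instance State-change Notification"]}'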
For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide .\n\n :type State: string\n :param State: Indicates whether the rule is enabled or disabled.\n\n :type Description: string\n :param Description: A description of the rule.\n\n :type RoleArn: string\n :param RoleArn: The Amazon Resource Name (ARN) of the IAM role associated with the rule.\n\n :rtype: dict\n :return: {\n 'RuleArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_targets(Rule=None, Targets=None):\n \"\"\"\n Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.\n Targets are the resources that are invoked when a rule is triggered.\n You can configure the following as targets for CloudWatch Events:\n Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call , EC2 RebootInstances API call , EC2 StopInstances API call , and EC2 TerminateInstances API call .\n For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.\n To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, CloudWatch Events relies on IAM roles that you specify in the RoleARN argument in PutTargets . For more information, see Authentication and Access Control in the Amazon CloudWatch Events User Guide .\n If another AWS account is in the same region and has granted you permission (using PutPermission ), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets . If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon CloudWatch Pricing .\n If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide .\n For more information about enabling cross-account events, see PutPermission .\n When you specify InputPath or InputTransformer , you must use JSON dot notation, not bracket notation.\n When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.\n This action can partially fail if too many requests are made at the same time. 
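A hedged sketch of adding a Lambda function as a target (the rule name, target ID, and ARN are placeholders; granting CloudWatch Events permission to invoke the function is a separate step):

import boto3

events = boto3.client('events')
response = events.put_targets(
    Rule='my-example-rule',  # placeholder rule name
    Targets=[{
        'Id': 'my-target-1',  # placeholder target ID, unique within the rule
        'Arn': 'arn:aws:lambda:us-east-1:123456789012:function:my-function',  # placeholder ARN
    }],
)
# Per-target failures are reported in FailedEntries rather than raising an exception.
print(response['FailedEntryCount'], response['FailedEntries'])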
If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.\n See also: AWS API Documentation\n \n \n :example: response = client.put_targets(\n Rule='string',\n Targets=[\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'RoleArn': 'string',\n 'Input': 'string',\n 'InputPath': 'string',\n 'InputTransformer': {\n 'InputPathsMap': {\n 'string': 'string'\n },\n 'InputTemplate': 'string'\n },\n 'KinesisParameters': {\n 'PartitionKeyPath': 'string'\n },\n 'RunCommandParameters': {\n 'RunCommandTargets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'EcsParameters': {\n 'TaskDefinitionArn': 'string',\n 'TaskCount': 123,\n 'LaunchType': 'EC2'|'FARGATE',\n 'NetworkConfiguration': {\n 'awsvpcConfiguration': {\n 'Subnets': [\n 'string',\n ],\n 'SecurityGroups': [\n 'string',\n ],\n 'AssignPublicIp': 'ENABLED'|'DISABLED'\n }\n },\n 'PlatformVersion': 'string',\n 'Group': 'string'\n },\n 'BatchParameters': {\n 'JobDefinition': 'string',\n 'JobName': 'string',\n 'ArrayProperties': {\n 'Size': 123\n },\n 'RetryStrategy': {\n 'Attempts': 123\n }\n },\n 'SqsParameters': {\n 'MessageGroupId': 'string'\n }\n },\n ]\n )\n \n \n :type Rule: string\n :param Rule: [REQUIRED]\n The name of the rule.\n \n\n :type Targets: list\n :param Targets: [REQUIRED]\n The targets to update or add to the rule.\n (dict) --Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets .\n If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon CloudWatch Events User Guide .\n Id (string) -- [REQUIRED]The ID of the target.\n Arn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the target.\n RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. If one rule triggers multiple targets, you can use a different IAM role for each target.\n Input (string) --Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. For more information, see The JavaScript Object Notation (JSON) Data Interchange Format .\n InputPath (string) --The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. You must use JSON dot notation, not bracket notation. For more information about JSON paths, see JSONPath .\n InputTransformer (dict) --Settings to enable you to provide custom input to a target based on certain event data. You can extract one or more key-value pairs from the event and then use that data to send customized input to the target.\n InputPathsMap (dict) --Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.\n InputPathsMap is an array key-value pairs, where each value is a valid JSON path. You can have as many as 10 key-value pairs. 
You must use JSON dot notation, not bracket notation.\n The keys cannot start with 'AWS.'\n (string) --\n (string) --\n \n InputTemplate (string) -- [REQUIRED]Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value>. The InputTemplate must be valid JSON.\n If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:\n The placeholder cannot be used as an object key.\n Object values cannot include quote marks.\n The following example shows the syntax for using InputPathsMap and InputTemplate .\n 'InputTransformer':{\n 'InputPathsMap': {'instance': '$.detail.instance','status': '$.detail.status'},\n 'InputTemplate': '<instance> is in state <status>'\n }\n To have the InputTemplate include quote marks within a JSON string, escape each quote mark with a slash, as in the following example:\n 'InputTransformer':{\n 'InputPathsMap': {'instance': '$.detail.instance','status': '$.detail.status'},\n 'InputTemplate': '<instance> is in state \\'<status>\\''\n }\n \n KinesisParameters (dict) --The custom parameter you can use to control the shard assignment, when the target is a Kinesis data stream. If you do not include this parameter, the default is to use the eventId as the partition key.\n PartitionKeyPath (string) -- [REQUIRED]The JSON path to be extracted from the event and used as the partition key. For more information, see Amazon Kinesis Streams Key Concepts in the Amazon Kinesis Streams Developer Guide .\n RunCommandParameters (dict) --Parameters used when you are using the rule to invoke Amazon EC2 Run Command.\n RunCommandTargets (list) -- [REQUIRED]Currently, we support including only one RunCommandTarget block, which specifies either an array of InstanceIds or a tag.\n (dict) --Information about the EC2 instances that are to be sent the command, specified as key-value pairs. Each RunCommandTarget block can include only one key, but this key may specify multiple values.\n Key (string) -- [REQUIRED]Can be either tag: tag-key or InstanceIds .\n Values (list) -- [REQUIRED]If Key is tag: tag-key , Values is a list of tag values. If Key is InstanceIds , Values is a list of Amazon EC2 instance IDs.\n (string) --\n \n \n EcsParameters (dict) --Contains the Amazon ECS task definition and task count to be used, if the event target is an Amazon ECS task. For more information about Amazon ECS tasks, see Task Definitions in the Amazon EC2 Container Service Developer Guide .\n TaskDefinitionArn (string) -- [REQUIRED]The ARN of the task definition to use if the event target is an Amazon ECS task.\n TaskCount (integer) --The number of tasks to create based on TaskDefinition . The default is 1.\n LaunchType (string) --Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. For more information, see AWS Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide .\n NetworkConfiguration (dict) --Use this structure if the ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. 
This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.\n If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.\n awsvpcConfiguration (dict) --Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.\n Subnets (list) -- [REQUIRED]Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.\n (string) --\n SecurityGroups (list) --Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.\n (string) --\n AssignPublicIp (string) --Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE .\n \n PlatformVersion (string) --Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0 .\n This structure is used only if LaunchType is FARGATE . For more information about valid platform versions, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide .\n Group (string) --Specifies an ECS task group for the task. The maximum length is 255 characters.\n BatchParameters (dict) --If the event target is an AWS Batch job, this contains the job definition, job name, and other parameters. For more information, see Jobs in the AWS Batch User Guide .\n JobDefinition (string) -- [REQUIRED]The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist.\n JobName (string) -- [REQUIRED]The name to use for this execution of the job, if the target is an AWS Batch job.\n ArrayProperties (dict) --The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an AWS Batch job.\n Size (integer) --The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000.\n RetryStrategy (dict) --The retry strategy to use for failed jobs, if the target is an AWS Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1 to 10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.\n Attempts (integer) --The number of times to attempt to retry, if the job fails. 
Valid values are 1 to 10.\n \n SqsParameters (dict) --Contains the message group ID to use when the target is a FIFO queue.\n If you specify an SQS FIFO queue as a target, the queue must have content-based deduplication enabled.\n MessageGroupId (string) --The FIFO message group ID to use as the target.\n \n \n\n :rtype: dict\n :return: {\n 'FailedEntryCount': 123,\n 'FailedEntries': [\n {\n 'TargetId': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n :returns: \n If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).\n If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.\n If InputPath is specified in the form of JSONPath (for example, $.detail ), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).\n If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.\n \n \"\"\"\n pass\n\ndef remove_permission(StatementId=None):\n \"\"\"\n Revokes the permission of another AWS account to be able to put events to your default event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission . You can find the StatementId by using DescribeEventBus .\n See also: AWS API Documentation\n \n \n :example: response = client.remove_permission(\n StatementId='string'\n )\n \n \n :type StatementId: string\n :param StatementId: [REQUIRED]\n The statement ID corresponding to the account that is no longer allowed to put events to the default event bus.\n \n\n \"\"\"\n pass\n\ndef remove_targets(Rule=None, Ids=None, Force=None):\n \"\"\"\n Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.\n When you remove a target, when the associated rule triggers, removed targets might continue to be invoked. Allow a short period of time for changes to take effect.\n This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_targets(\n Rule='string',\n Ids=[\n 'string',\n ],\n Force=True|False\n )\n \n \n :type Rule: string\n :param Rule: [REQUIRED]\n The name of the rule.\n \n\n :type Ids: list\n :param Ids: [REQUIRED]\n The IDs of the targets to remove from the rule.\n (string) --\n \n\n :type Force: boolean\n :param Force: If this is a managed rule, created by an AWS service on your behalf, you must specify Force as True to remove targets. This parameter is ignored for rules that are not managed rules. 
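For instance (a sketch with placeholder names): response = client.remove_targets(Rule='my-managed-rule', Ids=['my-target-1'], Force=True).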
You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.\n\n :rtype: dict\n :return: {\n 'FailedEntryCount': 123,\n 'FailedEntries': [\n {\n 'TargetId': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef test_event_pattern(EventPattern=None, Event=None):\n \"\"\"\n Tests whether the specified event pattern matches the provided event.\n Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.\n See also: AWS API Documentation\n \n \n :example: response = client.test_event_pattern(\n EventPattern='string',\n Event='string'\n )\n \n \n :type EventPattern: string\n :param EventPattern: [REQUIRED]\n The event pattern. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide .\n \n\n :type Event: string\n :param Event: [REQUIRED]\n The event, in JSON format, to test against the event pattern.\n \n\n :rtype: dict\n :return: {\n 'Result': True|False\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6219446063041687, "alphanum_fraction": 0.630224883556366, "avg_line_length": 47.433448791503906, "blob_id": "03debc7a717fc39a1f96e26ee6ec9ed2fd0054da", "content_id": "8261cbae669c1f959f51b78e4d9ffdbe959ca85a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55312, "license_type": "permissive", "max_line_length": 555, "num_lines": 1142, "path": "/pyboto3/budgets.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_budget(AccountId=None, Budget=None, NotificationsWithSubscribers=None):\n \"\"\"\n Creates a budget and, if included, notifications and subscribers.\n See also: AWS API Documentation\n \n \n :example: response = client.create_budget(\n AccountId='string',\n Budget={\n 'BudgetName': 'string',\n 'BudgetLimit': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'CostFilters': {\n 'string': [\n 'string',\n ]\n },\n 'CostTypes': {\n 'IncludeTax': True|False,\n 'IncludeSubscription': True|False,\n 'UseBlended': True|False,\n 'IncludeRefund': True|False,\n 'IncludeCredit': True|False,\n 'IncludeUpfront': True|False,\n 'IncludeRecurring': True|False,\n 'IncludeOtherSubscription': True|False,\n 'IncludeSupport': True|False,\n 'IncludeDiscount': True|False,\n 'UseAmortized': True|False\n },\n 'TimeUnit': 'DAILY'|'MONTHLY'|'QUARTERLY'|'ANNUALLY',\n 'TimePeriod': {\n 'Start': datetime(2015, 1, 1),\n 'End': datetime(2015, 1, 1)\n },\n 'CalculatedSpend': {\n 'ActualSpend': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'ForecastedSpend': {\n 'Amount': 'string',\n 'Unit': 'string'\n }\n },\n 'BudgetType': 'USAGE'|'COST'|'RI_UTILIZATION'|'RI_COVERAGE',\n 'LastUpdatedTime': datetime(2015, 1, 1)\n },\n NotificationsWithSubscribers=[\n {\n 'Notification': {\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n 'Subscribers': [\n {\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n },\n ]\n },\n ]\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget.\n \n\n :type Budget: dict\n :param Budget: [REQUIRED]\n The budget object that you want to create.\n BudgetName (string) -- [REQUIRED]The name of a budget. The name must be unique within accounts. The : and \\ characters aren't allowed in BudgetName .\n BudgetLimit (dict) --The total amount of cost, usage, RI utilization, or RI coverage that you want to track with your budget.\n BudgetLimit is required for cost or usage budgets, but optional for RI utilization or coverage budgets. 
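For a cost budget, a sketch of a 100 US dollar limit (both fields are strings): {'Amount': '100', 'Unit': 'USD'}.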
RI utilization or coverage budgets default to 100 , which is the only valid value for RI utilization or coverage budgets.\n Amount (string) -- [REQUIRED]The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.\n Unit (string) -- [REQUIRED]The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.\n CostFilters (dict) --The cost filters, such as service or region, that are applied to a budget.\n AWS Budgets supports the following services as a filter for RI budgets:\n Amazon Elastic Compute Cloud - Compute\n Amazon Redshift\n Amazon Relational Database Service\n Amazon ElastiCache\n Amazon Elasticsearch Service\n (string) --A generic string.\n (list) --\n (string) --A generic string.\n \n CostTypes (dict) --The types of costs that are included in this COST budget.\n USAGE , RI_UTILIZATION , and RI_COVERAGE budgets do not have CostTypes .\n IncludeTax (boolean) --Specifies whether a budget includes taxes.\n The default value is true .\n IncludeSubscription (boolean) --Specifies whether a budget includes subscriptions.\n The default value is true .\n UseBlended (boolean) --Specifies whether a budget uses a blended rate.\n The default value is false .\n IncludeRefund (boolean) --Specifies whether a budget includes refunds.\n The default value is true .\n IncludeCredit (boolean) --Specifies whether a budget includes credits.\n The default value is true .\n IncludeUpfront (boolean) --Specifies whether a budget includes upfront RI costs.\n The default value is true .\n IncludeRecurring (boolean) --Specifies whether a budget includes recurring fees such as monthly RI fees.\n The default value is true .\n IncludeOtherSubscription (boolean) --Specifies whether a budget includes non-RI subscription costs.\n The default value is true .\n IncludeSupport (boolean) --Specifies whether a budget includes support subscription fees.\n The default value is true .\n IncludeDiscount (boolean) --Specifies whether a budget includes discounts.\n The default value is true .\n UseAmortized (boolean) --Specifies whether a budget uses the amortized rate.\n The default value is false .\n TimeUnit (string) -- [REQUIRED]The length of time until a budget resets the actual and forecasted spend. DAILY is available only for RI_UTILIZATION and RI_COVERAGE budgets.\n TimePeriod (dict) --The period of time that is covered by a budget. The period has a start date and an end date. The start date must come before the end date. The end date must come before 06/15/87 00:00 UTC .\n If you create your budget and don't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY , and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC . If you chose MONTHLY , AWS set your start date to 01/01/18 00:00 UTC . If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n You can change either date with the UpdateBudget operation.\n After the end date, AWS deletes the budget and all associated notifications and subscribers.\n Start (datetime) --The start date for a budget. If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). 
For example, if you created your budget on January 24, 2018, chose DAILY , and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC . If you chose MONTHLY , AWS set your start date to 01/01/18 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n You can change your start date with the UpdateBudget operation.\n End (datetime) --The end date for a budget. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n After the end date, AWS deletes the budget and all associated notifications and subscribers. You can change your end date with the UpdateBudget operation.\n CalculatedSpend (dict) --The actual and forecasted cost or usage that the budget tracks.\n ActualSpend (dict) -- [REQUIRED]The amount of cost, usage, or RI units that you have used.\n Amount (string) -- [REQUIRED]The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.\n Unit (string) -- [REQUIRED]The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.\n ForecastedSpend (dict) --The amount of cost, usage, or RI units that you are forecasted to use.\n Amount (string) -- [REQUIRED]The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.\n Unit (string) -- [REQUIRED]The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.\n \n BudgetType (string) -- [REQUIRED]Whether this budget tracks monetary costs, usage, RI utilization, or RI coverage.\n LastUpdatedTime (datetime) --The last time that you updated this budget.\n \n\n :type NotificationsWithSubscribers: list\n :param NotificationsWithSubscribers: A notification that you want to associate with a budget. A budget can have up to five notifications, and each notification can have one SNS subscriber and up to 10 email subscribers. If you include notifications and subscribers in your CreateBudget call, AWS creates the notifications and subscribers for you.\n (dict) --A notification with subscribers. A notification can have one SNS subscriber and up to 10 email subscribers, for a total of 11 subscribers.\n Notification (dict) -- [REQUIRED]The notification that is associated with a budget.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. 
If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n Subscribers (list) -- [REQUIRED]A list of subscribers who are subscribed to this notification.\n (dict) --The subscriber to a budget notification. The subscriber consists of a subscription type and either an Amazon SNS topic or an email address.\n For example, an email subscriber would have the following parameters:\n A subscriptionType of EMAIL\n An address of [email protected]\n SubscriptionType (string) -- [REQUIRED]The type of notification that AWS sends to a subscriber.\n Address (string) -- [REQUIRED]The address that AWS sends budget notifications to, either an SNS topic or an email.\n \n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_notification(AccountId=None, BudgetName=None, Notification=None, Subscribers=None):\n \"\"\"\n Creates a notification. You must create the budget before you create the associated notification.\n See also: AWS API Documentation\n \n \n :example: response = client.create_notification(\n AccountId='string',\n BudgetName='string',\n Notification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n Subscribers=[\n {\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n },\n ]\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget that you want to create a notification for.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget that you want AWS to notify you about. Budget names must be unique within an account.\n \n\n :type Notification: dict\n :param Notification: [REQUIRED]\n The notification that you want to create.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :type Subscribers: list\n :param Subscribers: [REQUIRED]\n A list of subscribers that you want to associate with the notification. Each notification can have one SNS subscriber and up to 10 email subscribers.\n (dict) --The subscriber to a budget notification. 
The subscriber consists of a subscription type and either an Amazon SNS topic or an email address.\n For example, an email subscriber would have the following parameters:\n A subscriptionType of EMAIL\n An address of [email protected]\n SubscriptionType (string) -- [REQUIRED]The type of notification that AWS sends to a subscriber.\n Address (string) -- [REQUIRED]The address that AWS sends budget notifications to, either an SNS topic or an email.\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_subscriber(AccountId=None, BudgetName=None, Notification=None, Subscriber=None):\n \"\"\"\n Creates a subscriber. You must create the associated budget and notification before you create the subscriber.\n See also: AWS API Documentation\n \n \n :example: response = client.create_subscriber(\n AccountId='string',\n BudgetName='string',\n Notification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n Subscriber={\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget that you want to create a subscriber for.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget that you want to subscribe to. Budget names must be unique within an account.\n \n\n :type Notification: dict\n :param Notification: [REQUIRED]\n The notification that you want to create a subscriber for.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :type Subscriber: dict\n :param Subscriber: [REQUIRED]\n The subscriber that you want to associate with a budget notification.\n SubscriptionType (string) -- [REQUIRED]The type of notification that AWS sends to a subscriber.\n Address (string) -- [REQUIRED]The address that AWS sends budget notifications to, either an SNS topic or an email.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_budget(AccountId=None, BudgetName=None):\n \"\"\"\n Deletes a budget. 
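\n    A minimal deletion call might look like the following sketch (the account ID and budget name are placeholders):\n    response = client.delete_budget(\n    AccountId='123456789012',\n    BudgetName='monthly-cost-budget'\n    )\n    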
You can delete your budget at any time.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_budget(\n AccountId='string',\n BudgetName='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget that you want to delete.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget that you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_notification(AccountId=None, BudgetName=None, Notification=None):\n \"\"\"\n Deletes a notification.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_notification(\n AccountId='string',\n BudgetName='string',\n Notification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget whose notification you want to delete.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget whose notification you want to delete.\n \n\n :type Notification: dict\n :param Notification: [REQUIRED]\n The notification that you want to delete.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. 
If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_subscriber(AccountId=None, BudgetName=None, Notification=None, Subscriber=None):\n \"\"\"\n Deletes a subscriber.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_subscriber(\n AccountId='string',\n BudgetName='string',\n Notification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n Subscriber={\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget whose subscriber you want to delete.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget whose subscriber you want to delete.\n \n\n :type Notification: dict\n :param Notification: [REQUIRED]\n The notification whose subscriber you want to delete.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. 
If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n    \n\n    :type Subscriber: dict\n    :param Subscriber: [REQUIRED]\n    The subscriber that you want to delete.\n    SubscriptionType (string) -- [REQUIRED]The type of notification that AWS sends to a subscriber.\n    Address (string) -- [REQUIRED]The address that AWS sends budget notifications to, either an SNS topic or an email.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef describe_budget(AccountId=None, BudgetName=None):\n    \"\"\"\n    Describes a budget.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_budget(\n    AccountId='string',\n    BudgetName='string'\n    )\n    \n    \n    :type AccountId: string\n    :param AccountId: [REQUIRED]\n    The accountId that is associated with the budget that you want a description of.\n    \n\n    :type BudgetName: string\n    :param BudgetName: [REQUIRED]\n    The name of the budget that you want a description of.\n    \n\n    :rtype: dict\n    :return: {\n    'Budget': {\n    'BudgetName': 'string',\n    'BudgetLimit': {\n    'Amount': 'string',\n    'Unit': 'string'\n    },\n    'CostFilters': {\n    'string': [\n    'string',\n    ]\n    },\n    'CostTypes': {\n    'IncludeTax': True|False,\n    'IncludeSubscription': True|False,\n    'UseBlended': True|False,\n    'IncludeRefund': True|False,\n    'IncludeCredit': True|False,\n    'IncludeUpfront': True|False,\n    'IncludeRecurring': True|False,\n    'IncludeOtherSubscription': True|False,\n    'IncludeSupport': True|False,\n    'IncludeDiscount': True|False,\n    'UseAmortized': True|False\n    },\n    'TimeUnit': 'DAILY'|'MONTHLY'|'QUARTERLY'|'ANNUALLY',\n    'TimePeriod': {\n    'Start': datetime(2015, 1, 1),\n    'End': datetime(2015, 1, 1)\n    },\n    'CalculatedSpend': {\n    'ActualSpend': {\n    'Amount': 'string',\n    'Unit': 'string'\n    },\n    'ForecastedSpend': {\n    'Amount': 'string',\n    'Unit': 'string'\n    }\n    },\n    'BudgetType': 'USAGE'|'COST'|'RI_UTILIZATION'|'RI_COVERAGE',\n    'LastUpdatedTime': datetime(2015, 1, 1)\n    }\n    }\n    \n    \n    :returns: \n    Amazon Elastic Compute Cloud - Compute\n    Amazon Redshift\n    Amazon Relational Database Service\n    Amazon ElastiCache\n    Amazon Elasticsearch Service\n    \n    \"\"\"\n    pass\n\ndef describe_budget_performance_history(AccountId=None, BudgetName=None, TimePeriod=None, MaxResults=None, NextToken=None):\n    \"\"\"\n    Describes the history for DAILY , MONTHLY , and QUARTERLY budgets. Budget history isn't available for ANNUAL budgets.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_budget_performance_history(\n    AccountId='string',\n    BudgetName='string',\n    TimePeriod={\n    'Start': datetime(2015, 1, 1),\n    'End': datetime(2015, 1, 1)\n    },\n    MaxResults=123,\n    NextToken='string'\n    )\n    \n    \n    :type AccountId: string\n    :param AccountId: [REQUIRED]\n    The account ID of the user. It should be a 12-digit number.\n    \n\n    :type BudgetName: string\n    :param BudgetName: [REQUIRED]\n    A string that represents the budget name. The ':' and '\' characters aren't allowed.\n    \n\n    :type TimePeriod: dict\n    :param TimePeriod: Retrieves how often the budget went into an ALARM state for the specified time period.\n    Start (datetime) --The start date for a budget. If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY , and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC . If you chose MONTHLY , AWS set your start date to 01/01/18 00:00 UTC .
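\n    For instance, a history query over a concrete window might look like the following sketch (the account ID and budget name are placeholders):\n    from datetime import datetime\n    response = client.describe_budget_performance_history(\n    AccountId='123456789012',\n    BudgetName='monthly-cost-budget',\n    TimePeriod={'Start': datetime(2018, 1, 1), 'End': datetime(2018, 7, 1)}\n    )\n    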
The defaults are the same for the AWS Billing and Cost Management console and the API.\n You can change your start date with the UpdateBudget operation.\n End (datetime) --The end date for a budget. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n After the end date, AWS deletes the budget and all associated notifications and subscribers. You can change your end date with the UpdateBudget operation.\n \n\n :type MaxResults: integer\n :param MaxResults: An integer that represents how many entries a paginated response contains. The maximum is 100.\n\n :type NextToken: string\n :param NextToken: A generic string.\n\n :rtype: dict\n :return: {\n 'BudgetPerformanceHistory': {\n 'BudgetName': 'string',\n 'BudgetType': 'USAGE'|'COST'|'RI_UTILIZATION'|'RI_COVERAGE',\n 'CostFilters': {\n 'string': [\n 'string',\n ]\n },\n 'CostTypes': {\n 'IncludeTax': True|False,\n 'IncludeSubscription': True|False,\n 'UseBlended': True|False,\n 'IncludeRefund': True|False,\n 'IncludeCredit': True|False,\n 'IncludeUpfront': True|False,\n 'IncludeRecurring': True|False,\n 'IncludeOtherSubscription': True|False,\n 'IncludeSupport': True|False,\n 'IncludeDiscount': True|False,\n 'UseAmortized': True|False\n },\n 'TimeUnit': 'DAILY'|'MONTHLY'|'QUARTERLY'|'ANNUALLY',\n 'BudgetedAndActualAmountsList': [\n {\n 'BudgetedAmount': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'ActualAmount': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'TimePeriod': {\n 'Start': datetime(2015, 1, 1),\n 'End': datetime(2015, 1, 1)\n }\n },\n ]\n },\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_budgets(AccountId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the budgets that are associated with an account.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_budgets(\n AccountId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budgets that you want descriptions of.\n \n\n :type MaxResults: integer\n :param MaxResults: An optional integer that represents how many entries a paginated response contains. 
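\n    When more budgets exist than fit in one page, the response includes a NextToken that is passed back in on the next call; a manual paging loop might look like the following sketch (the account ID is a placeholder):\n    budgets, token = [], None\n    while True:\n        kwargs = {'AccountId': '123456789012', 'MaxResults': 100}\n        if token:\n            kwargs['NextToken'] = token\n        page = client.describe_budgets(**kwargs)\n        budgets.extend(page.get('Budgets', []))\n        token = page.get('NextToken')\n        if not token:\n            break\n    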
The maximum is 100.\n\n :type NextToken: string\n :param NextToken: The pagination token that you include in your request to indicate the next set of results that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'Budgets': [\n {\n 'BudgetName': 'string',\n 'BudgetLimit': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'CostFilters': {\n 'string': [\n 'string',\n ]\n },\n 'CostTypes': {\n 'IncludeTax': True|False,\n 'IncludeSubscription': True|False,\n 'UseBlended': True|False,\n 'IncludeRefund': True|False,\n 'IncludeCredit': True|False,\n 'IncludeUpfront': True|False,\n 'IncludeRecurring': True|False,\n 'IncludeOtherSubscription': True|False,\n 'IncludeSupport': True|False,\n 'IncludeDiscount': True|False,\n 'UseAmortized': True|False\n },\n 'TimeUnit': 'DAILY'|'MONTHLY'|'QUARTERLY'|'ANNUALLY',\n 'TimePeriod': {\n 'Start': datetime(2015, 1, 1),\n 'End': datetime(2015, 1, 1)\n },\n 'CalculatedSpend': {\n 'ActualSpend': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'ForecastedSpend': {\n 'Amount': 'string',\n 'Unit': 'string'\n }\n },\n 'BudgetType': 'USAGE'|'COST'|'RI_UTILIZATION'|'RI_COVERAGE',\n 'LastUpdatedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Amazon Elastic Compute Cloud - Compute\n Amazon Redshift\n Amazon Relational Database Service\n Amazon ElastiCache\n Amazon Elasticsearch Service\n \n \"\"\"\n pass\n\ndef describe_notifications_for_budget(AccountId=None, BudgetName=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the notifications that are associated with a budget.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_notifications_for_budget(\n AccountId='string',\n BudgetName='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget whose notifications you want descriptions of.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget whose notifications you want descriptions of.\n \n\n :type MaxResults: integer\n :param MaxResults: An optional integer that represents how many entries a paginated response contains. 
The maximum is 100.\n\n :type NextToken: string\n :param NextToken: The pagination token that you include in your request to indicate the next set of results that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'Notifications': [\n {\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n A notificationType of ACTUAL\n A thresholdType of PERCENTAGE\n A comparisonOperator of GREATER_THAN\n A notification threshold of 80\n \n \"\"\"\n pass\n\ndef describe_subscribers_for_notification(AccountId=None, BudgetName=None, Notification=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the subscribers that are associated with a notification.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_subscribers_for_notification(\n AccountId='string',\n BudgetName='string',\n Notification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget whose subscribers you want descriptions of.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget whose subscribers you want descriptions of.\n \n\n :type Notification: dict\n :param Notification: [REQUIRED]\n The notification whose subscribers you want to list.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :type MaxResults: integer\n :param MaxResults: An optional integer that represents how many entries a paginated response contains. 
The maximum is 100.\n\n :type NextToken: string\n :param NextToken: The pagination token that you include in your request to indicate the next set of results that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'Subscribers': [\n {\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n A subscriptionType of EMAIL\n An address of [email protected]\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef update_budget(AccountId=None, NewBudget=None):\n \"\"\"\n Updates a budget. You can change every part of a budget except for the budgetName and the calculatedSpend . 
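\n    A common pattern is to read the current budget, modify it, and write it back, as in the following sketch (the account ID and the budget named monthly-cost-budget are hypothetical placeholders):\n    budget = client.describe_budget(\n    AccountId='123456789012',\n    BudgetName='monthly-cost-budget'\n    )['Budget']\n    budget['BudgetLimit']['Amount'] = '150'\n    response = client.update_budget(AccountId='123456789012', NewBudget=budget)\n    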
When you modify a budget, the calculatedSpend drops to zero until AWS has new usage data to use for forecasting.\n See also: AWS API Documentation\n \n \n :example: response = client.update_budget(\n AccountId='string',\n NewBudget={\n 'BudgetName': 'string',\n 'BudgetLimit': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'CostFilters': {\n 'string': [\n 'string',\n ]\n },\n 'CostTypes': {\n 'IncludeTax': True|False,\n 'IncludeSubscription': True|False,\n 'UseBlended': True|False,\n 'IncludeRefund': True|False,\n 'IncludeCredit': True|False,\n 'IncludeUpfront': True|False,\n 'IncludeRecurring': True|False,\n 'IncludeOtherSubscription': True|False,\n 'IncludeSupport': True|False,\n 'IncludeDiscount': True|False,\n 'UseAmortized': True|False\n },\n 'TimeUnit': 'DAILY'|'MONTHLY'|'QUARTERLY'|'ANNUALLY',\n 'TimePeriod': {\n 'Start': datetime(2015, 1, 1),\n 'End': datetime(2015, 1, 1)\n },\n 'CalculatedSpend': {\n 'ActualSpend': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'ForecastedSpend': {\n 'Amount': 'string',\n 'Unit': 'string'\n }\n },\n 'BudgetType': 'USAGE'|'COST'|'RI_UTILIZATION'|'RI_COVERAGE',\n 'LastUpdatedTime': datetime(2015, 1, 1)\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget that you want to update.\n \n\n :type NewBudget: dict\n :param NewBudget: [REQUIRED]\n The budget that you want to update your budget to.\n BudgetName (string) -- [REQUIRED]The name of a budget. The name must be unique within accounts. The : and \\ characters aren't allowed in BudgetName .\n BudgetLimit (dict) --The total amount of cost, usage, RI utilization, or RI coverage that you want to track with your budget.\n BudgetLimit is required for cost or usage budgets, but optional for RI utilization or coverage budgets. 
RI utilization or coverage budgets default to 100 , which is the only valid value for RI utilization or coverage budgets.\n Amount (string) -- [REQUIRED]The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.\n Unit (string) -- [REQUIRED]The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.\n CostFilters (dict) --The cost filters, such as service or region, that are applied to a budget.\n AWS Budgets supports the following services as a filter for RI budgets:\n Amazon Elastic Compute Cloud - Compute\n Amazon Redshift\n Amazon Relational Database Service\n Amazon ElastiCache\n Amazon Elasticsearch Service\n (string) --A generic string.\n (list) --\n (string) --A generic string.\n \n CostTypes (dict) --The types of costs that are included in this COST budget.\n USAGE , RI_UTILIZATION , and RI_COVERAGE budgets do not have CostTypes .\n IncludeTax (boolean) --Specifies whether a budget includes taxes.\n The default value is true .\n IncludeSubscription (boolean) --Specifies whether a budget includes subscriptions.\n The default value is true .\n UseBlended (boolean) --Specifies whether a budget uses a blended rate.\n The default value is false .\n IncludeRefund (boolean) --Specifies whether a budget includes refunds.\n The default value is true .\n IncludeCredit (boolean) --Specifies whether a budget includes credits.\n The default value is true .\n IncludeUpfront (boolean) --Specifies whether a budget includes upfront RI costs.\n The default value is true .\n IncludeRecurring (boolean) --Specifies whether a budget includes recurring fees such as monthly RI fees.\n The default value is true .\n IncludeOtherSubscription (boolean) --Specifies whether a budget includes non-RI subscription costs.\n The default value is true .\n IncludeSupport (boolean) --Specifies whether a budget includes support subscription fees.\n The default value is true .\n IncludeDiscount (boolean) --Specifies whether a budget includes discounts.\n The default value is true .\n UseAmortized (boolean) --Specifies whether a budget uses the amortized rate.\n The default value is false .\n TimeUnit (string) -- [REQUIRED]The length of time until a budget resets the actual and forecasted spend. DAILY is available only for RI_UTILIZATION and RI_COVERAGE budgets.\n TimePeriod (dict) --The period of time that is covered by a budget. The period has a start date and an end date. The start date must come before the end date. The end date must come before 06/15/87 00:00 UTC .\n If you create your budget and don't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY , and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC . If you chose MONTHLY , AWS set your start date to 01/01/18 00:00 UTC . If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n You can change either date with the UpdateBudget operation.\n After the end date, AWS deletes the budget and all associated notifications and subscribers.\n Start (datetime) --The start date for a budget. If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). 
For example, if you created your budget on January 24, 2018, chose DAILY , and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC . If you chose MONTHLY , AWS set your start date to 01/01/18 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n You can change your start date with the UpdateBudget operation.\n End (datetime) --The end date for a budget. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC . The defaults are the same for the AWS Billing and Cost Management console and the API.\n After the end date, AWS deletes the budget and all associated notifications and subscribers. You can change your end date with the UpdateBudget operation.\n CalculatedSpend (dict) --The actual and forecasted cost or usage that the budget tracks.\n ActualSpend (dict) -- [REQUIRED]The amount of cost, usage, or RI units that you have used.\n Amount (string) -- [REQUIRED]The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.\n Unit (string) -- [REQUIRED]The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.\n ForecastedSpend (dict) --The amount of cost, usage, or RI units that you are forecasted to use.\n Amount (string) -- [REQUIRED]The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.\n Unit (string) -- [REQUIRED]The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.\n \n BudgetType (string) -- [REQUIRED]Whether this budget tracks monetary costs, usage, RI utilization, or RI coverage.\n LastUpdatedTime (datetime) --The last time that you updated this budget.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_notification(AccountId=None, BudgetName=None, OldNotification=None, NewNotification=None):\n \"\"\"\n Updates a notification.\n See also: AWS API Documentation\n \n \n :example: response = client.update_notification(\n AccountId='string',\n BudgetName='string',\n OldNotification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n NewNotification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget whose notification you want to update.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget whose notification you want to update.\n \n\n :type OldNotification: dict\n :param OldNotification: [REQUIRED]\n The previous notification that is associated with a budget.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. 
For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :type NewNotification: dict\n :param NewNotification: [REQUIRED]\n The updated notification to be associated with a budget.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_subscriber(AccountId=None, BudgetName=None, Notification=None, OldSubscriber=None, NewSubscriber=None):\n \"\"\"\n Updates a subscriber.\n See also: AWS API Documentation\n \n \n :example: response = client.update_subscriber(\n AccountId='string',\n BudgetName='string',\n Notification={\n 'NotificationType': 'ACTUAL'|'FORECASTED',\n 'ComparisonOperator': 'GREATER_THAN'|'LESS_THAN'|'EQUAL_TO',\n 'Threshold': 123.0,\n 'ThresholdType': 'PERCENTAGE'|'ABSOLUTE_VALUE',\n 'NotificationState': 'OK'|'ALARM'\n },\n OldSubscriber={\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n },\n NewSubscriber={\n 'SubscriptionType': 'SNS'|'EMAIL',\n 'Address': 'string'\n }\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The accountId that is associated with the budget whose subscriber you want to update.\n \n\n :type BudgetName: string\n :param BudgetName: [REQUIRED]\n The name of the budget whose subscriber you want to update.\n \n\n :type Notification: dict\n :param Notification: [REQUIRED]\n The notification whose subscriber you want to update.\n NotificationType (string) -- [REQUIRED]Whether the notification is for how much you have spent (ACTUAL ) or for how much you're forecasted to spend (FORECASTED ).\n ComparisonOperator (string) -- [REQUIRED]The comparison that is used for this notification.\n Threshold (float) -- [REQUIRED]The threshold that is associated with a notification. Thresholds are always a percentage.\n ThresholdType (string) --The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, AWS notifies you when you go over or are forecasted to go over your total cost threshold. 
For PERCENTAGE thresholds, AWS notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, AWS notifies you when you go over 160 dollars.\n NotificationState (string) --Whether this notification is in alarm. If a budget notification is in the ALARM state, you have passed the set threshold for the budget.\n \n\n :type OldSubscriber: dict\n :param OldSubscriber: [REQUIRED]\n The previous subscriber that is associated with a budget notification.\n SubscriptionType (string) -- [REQUIRED]The type of notification that AWS sends to a subscriber.\n Address (string) -- [REQUIRED]The address that AWS sends budget notifications to, either an SNS topic or an email.\n \n\n :type NewSubscriber: dict\n :param NewSubscriber: [REQUIRED]\n The updated subscriber that is associated with a budget notification.\n SubscriptionType (string) -- [REQUIRED]The type of notification that AWS sends to a subscriber.\n Address (string) -- [REQUIRED]The address that AWS sends budget notifications to, either an SNS topic or an email.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6565065383911133, "alphanum_fraction": 0.6626092791557312, "avg_line_length": 42.628753662109375, "blob_id": "e767c0d73220ee2ca809955d678864b22897aeba", "content_id": "1953e9b47b61cc75ec0a0c0a1cc1369182278792", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149604, "license_type": "permissive", "max_line_length": 685, "num_lines": 3429, "path": "/pyboto3/autoscaling.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef attach_instances(InstanceIds=None, AutoScalingGroupName=None):\n \"\"\"\n Attaches one or more EC2 instances to the specified Auto Scaling group.\n When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.\n If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. 
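\n    Because attaching raises the group's desired capacity (as noted above), a cautious caller can check headroom first, as in the following sketch (the group and instance names are placeholders; describe_auto_scaling_groups is documented later in this module):\n    group = client.describe_auto_scaling_groups(AutoScalingGroupNames=['my-auto-scaling-group'])['AutoScalingGroups'][0]\n    if group['DesiredCapacity'] + 1 <= group['MaxSize']:\n        client.attach_instances(AutoScalingGroupName='my-auto-scaling-group', InstanceIds=['i-0123456789abcdef0'])\n    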
If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.\n For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example attaches the specified instance to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.attach_instances(\n InstanceIds=[\n 'string',\n ],\n AutoScalingGroupName='string'\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: The IDs of the instances. You can specify up to 20 instances.\n (string) --\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :return: response = client.attach_instances(\n AutoScalingGroupName='my-auto-scaling-group',\n InstanceIds=[\n 'i-93633f9b',\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef attach_load_balancer_target_groups(AutoScalingGroupName=None, TargetGroupARNs=None):\n \"\"\"\n Attaches one or more target groups to the specified Auto Scaling group.\n To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups . To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups .\n For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example attaches the specified target group to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.attach_load_balancer_target_groups(\n AutoScalingGroupName='string',\n TargetGroupARNs=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type TargetGroupARNs: list\n :param TargetGroupARNs: [REQUIRED]\n The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef attach_load_balancers(AutoScalingGroupName=None, LoadBalancerNames=None):\n \"\"\"\n Attaches one or more Classic Load Balancers to the specified Auto Scaling group.\n To attach an Application Load Balancer instead, see AttachLoadBalancerTargetGroups .\n To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers . To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers .\n For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example attaches the specified load balancer to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.attach_load_balancers(\n AutoScalingGroupName='string',\n LoadBalancerNames=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LoadBalancerNames: list\n :param LoadBalancerNames: [REQUIRED]\n The names of the load balancers. 
You can specify up to 10 load balancers.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef batch_delete_scheduled_action(AutoScalingGroupName=None, ScheduledActionNames=None):\n \"\"\"\n Deletes one or more scheduled actions for the specified Auto Scaling group.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_delete_scheduled_action(\n AutoScalingGroupName='string',\n ScheduledActionNames=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ScheduledActionNames: list\n :param ScheduledActionNames: [REQUIRED]\n The names of the scheduled actions to delete. The maximum number allowed is 50.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'FailedScheduledActions': [\n {\n 'ScheduledActionName': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_put_scheduled_update_group_action(AutoScalingGroupName=None, ScheduledUpdateGroupActions=None):\n \"\"\"\n Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_put_scheduled_update_group_action(\n AutoScalingGroupName='string',\n ScheduledUpdateGroupActions=[\n {\n 'ScheduledActionName': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'Recurrence': 'string',\n 'MinSize': 123,\n 'MaxSize': 123,\n 'DesiredCapacity': 123\n },\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ScheduledUpdateGroupActions: list\n :param ScheduledUpdateGroupActions: [REQUIRED]\n One or more scheduled actions. The maximum number allowed is 50.\n (dict) --Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction .\n When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.\n ScheduledActionName (string) -- [REQUIRED]The name of the scaling action.\n StartTime (datetime) --The time for the action to start, in 'YYYY-MM-DDThh:mm:ssZ' format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ).\n If you specify Recurrence and StartTime , Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.\n If you try to schedule the action in the past, Amazon EC2 Auto Scaling returns an error message.\n EndTime (datetime) --The time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.\n Recurrence (string) --The recurring schedule for the action, in Unix cron syntax format. 
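\n    For example, a recurrence of '0 8 * * 1-5' runs the action at 08:00 UTC on weekdays, as in the following sketch (the group and action names are placeholders):\n    response = client.batch_put_scheduled_update_group_action(\n    AutoScalingGroupName='my-auto-scaling-group',\n    ScheduledUpdateGroupActions=[\n    {\n    'ScheduledActionName': 'weekday-scale-up',\n    'Recurrence': '0 8 * * 1-5',\n    'MinSize': 2,\n    'MaxSize': 10,\n    'DesiredCapacity': 4\n    }\n    ]\n    )\n    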
For more information about this format, see Crontab .\n MinSize (integer) --The minimum size of the group.\n MaxSize (integer) --The maximum size of the group.\n DesiredCapacity (integer) --The number of EC2 instances that should be running in the group.\n \n \n\n :rtype: dict\n :return: {\n 'FailedScheduledUpdateGroupActions': [\n {\n 'ScheduledActionName': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef complete_lifecycle_action(LifecycleHookName=None, AutoScalingGroupName=None, LifecycleActionToken=None, LifecycleActionResult=None, InstanceId=None):\n \"\"\"\n Completes the lifecycle action for the specified token or instance with the specified result.\n This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:\n For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example notifies Auto Scaling that the specified lifecycle action is complete so that it can finish launching or terminating the instance.\n Expected Output:\n \n :example: response = client.complete_lifecycle_action(\n LifecycleHookName='string',\n AutoScalingGroupName='string',\n LifecycleActionToken='string',\n LifecycleActionResult='string',\n InstanceId='string'\n )\n \n \n :type LifecycleHookName: string\n :param LifecycleHookName: [REQUIRED]\n The name of the lifecycle hook.\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LifecycleActionToken: string\n :param LifecycleActionToken: A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.\n\n :type LifecycleActionResult: string\n :param LifecycleActionResult: [REQUIRED]\n The action for the group to take. This parameter can be either CONTINUE or ABANDON .\n \n\n :type InstanceId: string\n :param InstanceId: The ID of the instance.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n LifecycleHookName (string) -- [REQUIRED]\n The name of the lifecycle hook.\n \n AutoScalingGroupName (string) -- [REQUIRED]\n The name of the Auto Scaling group.\n \n LifecycleActionToken (string) -- A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.\n LifecycleActionResult (string) -- [REQUIRED]\n The action for the group to take. 
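\n    A completion call that identifies the instance directly (rather than by the action token) might look like the following sketch (the hook, group, and instance names are placeholders):\n    response = client.complete_lifecycle_action(\n    LifecycleHookName='my-launch-hook',\n    AutoScalingGroupName='my-auto-scaling-group',\n    LifecycleActionResult='CONTINUE',\n    InstanceId='i-0123456789abcdef0'\n    )\n    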
This parameter can be either CONTINUE or ABANDON .\n \n InstanceId (string) -- The ID of the instance.\n \n \"\"\"\n pass\n\ndef create_auto_scaling_group(AutoScalingGroupName=None, LaunchConfigurationName=None, LaunchTemplate=None, MixedInstancesPolicy=None, InstanceId=None, MinSize=None, MaxSize=None, DesiredCapacity=None, DefaultCooldown=None, AvailabilityZones=None, LoadBalancerNames=None, TargetGroupARNs=None, HealthCheckType=None, HealthCheckGracePeriod=None, PlacementGroup=None, VPCZoneIdentifier=None, TerminationPolicies=None, NewInstancesProtectedFromScaleIn=None, LifecycleHookSpecificationList=None, Tags=None, ServiceLinkedRoleARN=None):\n \"\"\"\n Creates an Auto Scaling group with the specified name and attributes.\n If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits . For information about updating this limit, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide .\n For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example creates an Auto Scaling group.\n Expected Output:\n This example creates an Auto Scaling group and attaches the specified Classic Load Balancer.\n Expected Output:\n This example creates an Auto Scaling group and attaches the specified target group.\n Expected Output:\n \n :example: response = client.create_auto_scaling_group(\n AutoScalingGroupName='string',\n LaunchConfigurationName='string',\n LaunchTemplate={\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n MixedInstancesPolicy={\n 'LaunchTemplate': {\n 'LaunchTemplateSpecification': {\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n 'Overrides': [\n {\n 'InstanceType': 'string'\n },\n ]\n },\n 'InstancesDistribution': {\n 'OnDemandAllocationStrategy': 'string',\n 'OnDemandBaseCapacity': 123,\n 'OnDemandPercentageAboveBaseCapacity': 123,\n 'SpotAllocationStrategy': 'string',\n 'SpotInstancePools': 123,\n 'SpotMaxPrice': 'string'\n }\n },\n InstanceId='string',\n MinSize=123,\n MaxSize=123,\n DesiredCapacity=123,\n DefaultCooldown=123,\n AvailabilityZones=[\n 'string',\n ],\n LoadBalancerNames=[\n 'string',\n ],\n TargetGroupARNs=[\n 'string',\n ],\n HealthCheckType='string',\n HealthCheckGracePeriod=123,\n PlacementGroup='string',\n VPCZoneIdentifier='string',\n TerminationPolicies=[\n 'string',\n ],\n NewInstancesProtectedFromScaleIn=True|False,\n LifecycleHookSpecificationList=[\n {\n 'LifecycleHookName': 'string',\n 'LifecycleTransition': 'string',\n 'NotificationMetadata': 'string',\n 'HeartbeatTimeout': 123,\n 'DefaultResult': 'string',\n 'NotificationTargetARN': 'string',\n 'RoleARN': 'string'\n },\n ],\n Tags=[\n {\n 'ResourceId': 'string',\n 'ResourceType': 'string',\n 'Key': 'string',\n 'Value': 'string',\n 'PropagateAtLaunch': True|False\n },\n ],\n ServiceLinkedRoleARN='string'\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group. This name must be unique within the scope of your AWS account.\n \n\n :type LaunchConfigurationName: string\n :param LaunchConfigurationName: The name of the launch configuration. This parameter, a launch template, a mixed instances policy, or an EC2 instance must be specified.\n\n :type LaunchTemplate: dict\n :param LaunchTemplate: The launch template to use to launch instances. 
This parameter, a launch configuration, a mixed instances policy, or an EC2 instance must be specified.\n LaunchTemplateId (string) --The ID of the launch template. You must specify either a template ID or a template name.\n LaunchTemplateName (string) --The name of the launch template. You must specify either a template name or a template ID.\n Version (string) --The version number, $Latest , or $Default . If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n \n\n :type MixedInstancesPolicy: dict\n :param MixedInstancesPolicy: The mixed instances policy to use to launch instances. This parameter, a launch template, a launch configuration, or an EC2 instance must be specified.\n LaunchTemplate (dict) --The launch template and overrides.\n This parameter is required when creating an Auto Scaling group with a mixed instances policy, but is not required when updating the group.\n LaunchTemplateSpecification (dict) --The launch template to use. You must specify either the launch template ID or launch template name in the request.\n LaunchTemplateId (string) --The ID of the launch template. You must specify either a template ID or a template name.\n LaunchTemplateName (string) --The name of the launch template. You must specify either a template name or a template ID.\n Version (string) --The version number, $Latest , or $Default . If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n Overrides (list) --Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type.\n You must specify between 2 and 20 overrides.\n (dict) --Describes an override for a launch template.\n InstanceType (string) --The instance type.\n For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.\n \n InstancesDistribution (dict) --The instances distribution to use.\n If you leave this parameter unspecified when creating the group, the default values are used.\n OnDemandAllocationStrategy (string) --Indicates how to allocate instance types to fulfill On-Demand capacity.\n The only valid value is prioritized , which is also the default value. This strategy uses the order of instance types in the Overrides array of LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.\n OnDemandBaseCapacity (integer) --The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.\n The default value is 0. 
If you leave this parameter set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.\n OnDemandPercentageAboveBaseCapacity (integer) --Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity .\n The range is 0 to 100. The default value is 100. If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.\n SpotAllocationStrategy (string) --Indicates how to allocate Spot capacity across Spot pools.\n The only valid value is lowest-price , which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify.\n SpotInstancePools (integer) --The number of Spot pools to use to allocate your Spot capacity. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate .\n The range is 1 to 20 and the default is 2.\n SpotMaxPrice (string) --The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave this value blank (which is the default), the maximum Spot price is set at the On-Demand price.\n \n \n\n :type InstanceId: string\n :param InstanceId: The ID of the instance used to create a launch configuration for the group. This parameter, a launch configuration, a launch template, or a mixed instances policy must be specified.\n When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.\n For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type MinSize: integer\n :param MinSize: [REQUIRED]\n The minimum size of the group.\n \n\n :type MaxSize: integer\n :param MaxSize: [REQUIRED]\n The maximum size of the group.\n \n\n :type DesiredCapacity: integer\n :param DesiredCapacity: The number of EC2 instances that should be running in the group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.\n\n :type DefaultCooldown: integer\n :param DefaultCooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.\n For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type AvailabilityZones: list\n :param AvailabilityZones: One or more Availability Zones for the group. This parameter is optional if you specify one or more subnets.\n (string) --\n \n\n :type LoadBalancerNames: list\n :param LoadBalancerNames: One or more Classic Load Balancers. To specify an Application Load Balancer, use TargetGroupARNs instead.\n For more information, see Using a Load Balancer With an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n (string) --\n \n\n :type TargetGroupARNs: list\n :param TargetGroupARNs: The Amazon Resource Names (ARN) of the target groups.\n (string) --\n \n\n :type HealthCheckType: string\n :param HealthCheckType: The service to use for the health checks. 
The valid values are EC2 and ELB .\n By default, health checks use Amazon EC2 instance status checks to determine the health of an instance. For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type HealthCheckGracePeriod: integer\n :param HealthCheckGracePeriod: The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default is 0.\n This parameter is required if you are adding an ELB health check.\n For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type PlacementGroup: string\n :param PlacementGroup: The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide .\n\n :type VPCZoneIdentifier: string\n :param VPCZoneIdentifier: A comma-separated list of subnet identifiers for your virtual private cloud (VPC).\n If you specify subnets and Availability Zones with this call, ensure that the subnets' Availability Zones match the Availability Zones specified.\n For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type TerminationPolicies: list\n :param TerminationPolicies: One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.\n For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide .\n (string) --\n \n\n :type NewInstancesProtectedFromScaleIn: boolean\n :param NewInstancesProtectedFromScaleIn: Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.\n\n :type LifecycleHookSpecificationList: list\n :param LifecycleHookSpecificationList: One or more lifecycle hooks.\n (dict) --Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.\n For more information, see Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide .\n LifecycleHookName (string) -- [REQUIRED]The name of the lifecycle hook.\n LifecycleTransition (string) -- [REQUIRED]The state of the EC2 instance to which you want to attach the lifecycle hook. The possible values are:\n autoscaling:EC2_INSTANCE_LAUNCHING\n autoscaling:EC2_INSTANCE_TERMINATING\n NotificationMetadata (string) --Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.\n HeartbeatTimeout (integer) --The maximum time, in seconds, that can elapse before the lifecycle hook times out. If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat .\n DefaultResult (string) --Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON .\n NotificationTargetARN (string) --The ARN of the target that Amazon EC2 Auto Scaling sends notifications to when an instance is in the transition state for the lifecycle hook. 
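A hedged sketch of one list entry (names and ARNs illustrative): {'LifecycleHookName': 'my-hook', 'LifecycleTransition': 'autoscaling:EC2_INSTANCE_LAUNCHING', 'NotificationTargetARN': 'arn:aws:sqs:us-west-2:123456789012:my-queue', 'RoleARN': 'arn:aws:iam::123456789012:role/my-notification-role'} .\n 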
The notification target can be either an SQS queue or an SNS topic.\n RoleARN (string) --The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.\n \n \n\n :type Tags: list\n :param Tags: One or more tags.\n For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide .\n (dict) --Describes a tag for an Auto Scaling group.\n ResourceId (string) --The name of the group.\n ResourceType (string) --The type of resource. The only supported value is auto-scaling-group .\n Key (string) -- [REQUIRED]The tag key.\n Value (string) --The tag value.\n PropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group.\n \n \n\n :type ServiceLinkedRoleARN: string\n :param ServiceLinkedRoleARN: The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist.\n\n :return: response = client.create_auto_scaling_group(\n AutoScalingGroupName='my-auto-scaling-group',\n LaunchConfigurationName='my-launch-config',\n MaxSize=3,\n MinSize=1,\n VPCZoneIdentifier='subnet-4176792c',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef create_launch_configuration(LaunchConfigurationName=None, ImageId=None, KeyName=None, SecurityGroups=None, ClassicLinkVPCId=None, ClassicLinkVPCSecurityGroups=None, UserData=None, InstanceId=None, InstanceType=None, KernelId=None, RamdiskId=None, BlockDeviceMappings=None, InstanceMonitoring=None, SpotPrice=None, IamInstanceProfile=None, EbsOptimized=None, AssociatePublicIpAddress=None, PlacementTenancy=None):\n \"\"\"\n Creates a launch configuration.\n If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits . For information about updating this limit, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide .\n For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example creates a launch configuration.\n Expected Output:\n \n :example: response = client.create_launch_configuration(\n LaunchConfigurationName='string',\n ImageId='string',\n KeyName='string',\n SecurityGroups=[\n 'string',\n ],\n ClassicLinkVPCId='string',\n ClassicLinkVPCSecurityGroups=[\n 'string',\n ],\n UserData='string',\n InstanceId='string',\n InstanceType='string',\n KernelId='string',\n RamdiskId='string',\n BlockDeviceMappings=[\n {\n 'VirtualName': 'string',\n 'DeviceName': 'string',\n 'Ebs': {\n 'SnapshotId': 'string',\n 'VolumeSize': 123,\n 'VolumeType': 'string',\n 'DeleteOnTermination': True|False,\n 'Iops': 123,\n 'Encrypted': True|False\n },\n 'NoDevice': True|False\n },\n ],\n InstanceMonitoring={\n 'Enabled': True|False\n },\n SpotPrice='string',\n IamInstanceProfile='string',\n EbsOptimized=True|False,\n AssociatePublicIpAddress=True|False,\n PlacementTenancy='string'\n )\n \n \n :type LaunchConfigurationName: string\n :param LaunchConfigurationName: [REQUIRED]\n The name of the launch configuration. 
This name must be unique within the scope of your AWS account.\n \n\n :type ImageId: string\n :param ImageId: The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.\n If you do not specify InstanceId , you must specify ImageId .\n For more information, see Finding an AMI in the Amazon Elastic Compute Cloud User Guide .\n \n\n :type KeyName: string\n :param KeyName: The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide .\n\n :type SecurityGroups: list\n :param SecurityGroups: One or more security groups with which to associate the instances.\n If your instances are launched in EC2-Classic, you can either specify security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide .\n If your instances are launched into a VPC, specify security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide .\n (string) --\n \n\n :type ClassicLinkVPCId: string\n :param ClassicLinkVPCId: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter is supported only if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide .\n\n :type ClassicLinkVPCSecurityGroups: list\n :param ClassicLinkVPCSecurityGroups: The IDs of one or more security groups for the specified ClassicLink-enabled VPC. This parameter is required if you specify a ClassicLink-enabled VPC, and is not supported otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide .\n (string) --\n \n\n :type UserData: string\n :param UserData: The user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide .\n This value will be base64 encoded automatically. Do not base64 encode this value prior to performing the operation.\n \n\n :type InstanceId: string\n :param InstanceId: The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.\n If you do not specify InstanceId , you must specify both ImageId and InstanceType .\n To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.\n For more information, see Create a Launch Configuration Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type InstanceType: string\n :param InstanceType: The instance type of the EC2 instance.\n If you do not specify InstanceId , you must specify InstanceType .\n For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.\n \n\n :type KernelId: string\n :param KernelId: The ID of the kernel associated with the AMI.\n\n :type RamdiskId: string\n :param RamdiskId: The ID of the RAM disk associated with the AMI.\n\n :type BlockDeviceMappings: list\n :param BlockDeviceMappings: One or more mappings that specify how block devices are exposed to the instance. 
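For example, a hedged sketch of a single EBS mapping (device name and size illustrative): {'DeviceName': '/dev/sdh', 'Ebs': {'VolumeSize': 100, 'VolumeType': 'gp2', 'DeleteOnTermination': True}} .\n 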
For more information, see Block Device Mapping in the Amazon Elastic Compute Cloud User Guide .\n (dict) --Describes a block device mapping.\n VirtualName (string) --The name of the virtual device (for example, ephemeral0 ).\n DeviceName (string) -- [REQUIRED]The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh ).\n Ebs (dict) --The information about the Amazon EBS volume.\n SnapshotId (string) --The ID of the snapshot.\n VolumeSize (integer) --The volume size, in GiB. For standard volumes, specify a value from 1 to 1,024. For io1 volumes, specify a value from 4 to 16,384. For gp2 volumes, specify a value from 1 to 16,384. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.\n VolumeType (string) --The volume type. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide .\n Valid values: standard | io1 | gp2\n DeleteOnTermination (boolean) --Indicates whether the volume is deleted on instance termination. The default is true .\n Iops (integer) --The number of I/O operations per second (IOPS) to provision for the volume.\n Constraint: Required when the volume type is io1 .\n Encrypted (boolean) --Indicates whether the volume should be encrypted. Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or an unencrypted volume from an encrypted snapshot. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide .\n NoDevice (boolean) --Suppresses a device mapping.\n If this parameter is true for the root device, the instance might fail the EC2 health check. In that case, Amazon EC2 Auto Scaling launches a replacement instance.\n \n \n\n :type InstanceMonitoring: dict\n :param InstanceMonitoring: Enables detailed monitoring (true ) or basic monitoring (false ) for the Auto Scaling instances. The default is true .\n Enabled (boolean) --If true , detailed monitoring is enabled. Otherwise, basic monitoring is enabled.\n \n\n :type SpotPrice: string\n :param SpotPrice: The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n\n :type IamInstanceProfile: string\n :param IamInstanceProfile: The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.\n EC2 instances launched with an IAM role automatically have AWS security credentials available. You can use IAM roles with Amazon EC2 Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Launch Auto Scaling Instances with an IAM Role in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type EbsOptimized: boolean\n :param EbsOptimized: Indicates whether the instance is optimized for Amazon EBS I/O. By default, the instance is not optimized for EBS I/O. The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. 
This optimization is not available with all instance types. Additional usage charges apply. For more information, see Amazon EBS-Optimized Instances in the Amazon Elastic Compute Cloud User Guide .\n\n :type AssociatePublicIpAddress: boolean\n :param AssociatePublicIpAddress: Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide .\n If you specify this parameter, be sure to specify at least one subnet when you create your group.\n Default: If the instance is launched into a default subnet, the default is to assign a public IP address. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address.\n \n\n :type PlacementTenancy: string\n :param PlacementTenancy: The tenancy of the instance. An instance with a tenancy of dedicated runs on single-tenant hardware and can only be launched into a VPC.\n To launch Dedicated Instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default ), you must set the value of this parameter to dedicated .\n If you specify this parameter, be sure to specify at least one subnet when you create your group.\n For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide .\n Valid values: default | dedicated\n \n\n :return: response = client.create_launch_configuration(\n IamInstanceProfile='my-iam-role',\n ImageId='ami-12345678',\n InstanceType='m3.medium',\n LaunchConfigurationName='my-launch-config',\n SecurityGroups=[\n 'sg-eb2af88e',\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef create_or_update_tags(Tags=None):\n \"\"\"\n Creates or updates tags for the specified Auto Scaling group.\n When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.\n For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example adds two tags to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.create_or_update_tags(\n Tags=[\n {\n 'ResourceId': 'string',\n 'ResourceType': 'string',\n 'Key': 'string',\n 'Value': 'string',\n 'PropagateAtLaunch': True|False\n },\n ]\n )\n \n \n :type Tags: list\n :param Tags: [REQUIRED]\n One or more tags.\n (dict) --Describes a tag for an Auto Scaling group.\n ResourceId (string) --The name of the group.\n ResourceType (string) --The type of resource. 
The only supported value is auto-scaling-group .\n Key (string) -- [REQUIRED]The tag key.\n Value (string) --The tag value.\n PropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group.\n \n \n\n :return: response = client.create_or_update_tags(\n Tags=[\n {\n 'Key': 'Role',\n 'PropagateAtLaunch': True,\n 'ResourceId': 'my-auto-scaling-group',\n 'ResourceType': 'auto-scaling-group',\n 'Value': 'WebServer',\n },\n {\n 'Key': 'Dept',\n 'PropagateAtLaunch': True,\n 'ResourceId': 'my-auto-scaling-group',\n 'ResourceType': 'auto-scaling-group',\n 'Value': 'Research',\n },\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_auto_scaling_group(AutoScalingGroupName=None, ForceDelete=None):\n \"\"\"\n Deletes the specified Auto Scaling group.\n If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.\n If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.\n To remove instances from the Auto Scaling group before deleting it, call DetachInstances with the list of instances and the option to decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch replacement instances.\n To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup and set the minimum size and desired capacity of the Auto Scaling group to zero.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified Auto Scaling group.\n Expected Output:\n This example deletes the specified Auto Scaling group and all its instances.\n Expected Output:\n \n :example: response = client.delete_auto_scaling_group(\n AutoScalingGroupName='string',\n ForceDelete=True|False\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ForceDelete: boolean\n :param ForceDelete: Specifies that the group is to be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any lifecycle actions associated with the group.\n\n :return: response = client.delete_auto_scaling_group(\n AutoScalingGroupName='my-auto-scaling-group',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_launch_configuration(LaunchConfigurationName=None):\n \"\"\"\n Deletes the specified launch configuration.\n The launch configuration must not be attached to an Auto Scaling group. 
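(A hedged sketch of detaching it first, names illustrative: client.update_auto_scaling_group(AutoScalingGroupName='my-auto-scaling-group', LaunchConfigurationName='my-other-launch-config') .)\n 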
When this call completes, the launch configuration is no longer available for use.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified launch configuration.\n Expected Output:\n \n :example: response = client.delete_launch_configuration(\n LaunchConfigurationName='string'\n )\n \n \n :type LaunchConfigurationName: string\n :param LaunchConfigurationName: [REQUIRED]\n The name of the launch configuration.\n \n\n :return: response = client.delete_launch_configuration(\n LaunchConfigurationName='my-launch-config',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_lifecycle_hook(LifecycleHookName=None, AutoScalingGroupName=None):\n \"\"\"\n Deletes the specified lifecycle hook.\n If there are any outstanding lifecycle actions, they are completed first (ABANDON for launching instances, CONTINUE for terminating instances).\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified lifecycle hook.\n Expected Output:\n \n :example: response = client.delete_lifecycle_hook(\n LifecycleHookName='string',\n AutoScalingGroupName='string'\n )\n \n \n :type LifecycleHookName: string\n :param LifecycleHookName: [REQUIRED]\n The name of the lifecycle hook.\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_notification_configuration(AutoScalingGroupName=None, TopicARN=None):\n \"\"\"\n Deletes the specified notification.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified notification from the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.delete_notification_configuration(\n AutoScalingGroupName='string',\n TopicARN='string'\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type TopicARN: string\n :param TopicARN: [REQUIRED]\n The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (Amazon SNS) topic.\n \n\n :return: response = client.delete_notification_configuration(\n AutoScalingGroupName='my-auto-scaling-group',\n TopicARN='arn:aws:sns:us-west-2:123456789012:my-sns-topic',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_policy(AutoScalingGroupName=None, PolicyName=None):\n \"\"\"\n Deletes the specified Auto Scaling policy.\n Deleting a policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified Auto Scaling policy.\n Expected Output:\n \n :example: response = client.delete_policy(\n AutoScalingGroupName='string',\n PolicyName='string'\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: The name of the Auto Scaling group.\n\n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name or Amazon Resource Name (ARN) of the policy.\n \n\n :return: response = client.delete_policy(\n AutoScalingGroupName='my-auto-scaling-group',\n PolicyName='ScaleIn',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_scheduled_action(AutoScalingGroupName=None, ScheduledActionName=None):\n \"\"\"\n Deletes the specified scheduled action.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified scheduled action from the specified Auto Scaling group.\n Expected Output:\n \n :example: response = 
client.delete_scheduled_action(\n AutoScalingGroupName='string',\n ScheduledActionName='string'\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ScheduledActionName: string\n :param ScheduledActionName: [REQUIRED]\n The name of the action to delete.\n \n\n :return: response = client.delete_scheduled_action(\n AutoScalingGroupName='my-auto-scaling-group',\n ScheduledActionName='my-scheduled-action',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_tags(Tags=None):\n \"\"\"\n Deletes the specified tags.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified tag from the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.delete_tags(\n Tags=[\n {\n 'ResourceId': 'string',\n 'ResourceType': 'string',\n 'Key': 'string',\n 'Value': 'string',\n 'PropagateAtLaunch': True|False\n },\n ]\n )\n \n \n :type Tags: list\n :param Tags: [REQUIRED]\n One or more tags.\n (dict) --Describes a tag for an Auto Scaling group.\n ResourceId (string) --The name of the group.\n ResourceType (string) --The type of resource. The only supported value is auto-scaling-group .\n Key (string) -- [REQUIRED]The tag key.\n Value (string) --The tag value.\n PropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group.\n \n \n\n :return: response = client.delete_tags(\n Tags=[\n {\n 'Key': 'Dept',\n 'ResourceId': 'my-auto-scaling-group',\n 'ResourceType': 'auto-scaling-group',\n 'Value': 'Research',\n },\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef describe_account_limits():\n \"\"\"\n Describes the current Auto Scaling resource limits for your AWS account.\n For information about requesting an increase in these limits, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example describes the Auto Scaling limits for your AWS account.\n Expected Output:\n \n :example: response = client.describe_account_limits()\n \n \n :rtype: dict\n :return: {\n 'MaxNumberOfAutoScalingGroups': 123,\n 'MaxNumberOfLaunchConfigurations': 123,\n 'NumberOfAutoScalingGroups': 123,\n 'NumberOfLaunchConfigurations': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_adjustment_types():\n \"\"\"\n Describes the policy adjustment types for use with PutScalingPolicy .\n See also: AWS API Documentation\n \n Examples\n This example describes the available adjustment types.\n Expected Output:\n \n :example: response = client.describe_adjustment_types()\n \n \n :rtype: dict\n :return: {\n 'AdjustmentTypes': [\n {\n 'AdjustmentType': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_auto_scaling_groups(AutoScalingGroupNames=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes one or more Auto Scaling groups.\n See also: AWS API Documentation\n \n Examples\n This example describes the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[\n 'string',\n ],\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type AutoScalingGroupNames: list\n :param AutoScalingGroupNames: The names of the Auto Scaling groups. You can specify up to MaxRecords names. If you omit this parameter, all Auto Scaling groups are described.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'AutoScalingGroups': [\n {\n 'AutoScalingGroupName': 'string',\n 'AutoScalingGroupARN': 'string',\n 'LaunchConfigurationName': 'string',\n 'LaunchTemplate': {\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n 'MixedInstancesPolicy': {\n 'LaunchTemplate': {\n 'LaunchTemplateSpecification': {\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n 'Overrides': [\n {\n 'InstanceType': 'string'\n },\n ]\n },\n 'InstancesDistribution': {\n 'OnDemandAllocationStrategy': 'string',\n 'OnDemandBaseCapacity': 123,\n 'OnDemandPercentageAboveBaseCapacity': 123,\n 'SpotAllocationStrategy': 'string',\n 'SpotInstancePools': 123,\n 'SpotMaxPrice': 'string'\n }\n },\n 'MinSize': 123,\n 'MaxSize': 123,\n 'DesiredCapacity': 123,\n 'DefaultCooldown': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'LoadBalancerNames': [\n 'string',\n ],\n 'TargetGroupARNs': [\n 'string',\n ],\n 'HealthCheckType': 'string',\n 'HealthCheckGracePeriod': 123,\n 'Instances': [\n {\n 'InstanceId': 'string',\n 'AvailabilityZone': 'string',\n 'LifecycleState': 'Pending'|'Pending:Wait'|'Pending:Proceed'|'Quarantined'|'InService'|'Terminating'|'Terminating:Wait'|'Terminating:Proceed'|'Terminated'|'Detaching'|'Detached'|'EnteringStandby'|'Standby',\n 'HealthStatus': 'string',\n 'LaunchConfigurationName': 'string',\n 'LaunchTemplate': {\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n 'ProtectedFromScaleIn': True|False\n },\n ],\n 'CreatedTime': datetime(2015, 1, 1),\n 'SuspendedProcesses': [\n {\n 'ProcessName': 'string',\n 'SuspensionReason': 'string'\n },\n ],\n 'PlacementGroup': 'string',\n 'VPCZoneIdentifier': 'string',\n 'EnabledMetrics': [\n {\n 'Metric': 'string',\n 'Granularity': 'string'\n },\n ],\n 'Status': 'string',\n 'Tags': [\n {\n 'ResourceId': 'string',\n 'ResourceType': 'string',\n 'Key': 'string',\n 'Value': 'string',\n 'PropagateAtLaunch': True|False\n },\n ],\n 'TerminationPolicies': [\n 'string',\n ],\n 'NewInstancesProtectedFromScaleIn': True|False,\n 'ServiceLinkedRoleARN': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_auto_scaling_instances(InstanceIds=None, MaxRecords=None, NextToken=None):\n \"\"\"\n Describes one or more Auto Scaling instances.\n See also: AWS API Documentation\n \n Examples\n This example describes the specified Auto Scaling instance.\n Expected Output:\n \n :example: response = client.describe_auto_scaling_instances(\n InstanceIds=[\n 'string',\n ],\n MaxRecords=123,\n NextToken='string'\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: The IDs of the instances. You can specify up to MaxRecords IDs. If you omit this parameter, all Auto Scaling instances are described. If you specify an ID that does not exist, it is ignored with no error.\n (string) --\n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 50 and the maximum value is 50.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'AutoScalingInstances': [\n {\n 'InstanceId': 'string',\n 'AutoScalingGroupName': 'string',\n 'AvailabilityZone': 'string',\n 'LifecycleState': 'string',\n 'HealthStatus': 'string',\n 'LaunchConfigurationName': 'string',\n 'LaunchTemplate': {\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n 'ProtectedFromScaleIn': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_auto_scaling_notification_types():\n \"\"\"\n Describes the notification types that are supported by Amazon EC2 Auto Scaling.\n See also: AWS API Documentation\n \n Examples\n This example describes the available notification types.\n Expected Output:\n \n :example: response = client.describe_auto_scaling_notification_types()\n \n \n :rtype: dict\n :return: {\n 'AutoScalingNotificationTypes': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_launch_configurations(LaunchConfigurationNames=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes one or more launch configurations.\n See also: AWS API Documentation\n \n Examples\n This example describes the specified launch configuration.\n Expected Output:\n \n :example: response = client.describe_launch_configurations(\n LaunchConfigurationNames=[\n 'string',\n ],\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type LaunchConfigurationNames: list\n :param LaunchConfigurationNames: The launch configuration names. If you omit this parameter, all launch configurations are described.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. 
The default value is 50 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'LaunchConfigurations': [\n {\n 'LaunchConfigurationName': 'string',\n 'LaunchConfigurationARN': 'string',\n 'ImageId': 'string',\n 'KeyName': 'string',\n 'SecurityGroups': [\n 'string',\n ],\n 'ClassicLinkVPCId': 'string',\n 'ClassicLinkVPCSecurityGroups': [\n 'string',\n ],\n 'UserData': 'string',\n 'InstanceType': 'string',\n 'KernelId': 'string',\n 'RamdiskId': 'string',\n 'BlockDeviceMappings': [\n {\n 'VirtualName': 'string',\n 'DeviceName': 'string',\n 'Ebs': {\n 'SnapshotId': 'string',\n 'VolumeSize': 123,\n 'VolumeType': 'string',\n 'DeleteOnTermination': True|False,\n 'Iops': 123,\n 'Encrypted': True|False\n },\n 'NoDevice': True|False\n },\n ],\n 'InstanceMonitoring': {\n 'Enabled': True|False\n },\n 'SpotPrice': 'string',\n 'IamInstanceProfile': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'EbsOptimized': True|False,\n 'AssociatePublicIpAddress': True|False,\n 'PlacementTenancy': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_lifecycle_hook_types():\n \"\"\"\n Describes the available types of lifecycle hooks.\n The following hook types are supported:\n See also: AWS API Documentation\n \n Examples\n This example describes the available lifecycle hook types.\n Expected Output:\n \n :example: response = client.describe_lifecycle_hook_types()\n \n \n :rtype: dict\n :return: {\n 'LifecycleHookTypes': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_lifecycle_hooks(AutoScalingGroupName=None, LifecycleHookNames=None):\n \"\"\"\n Describes the lifecycle hooks for the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example describes the lifecycle hooks for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_lifecycle_hooks(\n AutoScalingGroupName='string',\n LifecycleHookNames=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LifecycleHookNames: list\n :param LifecycleHookNames: The names of one or more lifecycle hooks. If you omit this parameter, all lifecycle hooks are described.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'LifecycleHooks': [\n {\n 'LifecycleHookName': 'string',\n 'AutoScalingGroupName': 'string',\n 'LifecycleTransition': 'string',\n 'NotificationTargetARN': 'string',\n 'RoleARN': 'string',\n 'NotificationMetadata': 'string',\n 'HeartbeatTimeout': 123,\n 'GlobalTimeout': 123,\n 'DefaultResult': 'string'\n },\n ]\n }\n \n \n :returns: \n autoscaling:EC2_INSTANCE_LAUNCHING\n autoscaling:EC2_INSTANCE_TERMINATING\n \n \"\"\"\n pass\n\ndef describe_load_balancer_target_groups(AutoScalingGroupName=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes the target groups for the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example describes the target groups attached to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_load_balancer_target_groups(\n AutoScalingGroupName='string',\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'LoadBalancerTargetGroups': [\n {\n 'LoadBalancerTargetGroupARN': 'string',\n 'State': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Adding - The Auto Scaling instances are being registered with the target group.\n Added - All Auto Scaling instances are registered with the target group.\n InService - At least one Auto Scaling instance passed an ELB health check.\n Removing - The Auto Scaling instances are being deregistered from the target group. If connection draining is enabled, Elastic Load Balancing waits for in-flight requests to complete before deregistering the instances.\n Removed - All Auto Scaling instances are deregistered from the target group.\n \n \"\"\"\n pass\n\ndef describe_load_balancers(AutoScalingGroupName=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes the load balancers for the specified Auto Scaling group.\n This operation describes only Classic Load Balancers. If you have Application Load Balancers, use DescribeLoadBalancerTargetGroups instead.\n See also: AWS API Documentation\n \n Examples\n This example describes the load balancers attached to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_load_balancers(\n AutoScalingGroupName='string',\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'LoadBalancers': [\n {\n 'LoadBalancerName': 'string',\n 'State': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Adding - The instances in the group are being registered with the load balancer.\n Added - All instances in the group are registered with the load balancer.\n InService - At least one instance in the group passed an ELB health check.\n Removing - The instances in the group are being deregistered from the load balancer. If connection draining is enabled, Elastic Load Balancing waits for in-flight requests to complete before deregistering the instances.\n Removed - All instances in the group are deregistered from the load balancer.\n \n \"\"\"\n pass\n\ndef describe_metric_collection_types():\n \"\"\"\n Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.\n The GroupStandbyInstances metric is not returned by default. 
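(A hedged sketch, group name illustrative: client.enable_metrics_collection(AutoScalingGroupName='my-auto-scaling-group', Metrics=['GroupStandbyInstances'], Granularity='1Minute') .)\n 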
You must explicitly request this metric when calling EnableMetricsCollection .\n See also: AWS API Documentation\n \n Examples\n This example describes the available metric collection types.\n Expected Output:\n \n :example: response = client.describe_metric_collection_types()\n \n \n :rtype: dict\n :return: {\n 'Metrics': [\n {\n 'Metric': 'string'\n },\n ],\n 'Granularities': [\n {\n 'Granularity': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_notification_configurations(AutoScalingGroupNames=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes the notification actions associated with the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example describes the notification configurations for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_notification_configurations(\n AutoScalingGroupNames=[\n 'string',\n ],\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type AutoScalingGroupNames: list\n :param AutoScalingGroupNames: The name of the Auto Scaling group.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'NotificationConfigurations': [\n {\n 'AutoScalingGroupName': 'string',\n 'TopicARN': 'string',\n 'NotificationType': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n autoscaling:EC2_INSTANCE_LAUNCH\n autoscaling:EC2_INSTANCE_LAUNCH_ERROR\n autoscaling:EC2_INSTANCE_TERMINATE\n autoscaling:EC2_INSTANCE_TERMINATE_ERROR\n autoscaling:TEST_NOTIFICATION\n \n \"\"\"\n pass\n\ndef describe_policies(AutoScalingGroupName=None, PolicyNames=None, PolicyTypes=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes the policies for the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example describes the policies for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_policies(\n AutoScalingGroupName='string',\n PolicyNames=[\n 'string',\n ],\n PolicyTypes=[\n 'string',\n ],\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: The name of the Auto Scaling group.\n\n :type PolicyNames: list\n :param PolicyNames: The names of one or more policies. If you omit this parameter, all policies are described. If a group name is provided, the results are limited to that group. This list is limited to 50 items. If you specify an unknown policy name, it is ignored with no error.\n (string) --\n \n\n :type PolicyTypes: list\n :param PolicyTypes: One or more policy types. Valid values are SimpleScaling and StepScaling .\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to be returned with each call. 
The default value is 50 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'ScalingPolicies': [\n {\n 'AutoScalingGroupName': 'string',\n 'PolicyName': 'string',\n 'PolicyARN': 'string',\n 'PolicyType': 'string',\n 'AdjustmentType': 'string',\n 'MinAdjustmentStep': 123,\n 'MinAdjustmentMagnitude': 123,\n 'ScalingAdjustment': 123,\n 'Cooldown': 123,\n 'StepAdjustments': [\n {\n 'MetricIntervalLowerBound': 123.0,\n 'MetricIntervalUpperBound': 123.0,\n 'ScalingAdjustment': 123\n },\n ],\n 'MetricAggregationType': 'string',\n 'EstimatedInstanceWarmup': 123,\n 'Alarms': [\n {\n 'AlarmName': 'string',\n 'AlarmARN': 'string'\n },\n ],\n 'TargetTrackingConfiguration': {\n 'PredefinedMetricSpecification': {\n 'PredefinedMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'ALBRequestCountPerTarget',\n 'ResourceLabel': 'string'\n },\n 'CustomizedMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'TargetValue': 123.0,\n 'DisableScaleIn': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.\n To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.\n \n \"\"\"\n pass\n\ndef describe_scaling_activities(ActivityIds=None, AutoScalingGroupName=None, MaxRecords=None, NextToken=None):\n \"\"\"\n Describes one or more scaling activities for the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example describes the scaling activities for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_scaling_activities(\n ActivityIds=[\n 'string',\n ],\n AutoScalingGroupName='string',\n MaxRecords=123,\n NextToken='string'\n )\n \n \n :type ActivityIds: list\n :param ActivityIds: The activity IDs of the desired scaling activities. You can specify up to 50 IDs. If you omit this parameter, all activities for the past six weeks are described. If unknown activities are requested, they are ignored with no error. If you specify an Auto Scaling group, the results are limited to that group.\n (string) --\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: The name of the Auto Scaling group.\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Activities': [\n {\n 'ActivityId': 'string',\n 'AutoScalingGroupName': 'string',\n 'Description': 'string',\n 'Cause': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',\n 'StatusMessage': 'string',\n 'Progress': 123,\n 'Details': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_scaling_process_types():\n \"\"\"\n Describes the scaling process types for use with ResumeProcesses and SuspendProcesses .\n See also: AWS API Documentation\n \n Examples\n This example describes the Auto Scaling process types.\n Expected Output:\n \n :example: response = client.describe_scaling_process_types()\n \n \n :rtype: dict\n :return: {\n 'Processes': [\n {\n 'ProcessName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_scheduled_actions(AutoScalingGroupName=None, ScheduledActionNames=None, StartTime=None, EndTime=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes the actions scheduled for your Auto Scaling group that haven't run. To describe the actions that have already run, use DescribeScalingActivities .\n See also: AWS API Documentation\n \n Examples\n This example describes the scheduled actions for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_scheduled_actions(\n AutoScalingGroupName='string',\n ScheduledActionNames=[\n 'string',\n ],\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: The name of the Auto Scaling group.\n\n :type ScheduledActionNames: list\n :param ScheduledActionNames: The names of one or more scheduled actions. You can specify up to 50 actions. If you omit this parameter, all scheduled actions are described. If you specify an unknown scheduled action, it is ignored with no error.\n (string) --\n \n\n :type StartTime: datetime\n :param StartTime: The earliest scheduled start time to return. If scheduled action names are provided, this parameter is ignored.\n\n :type EndTime: datetime\n :param EndTime: The latest scheduled start time to return. If scheduled action names are provided, this parameter is ignored.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'ScheduledUpdateGroupActions': [\n {\n 'AutoScalingGroupName': 'string',\n 'ScheduledActionName': 'string',\n 'ScheduledActionARN': 'string',\n 'Time': datetime(2015, 1, 1),\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'Recurrence': 'string',\n 'MinSize': 123,\n 'MaxSize': 123,\n 'DesiredCapacity': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_tags(Filters=None, NextToken=None, MaxRecords=None):\n \"\"\"\n Describes the specified tags.\n You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. 
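For example, a hedged sketch (group name illustrative): response = client.describe_tags(Filters=[{'Name': 'auto-scaling-group', 'Values': ['my-auto-scaling-group']}]) .\n 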
You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.\n You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.\n See also: AWS API Documentation\n \n Examples\n This example describes the tags for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.describe_tags(\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n NextToken='string',\n MaxRecords=123\n )\n \n \n :type Filters: list\n :param Filters: One or more filters to scope the tags to return. The maximum number of filters per filter type (for example, auto-scaling-group ) is 1000.\n (dict) --Describes a filter.\n Name (string) --The name of the filter. The valid values are: 'auto-scaling-group' , 'key' , 'value' , and 'propagate-at-launch' .\n Values (list) --The value of the filter.\n (string) --\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'ResourceId': 'string',\n 'ResourceType': 'string',\n 'Key': 'string',\n 'Value': 'string',\n 'PropagateAtLaunch': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_termination_policy_types():\n \"\"\"\n Describes the termination policies supported by Amazon EC2 Auto Scaling.\n For more information, see Controlling Which Auto Scaling Instances Terminate During Scale In in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example describes the available termination policy types.\n Expected Output:\n \n :example: response = client.describe_termination_policy_types()\n \n \n :rtype: dict\n :return: {\n 'TerminationPolicyTypes': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef detach_instances(InstanceIds=None, AutoScalingGroupName=None, ShouldDecrementDesiredCapacity=None):\n \"\"\"\n Removes one or more instances from the specified Auto Scaling group.\n After the instances are detached, you can manage them independent of the Auto Scaling group.\n If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.\n If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.\n For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example detaches the specified instance from the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.detach_instances(\n InstanceIds=[\n 'string',\n ],\n AutoScalingGroupName='string',\n ShouldDecrementDesiredCapacity=True|False\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: The IDs of the instances. 
You can specify up to 20 instances.\n (string) --\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ShouldDecrementDesiredCapacity: boolean\n :param ShouldDecrementDesiredCapacity: [REQUIRED]\n Indicates whether the Auto Scaling group decrements the desired capacity value by the number of instances detached.\n \n\n :rtype: dict\n :return: {\n 'Activities': [\n {\n 'ActivityId': 'string',\n 'AutoScalingGroupName': 'string',\n 'Description': 'string',\n 'Cause': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',\n 'StatusMessage': 'string',\n 'Progress': 123,\n 'Details': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef detach_load_balancer_target_groups(AutoScalingGroupName=None, TargetGroupARNs=None):\n \"\"\"\n Detaches one or more target groups from the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example detaches the specified target group from the specified Auto Scaling group\n Expected Output:\n \n :example: response = client.detach_load_balancer_target_groups(\n AutoScalingGroupName='string',\n TargetGroupARNs=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type TargetGroupARNs: list\n :param TargetGroupARNs: [REQUIRED]\n The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef detach_load_balancers(AutoScalingGroupName=None, LoadBalancerNames=None):\n \"\"\"\n Detaches one or more Classic Load Balancers from the specified Auto Scaling group.\n This operation detaches only Classic Load Balancers. If you have Application Load Balancers, use DetachLoadBalancerTargetGroups instead.\n When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers . The instances remain running.\n See also: AWS API Documentation\n \n Examples\n This example detaches the specified load balancer from the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.detach_load_balancers(\n AutoScalingGroupName='string',\n LoadBalancerNames=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LoadBalancerNames: list\n :param LoadBalancerNames: [REQUIRED]\n The names of the load balancers. 
You can specify up to 10 load balancers.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disable_metrics_collection(AutoScalingGroupName=None, Metrics=None):\n \"\"\"\n Disables group metrics for the specified Auto Scaling group.\n See also: AWS API Documentation\n \n Examples\n This example disables collecting data for the GroupDesiredCapacity metric for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.disable_metrics_collection(\n AutoScalingGroupName='string',\n Metrics=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type Metrics: list\n :param Metrics: One or more of the following metrics. If you omit this parameter, all metrics are disabled.\n GroupMinSize\n GroupMaxSize\n GroupDesiredCapacity\n GroupInServiceInstances\n GroupPendingInstances\n GroupStandbyInstances\n GroupTerminatingInstances\n GroupTotalInstances\n (string) --\n \n\n :return: response = client.disable_metrics_collection(\n AutoScalingGroupName='my-auto-scaling-group',\n Metrics=[\n 'GroupDesiredCapacity',\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef enable_metrics_collection(AutoScalingGroupName=None, Metrics=None, Granularity=None):\n \"\"\"\n Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring Your Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example enables data collection for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.enable_metrics_collection(\n AutoScalingGroupName='string',\n Metrics=[\n 'string',\n ],\n Granularity='string'\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type Metrics: list\n :param Metrics: One or more of the following metrics. If you omit this parameter, all metrics are enabled.\n GroupMinSize\n GroupMaxSize\n GroupDesiredCapacity\n GroupInServiceInstances\n GroupPendingInstances\n GroupStandbyInstances\n GroupTerminatingInstances\n GroupTotalInstances\n (string) --\n \n\n :type Granularity: string\n :param Granularity: [REQUIRED]\n The granularity to associate with the metrics to collect. The only valid value is 1Minute .\n \n\n :return: response = client.enable_metrics_collection(\n AutoScalingGroupName='my-auto-scaling-group',\n Granularity='1Minute',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef enter_standby(InstanceIds=None, AutoScalingGroupName=None, ShouldDecrementDesiredCapacity=None):\n \"\"\"\n Moves the specified instances into the standby state.\n For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example puts the specified instance into standby mode.\n Expected Output:\n \n :example: response = client.enter_standby(\n InstanceIds=[\n 'string',\n ],\n AutoScalingGroupName='string',\n ShouldDecrementDesiredCapacity=True|False\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: The IDs of the instances. 
You can specify up to 20 instances.\n (string) --\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ShouldDecrementDesiredCapacity: boolean\n :param ShouldDecrementDesiredCapacity: [REQUIRED]\n Indicates whether to decrement the desired capacity of the Auto Scaling group by the number of instances moved to Standby mode.\n \n\n :rtype: dict\n :return: {\n 'Activities': [\n {\n 'ActivityId': 'string',\n 'AutoScalingGroupName': 'string',\n 'Description': 'string',\n 'Cause': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',\n 'StatusMessage': 'string',\n 'Progress': 123,\n 'Details': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef execute_policy(AutoScalingGroupName=None, PolicyName=None, HonorCooldown=None, MetricValue=None, BreachThreshold=None):\n \"\"\"\n Executes the specified policy.\n See also: AWS API Documentation\n \n Examples\n This example executes the specified Auto Scaling policy for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.execute_policy(\n AutoScalingGroupName='string',\n PolicyName='string',\n HonorCooldown=True|False,\n MetricValue=123.0,\n BreachThreshold=123.0\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: The name of the Auto Scaling group.\n\n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name or ARN of the policy.\n \n\n :type HonorCooldown: boolean\n :param HonorCooldown: Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.\n This parameter is not supported if the policy type is StepScaling .\n For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type MetricValue: float\n :param MetricValue: The metric value to compare to BreachThreshold . This enables you to execute a policy of type StepScaling and determine which step adjustment to use. 
For example, if the breach threshold is 50 and you want to use a step adjustment with a lower bound of 0 and an upper bound of 10, you can set the metric value to 59.\n If you specify a metric value that doesn't correspond to a step adjustment for the policy, the call returns an error.\n This parameter is required if the policy type is StepScaling and not supported otherwise.\n \n\n :type BreachThreshold: float\n :param BreachThreshold: The breach threshold for the alarm.\n This parameter is required if the policy type is StepScaling and not supported otherwise.\n \n\n :return: response = client.execute_policy(\n AutoScalingGroupName='my-auto-scaling-group',\n HonorCooldown=True,\n PolicyName='ScaleIn',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef exit_standby(InstanceIds=None, AutoScalingGroupName=None):\n \"\"\"\n Moves the specified instances out of the standby state.\n For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example moves the specified instance out of standby mode.\n Expected Output:\n \n :example: response = client.exit_standby(\n InstanceIds=[\n 'string',\n ],\n AutoScalingGroupName='string'\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: The IDs of the instances. You can specify up to 20 instances.\n (string) --\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :rtype: dict\n :return: {\n 'Activities': [\n {\n 'ActivityId': 'string',\n 'AutoScalingGroupName': 'string',\n 'Description': 'string',\n 'Cause': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',\n 'StatusMessage': 'string',\n 'Progress': 123,\n 'Details': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
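As a hedged illustration of get_paginator above: describe_auto_scaling_groups is one of this client's paginated operations, so the paginator walks every page without manual NextToken handling. The client setup is an assumption.

    import boto3

    client = boto3.client('autoscaling')  # assumes configured AWS credentials

    # Iterate all Auto Scaling groups, one page at a time.
    paginator = client.get_paginator('describe_auto_scaling_groups')
    for page in paginator.paginate():
        for group in page['AutoScalingGroups']:
            print(group['AutoScalingGroupName'])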
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef put_lifecycle_hook(LifecycleHookName=None, AutoScalingGroupName=None, LifecycleTransition=None, RoleARN=None, NotificationTargetARN=None, NotificationMetadata=None, HeartbeatTimeout=None, DefaultResult=None):\n \"\"\"\n Creates or updates a lifecycle hook for the specified Auto Scaling group.\n A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.\n This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:\n For more information, see Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide .\n If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference .\n See also: AWS API Documentation\n \n Examples\n This example creates a lifecycle hook.\n Expected Output:\n \n :example: response = client.put_lifecycle_hook(\n LifecycleHookName='string',\n AutoScalingGroupName='string',\n LifecycleTransition='string',\n RoleARN='string',\n NotificationTargetARN='string',\n NotificationMetadata='string',\n HeartbeatTimeout=123,\n DefaultResult='string'\n )\n \n \n :type LifecycleHookName: string\n :param LifecycleHookName: [REQUIRED]\n The name of the lifecycle hook.\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LifecycleTransition: string\n :param LifecycleTransition: The instance state to which you want to attach the lifecycle hook. The possible values are:\n autoscaling:EC2_INSTANCE_LAUNCHING\n autoscaling:EC2_INSTANCE_TERMINATING\n This parameter is required for new lifecycle hooks, but optional when updating existing hooks.\n \n\n :type RoleARN: string\n :param RoleARN: The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.\n This parameter is required for new lifecycle hooks, but optional when updating existing hooks.\n \n\n :type NotificationTargetARN: string\n :param NotificationTargetARN: The ARN of the notification target that Amazon EC2 Auto Scaling uses to notify you when an instance is in the transition state for the lifecycle hook. This target can be either an SQS queue or an SNS topic. If you specify an empty string, this overrides the current ARN.\n This operation uses the JSON format when sending notifications to an Amazon SQS queue, and an email key-value pair format when sending notifications to an Amazon SNS topic.\n When you specify a notification target, Amazon EC2 Auto Scaling sends it a test message. Test messages contain the following additional key-value pair: 'Event': 'autoscaling:TEST_NOTIFICATION' .\n \n\n :type NotificationMetadata: string\n :param NotificationMetadata: Contains additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.\n\n :type HeartbeatTimeout: integer\n :param HeartbeatTimeout: The maximum time, in seconds, that can elapse before the lifecycle hook times out. The range is from 30 to 7200 seconds. The default is 3600 seconds (1 hour).\n If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the default action. 
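For example, a hedged sketch of creating a launch hook and extending it with heartbeats (record_lifecycle_action_heartbeat is documented below); every name and ID is an illustrative placeholder.

    import boto3

    client = boto3.client('autoscaling')  # assumes configured AWS credentials

    # Hold newly launched instances in Pending:Wait for up to 300 seconds
    # per heartbeat while custom bootstrap work runs.
    client.put_lifecycle_hook(
        LifecycleHookName='my-launch-hook',  # placeholder
        AutoScalingGroupName='my-asg',       # placeholder
        LifecycleTransition='autoscaling:EC2_INSTANCE_LAUNCHING',
        HeartbeatTimeout=300,
        DefaultResult='ABANDON',
    )
    # While the work is still running, extend the timeout:
    client.record_lifecycle_action_heartbeat(
        LifecycleHookName='my-launch-hook',
        AutoScalingGroupName='my-asg',
        InstanceId='i-0123456789abcdef0',    # hypothetical instance ID
    )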
You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat .\n \n\n :type DefaultResult: string\n :param DefaultResult: Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. This parameter can be either CONTINUE or ABANDON . The default value is ABANDON .\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n LifecycleHookName (string) -- [REQUIRED]\n The name of the lifecycle hook.\n \n AutoScalingGroupName (string) -- [REQUIRED]\n The name of the Auto Scaling group.\n \n LifecycleTransition (string) -- The instance state to which you want to attach the lifecycle hook. The possible values are:\n \n autoscaling:EC2_INSTANCE_LAUNCHING\n autoscaling:EC2_INSTANCE_TERMINATING\n \n This parameter is required for new lifecycle hooks, but optional when updating existing hooks.\n \n RoleARN (string) -- The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.\n This parameter is required for new lifecycle hooks, but optional when updating existing hooks.\n \n NotificationTargetARN (string) -- The ARN of the notification target that Amazon EC2 Auto Scaling uses to notify you when an instance is in the transition state for the lifecycle hook. This target can be either an SQS queue or an SNS topic. If you specify an empty string, this overrides the current ARN.\n This operation uses the JSON format when sending notifications to an Amazon SQS queue, and an email key-value pair format when sending notifications to an Amazon SNS topic.\n When you specify a notification target, Amazon EC2 Auto Scaling sends it a test message. Test messages contain the following additional key-value pair: \"Event\": \"autoscaling:TEST_NOTIFICATION\" .\n \n NotificationMetadata (string) -- Contains additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.\n HeartbeatTimeout (integer) -- The maximum time, in seconds, that can elapse before the lifecycle hook times out. The range is from 30 to 7200 seconds. The default is 3600 seconds (1 hour).\n If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat .\n \n DefaultResult (string) -- Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. This parameter can be either CONTINUE or ABANDON . The default value is ABANDON .\n \n \"\"\"\n pass\n\ndef put_notification_configuration(AutoScalingGroupName=None, TopicARN=None, NotificationTypes=None):\n \"\"\"\n Configures an Auto Scaling group to send notifications when specified events take place. 
Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.\n This configuration overwrites any existing configuration.\n For more information, see Getting SNS Notifications When Your Auto Scaling Group Scales in the Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example adds the specified notification to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.put_notification_configuration(\n AutoScalingGroupName='string',\n TopicARN='string',\n NotificationTypes=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type TopicARN: string\n :param TopicARN: [REQUIRED]\n The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (Amazon SNS) topic.\n \n\n :type NotificationTypes: list\n :param NotificationTypes: [REQUIRED]\n The type of event that causes the notification to be sent. For more information about notification types supported by Amazon EC2 Auto Scaling, see DescribeAutoScalingNotificationTypes .\n (string) --\n \n\n :return: response = client.put_notification_configuration(\n AutoScalingGroupName='my-auto-scaling-group',\n NotificationTypes=[\n 'autoscaling:TEST_NOTIFICATION',\n ],\n TopicARN='arn:aws:sns:us-west-2:123456789012:my-sns-topic',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef put_scaling_policy(AutoScalingGroupName=None, PolicyName=None, PolicyType=None, AdjustmentType=None, MinAdjustmentStep=None, MinAdjustmentMagnitude=None, ScalingAdjustment=None, Cooldown=None, MetricAggregationType=None, StepAdjustments=None, EstimatedInstanceWarmup=None, TargetTrackingConfiguration=None):\n \"\"\"\n Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.\n If you exceed your maximum limit of step adjustments, which by default is 20 per region, the call fails. 
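As a hedged sketch of the TargetTrackingConfiguration parameter documented below, this creates a target tracking policy that holds average CPU near 50 percent; the client and group name are illustrative assumptions.

    import boto3

    client = boto3.client('autoscaling')  # assumes configured AWS credentials

    # Target tracking: Amazon EC2 Auto Scaling adds or removes capacity to
    # keep the predefined metric close to TargetValue.
    response = client.put_scaling_policy(
        AutoScalingGroupName='my-asg',       # placeholder group name
        PolicyName='cpu50-target-tracking',  # placeholder policy name
        PolicyType='TargetTrackingScaling',
        TargetTrackingConfiguration={
            'PredefinedMetricSpecification': {
                'PredefinedMetricType': 'ASGAverageCPUUtilization',
            },
            'TargetValue': 50.0,
        },
    )
    print(response['PolicyARN'])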
For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference .\n See also: AWS API Documentation\n \n Examples\n This example adds the specified policy to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.put_scaling_policy(\n AutoScalingGroupName='string',\n PolicyName='string',\n PolicyType='string',\n AdjustmentType='string',\n MinAdjustmentStep=123,\n MinAdjustmentMagnitude=123,\n ScalingAdjustment=123,\n Cooldown=123,\n MetricAggregationType='string',\n StepAdjustments=[\n {\n 'MetricIntervalLowerBound': 123.0,\n 'MetricIntervalUpperBound': 123.0,\n 'ScalingAdjustment': 123\n },\n ],\n EstimatedInstanceWarmup=123,\n TargetTrackingConfiguration={\n 'PredefinedMetricSpecification': {\n 'PredefinedMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'ALBRequestCountPerTarget',\n 'ResourceLabel': 'string'\n },\n 'CustomizedMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'TargetValue': 123.0,\n 'DisableScaleIn': True|False\n }\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name of the policy.\n \n\n :type PolicyType: string\n :param PolicyType: The policy type. The valid values are SimpleScaling , StepScaling , and TargetTrackingScaling . If the policy type is null, the value is treated as SimpleScaling .\n\n :type AdjustmentType: string\n :param AdjustmentType: The adjustment type. The valid values are ChangeInCapacity , ExactCapacity , and PercentChangeInCapacity .\n This parameter is supported if the policy type is SimpleScaling or StepScaling .\n For more information, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type MinAdjustmentStep: integer\n :param MinAdjustmentStep: Available for backward compatibility. Use MinAdjustmentMagnitude instead.\n\n :type MinAdjustmentMagnitude: integer\n :param MinAdjustmentMagnitude: The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity , the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError .\n This parameter is supported if the policy type is SimpleScaling or StepScaling .\n \n\n :type ScalingAdjustment: integer\n :param ScalingAdjustment: The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.\n This parameter is required if the policy type is SimpleScaling and not supported otherwise.\n \n\n :type Cooldown: integer\n :param Cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.\n This parameter is supported if the policy type is SimpleScaling .\n For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type MetricAggregationType: string\n :param MetricAggregationType: The aggregation type for the CloudWatch metrics. The valid values are Minimum , Maximum , and Average . 
If the aggregation type is null, the value is treated as Average .\n This parameter is supported if the policy type is StepScaling .\n \n\n :type StepAdjustments: list\n :param StepAdjustments: A set of adjustments that enable you to scale based on the size of the alarm breach.\n This parameter is required if the policy type is StepScaling and not supported otherwise.\n (dict) --Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.\n For the following examples, suppose that you have an alarm with a breach threshold of 50:\n To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.\n To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.\n There are a few rules for the step adjustments for your step policy:\n The ranges of your step adjustments can't overlap or have a gap.\n At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.\n At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.\n The upper and lower bound can't be null in the same step adjustment.\n MetricIntervalLowerBound (float) --The lower bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the lower bound is inclusive (the metric must be greater than or equal to the threshold plus the lower bound). Otherwise, it is exclusive (the metric must be greater than the threshold plus the lower bound). A null value indicates negative infinity.\n MetricIntervalUpperBound (float) --The upper bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the upper bound is exclusive (the metric must be less than the threshold plus the upper bound). Otherwise, it is inclusive (the metric must be less than or equal to the threshold plus the upper bound). A null value indicates positive infinity.\n The upper bound must be greater than the lower bound.\n ScalingAdjustment (integer) -- [REQUIRED]The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.\n \n \n\n :type EstimatedInstanceWarmup: integer\n :param EstimatedInstanceWarmup: The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. The default is to use the value specified for the default cooldown period for the group.\n This parameter is supported if the policy type is StepScaling or TargetTrackingScaling .\n \n\n :type TargetTrackingConfiguration: dict\n :param TargetTrackingConfiguration: A target tracking policy.\n This parameter is required if the policy type is TargetTrackingScaling and not supported otherwise.\n PredefinedMetricSpecification (dict) --A predefined metric. You can specify either a predefined metric or a customized metric.\n PredefinedMetricType (string) -- [REQUIRED]The metric type.\n ResourceLabel (string) --Identifies the resource associated with the metric type. 
The following predefined metrics are available:\n ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling group.\n ASGAverageNetworkIn - Average number of bytes received on all network interfaces by the Auto Scaling group.\n ASGAverageNetworkOut - Average number of bytes sent out on all network interfaces by the Auto Scaling group.\n ALBRequestCountPerTarget - Number of requests completed per target in an Application Load Balancer target group.\n For predefined metric types ASGAverageCPUUtilization , ASGAverageNetworkIn , and ASGAverageNetworkOut , the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget , the parameter must be specified in the format: ``app/load-balancer-name /load-balancer-id /targetgroup/target-group-name /target-group-id `` , where ``app/load-balancer-name /load-balancer-id `` is the final portion of the load balancer ARN, and ``targetgroup/target-group-name /target-group-id `` is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.\n CustomizedMetricSpecification (dict) --A customized metric.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Namespace (string) -- [REQUIRED]The namespace of the metric.\n Dimensions (list) --The dimensions of the metric.\n (dict) --Describes the dimension of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value of the dimension.\n \n Statistic (string) -- [REQUIRED]The statistic of the metric.\n Unit (string) --The unit of the metric.\n TargetValue (float) -- [REQUIRED]The target value for the metric.\n DisableScaleIn (boolean) --Indicates whether scaling in by the target tracking policy is disabled. If scaling in is disabled, the target tracking policy doesn't remove instances from the Auto Scaling group. Otherwise, the target tracking policy can remove instances from the Auto Scaling group. The default is disabled.\n \n\n :rtype: dict\n :return: {\n 'PolicyARN': 'string',\n 'Alarms': [\n {\n 'AlarmName': 'string',\n 'AlarmARN': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef put_scheduled_update_group_action(AutoScalingGroupName=None, ScheduledActionName=None, Time=None, StartTime=None, EndTime=None, Recurrence=None, MinSize=None, MaxSize=None, DesiredCapacity=None):\n \"\"\"\n Creates or updates a scheduled scaling action for an Auto Scaling group. 
If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.\n For more information, see Scheduled Scaling in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example adds the specified scheduled action to the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.put_scheduled_update_group_action(\n AutoScalingGroupName='string',\n ScheduledActionName='string',\n Time=datetime(2015, 1, 1),\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n Recurrence='string',\n MinSize=123,\n MaxSize=123,\n DesiredCapacity=123\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ScheduledActionName: string\n :param ScheduledActionName: [REQUIRED]\n The name of this scaling action.\n \n\n :type Time: datetime\n :param Time: This parameter is deprecated.\n\n :type StartTime: datetime\n :param StartTime: The time for this action to start, in 'YYYY-MM-DDThh:mm:ssZ' format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ).\n If you specify Recurrence and StartTime , Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.\n If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.\n \n\n :type EndTime: datetime\n :param EndTime: The time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.\n\n :type Recurrence: string\n :param Recurrence: The recurring schedule for this action, in Unix cron syntax format. For more information about this format, see Crontab .\n\n :type MinSize: integer\n :param MinSize: The minimum size for the Auto Scaling group.\n\n :type MaxSize: integer\n :param MaxSize: The maximum size for the Auto Scaling group.\n\n :type DesiredCapacity: integer\n :param DesiredCapacity: The number of EC2 instances that should be running in the group.\n\n :return: response = client.put_scheduled_update_group_action(\n AutoScalingGroupName='my-auto-scaling-group',\n DesiredCapacity=4,\n EndTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0),\n MaxSize=6,\n MinSize=2,\n ScheduledActionName='my-scheduled-action',\n StartTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0),\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef record_lifecycle_action_heartbeat(LifecycleHookName=None, AutoScalingGroupName=None, LifecycleActionToken=None, InstanceId=None):\n \"\"\"\n Records a heartbeat for the lifecycle action associated with the specified token or instance. 
This extends the timeout by the length of time defined using PutLifecycleHook .\n This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:\n For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example records a lifecycle action heartbeat to keep the instance in a pending state.\n Expected Output:\n \n :example: response = client.record_lifecycle_action_heartbeat(\n LifecycleHookName='string',\n AutoScalingGroupName='string',\n LifecycleActionToken='string',\n InstanceId='string'\n )\n \n \n :type LifecycleHookName: string\n :param LifecycleHookName: [REQUIRED]\n The name of the lifecycle hook.\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LifecycleActionToken: string\n :param LifecycleActionToken: A token that uniquely identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target that you specified when you created the lifecycle hook.\n\n :type InstanceId: string\n :param InstanceId: The ID of the instance.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n LifecycleHookName (string) -- [REQUIRED]\n The name of the lifecycle hook.\n \n AutoScalingGroupName (string) -- [REQUIRED]\n The name of the Auto Scaling group.\n \n LifecycleActionToken (string) -- A token that uniquely identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target that you specified when you created the lifecycle hook.\n InstanceId (string) -- The ID of the instance.\n \n \"\"\"\n pass\n\ndef resume_processes(AutoScalingGroupName=None, ScalingProcesses=None):\n \"\"\"\n Resumes the specified suspended automatic scaling processes, or all suspended processes, for the specified Auto Scaling group.\n For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example resumes the specified suspended scaling process for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.resume_processes(\n AutoScalingGroupName='string',\n ScalingProcesses=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ScalingProcesses: list\n :param ScalingProcesses: One or more of the following processes. If you omit this parameter, all processes are specified.\n Launch\n Terminate\n HealthCheck\n ReplaceUnhealthy\n AZRebalance\n AlarmNotification\n ScheduledActions\n AddToLoadBalancer\n (string) --\n \n\n :return: response = client.resume_processes(\n AutoScalingGroupName='my-auto-scaling-group',\n ScalingProcesses=[\n 'AlarmNotification',\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef set_desired_capacity(AutoScalingGroupName=None, DesiredCapacity=None, HonorCooldown=None):\n \"\"\"\n Sets the size of the specified Auto Scaling group.\n For more information about desired capacity, see What Is Amazon EC2 Auto Scaling? 
in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example sets the desired capacity for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.set_desired_capacity(\n AutoScalingGroupName='string',\n DesiredCapacity=123,\n HonorCooldown=True|False\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type DesiredCapacity: integer\n :param DesiredCapacity: [REQUIRED]\n The number of EC2 instances that should be running in the Auto Scaling group.\n \n\n :type HonorCooldown: boolean\n :param HonorCooldown: Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity. By default, Amazon EC2 Auto Scaling does not honor the cooldown period during manual scaling activities.\n\n :return: response = client.set_desired_capacity(\n AutoScalingGroupName='my-auto-scaling-group',\n DesiredCapacity=2,\n HonorCooldown=True,\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef set_instance_health(InstanceId=None, HealthStatus=None, ShouldRespectGracePeriod=None):\n \"\"\"\n Sets the health status of the specified instance.\n For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example sets the health status of the specified instance to Unhealthy.\n Expected Output:\n \n :example: response = client.set_instance_health(\n InstanceId='string',\n HealthStatus='string',\n ShouldRespectGracePeriod=True|False\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance.\n \n\n :type HealthStatus: string\n :param HealthStatus: [REQUIRED]\n The health status of the instance. Set to Healthy to have the instance remain in service. Set to Unhealthy to have the instance be out of service. Amazon EC2 Auto Scaling terminates and replaces the unhealthy instance.\n \n\n :type ShouldRespectGracePeriod: boolean\n :param ShouldRespectGracePeriod: If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. 
Set this to False , to have the call not respect the grace period associated with the group.\n For more information about the health check grace period, see CreateAutoScalingGroup .\n \n\n :return: response = client.set_instance_health(\n HealthStatus='Unhealthy',\n InstanceId='i-93633f9b',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef set_instance_protection(InstanceIds=None, AutoScalingGroupName=None, ProtectedFromScaleIn=None):\n \"\"\"\n Updates the instance protection settings of the specified instances.\n For more information, see Instance Protection in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example enables instance protection for the specified instance.\n Expected Output:\n This example disables instance protection for the specified instance.\n Expected Output:\n \n :example: response = client.set_instance_protection(\n InstanceIds=[\n 'string',\n ],\n AutoScalingGroupName='string',\n ProtectedFromScaleIn=True|False\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: [REQUIRED]\n One or more instance IDs.\n (string) --\n \n\n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ProtectedFromScaleIn: boolean\n :param ProtectedFromScaleIn: [REQUIRED]\n Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef suspend_processes(AutoScalingGroupName=None, ScalingProcesses=None):\n \"\"\"\n Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.\n If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.\n To resume processes that have been suspended, use ResumeProcesses .\n For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide .\n See also: AWS API Documentation\n \n Examples\n This example suspends the specified scaling process for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.suspend_processes(\n AutoScalingGroupName='string',\n ScalingProcesses=[\n 'string',\n ]\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type ScalingProcesses: list\n :param ScalingProcesses: One or more of the following processes. If you omit this parameter, all processes are specified.\n Launch\n Terminate\n HealthCheck\n ReplaceUnhealthy\n AZRebalance\n AlarmNotification\n ScheduledActions\n AddToLoadBalancer\n (string) --\n \n\n :return: response = client.suspend_processes(\n AutoScalingGroupName='my-auto-scaling-group',\n ScalingProcesses=[\n 'AlarmNotification',\n ],\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef terminate_instance_in_auto_scaling_group(InstanceId=None, ShouldDecrementDesiredCapacity=None):\n \"\"\"\n Terminates the specified instance and optionally adjusts the desired group size.\n This call simply makes a termination request. The instance is not terminated immediately.\n See also: AWS API Documentation\n \n Examples\n This example terminates the specified instance from the specified Auto Scaling group without updating the size of the group. 
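A hedged sketch of such a termination request; the client and instance ID are illustrative assumptions.

    import boto3

    client = boto3.client('autoscaling')  # assumes configured AWS credentials

    # Request termination without shrinking the group, then inspect the
    # resulting scaling activity.
    activity = client.terminate_instance_in_auto_scaling_group(
        InstanceId='i-0123456789abcdef0',   # hypothetical instance ID
        ShouldDecrementDesiredCapacity=False,
    )['Activity']
    print(activity['StatusCode'])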
Auto Scaling launches a replacement instance after the specified instance terminates.\n Expected Output:\n \n :example: response = client.terminate_instance_in_auto_scaling_group(\n InstanceId='string',\n ShouldDecrementDesiredCapacity=True|False\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance.\n \n\n :type ShouldDecrementDesiredCapacity: boolean\n :param ShouldDecrementDesiredCapacity: [REQUIRED]\n Indicates whether terminating the instance also decrements the size of the Auto Scaling group.\n \n\n :rtype: dict\n :return: {\n 'Activity': {\n 'ActivityId': 'string',\n 'AutoScalingGroupName': 'string',\n 'Description': 'string',\n 'Cause': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',\n 'StatusMessage': 'string',\n 'Progress': 123,\n 'Details': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_auto_scaling_group(AutoScalingGroupName=None, LaunchConfigurationName=None, LaunchTemplate=None, MixedInstancesPolicy=None, MinSize=None, MaxSize=None, DesiredCapacity=None, DefaultCooldown=None, AvailabilityZones=None, HealthCheckType=None, HealthCheckGracePeriod=None, PlacementGroup=None, VPCZoneIdentifier=None, TerminationPolicies=None, NewInstancesProtectedFromScaleIn=None, ServiceLinkedRoleARN=None):\n \"\"\"\n Updates the configuration for the specified Auto Scaling group.\n The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.\n To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to false , you must first disable the collection of group metrics. Otherwise, you get an error. 
If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection .\n Note the following:\n See also: AWS API Documentation\n \n Examples\n This example updates the launch configuration of the specified Auto Scaling group.\n Expected Output:\n This example updates the minimum size and maximum size of the specified Auto Scaling group.\n Expected Output:\n This example enables instance protection for the specified Auto Scaling group.\n Expected Output:\n \n :example: response = client.update_auto_scaling_group(\n AutoScalingGroupName='string',\n LaunchConfigurationName='string',\n LaunchTemplate={\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n MixedInstancesPolicy={\n 'LaunchTemplate': {\n 'LaunchTemplateSpecification': {\n 'LaunchTemplateId': 'string',\n 'LaunchTemplateName': 'string',\n 'Version': 'string'\n },\n 'Overrides': [\n {\n 'InstanceType': 'string'\n },\n ]\n },\n 'InstancesDistribution': {\n 'OnDemandAllocationStrategy': 'string',\n 'OnDemandBaseCapacity': 123,\n 'OnDemandPercentageAboveBaseCapacity': 123,\n 'SpotAllocationStrategy': 'string',\n 'SpotInstancePools': 123,\n 'SpotMaxPrice': 'string'\n }\n },\n MinSize=123,\n MaxSize=123,\n DesiredCapacity=123,\n DefaultCooldown=123,\n AvailabilityZones=[\n 'string',\n ],\n HealthCheckType='string',\n HealthCheckGracePeriod=123,\n PlacementGroup='string',\n VPCZoneIdentifier='string',\n TerminationPolicies=[\n 'string',\n ],\n NewInstancesProtectedFromScaleIn=True|False,\n ServiceLinkedRoleARN='string'\n )\n \n \n :type AutoScalingGroupName: string\n :param AutoScalingGroupName: [REQUIRED]\n The name of the Auto Scaling group.\n \n\n :type LaunchConfigurationName: string\n :param LaunchConfigurationName: The name of the launch configuration. If you specify this parameter, you can't specify a launch template or a mixed instances policy.\n\n :type LaunchTemplate: dict\n :param LaunchTemplate: The launch template and version to use to specify the updates. If you specify this parameter, you can't specify a launch configuration or a mixed instances policy.\n LaunchTemplateId (string) --The ID of the launch template. You must specify either a template ID or a template name.\n LaunchTemplateName (string) --The name of the launch template. You must specify either a template name or a template ID.\n Version (string) --The version number, $Latest , or $Default . If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n \n\n :type MixedInstancesPolicy: dict\n :param MixedInstancesPolicy: The mixed instances policy to use to specify the updates. If you specify this parameter, you can't specify a launch configuration or a launch template.\n LaunchTemplate (dict) --The launch template and overrides.\n This parameter is required when creating an Auto Scaling group with a mixed instances policy, but is not required when updating the group.\n LaunchTemplateSpecification (dict) --The launch template to use. You must specify either the launch template ID or launch template name in the request.\n LaunchTemplateId (string) --The ID of the launch template. You must specify either a template ID or a template name.\n LaunchTemplateName (string) --The name of the launch template. 
You must specify either a template name or a template ID.\n Version (string) --The version number, $Latest , or $Default . If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n Overrides (list) --Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type.\n You must specify between 2 and 20 overrides.\n (dict) --Describes an override for a launch template.\n InstanceType (string) --The instance type.\n For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.\n \n InstancesDistribution (dict) --The instances distribution to use.\n If you leave this parameter unspecified when creating the group, the default values are used.\n OnDemandAllocationStrategy (string) --Indicates how to allocate instance types to fulfill On-Demand capacity.\n The only valid value is prioritized , which is also the default value. This strategy uses the order of instance types in the Overrides array of LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.\n OnDemandBaseCapacity (integer) --The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.\n The default value is 0. If you leave this parameter set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.\n OnDemandPercentageAboveBaseCapacity (integer) --Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity .\n The range is 0 to 100. The default value is 100. If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.\n SpotAllocationStrategy (string) --Indicates how to allocate Spot capacity across Spot pools.\n The only valid value is lowest-price , which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify.\n SpotInstancePools (integer) --The number of Spot pools to use to allocate your Spot capacity. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate .\n The range is 1 to 20 and the default is 2.\n SpotMaxPrice (string) --The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave this value blank (which is the default), the maximum Spot price is set at the On-Demand price.\n \n \n\n :type MinSize: integer\n :param MinSize: The minimum size of the Auto Scaling group.\n\n :type MaxSize: integer\n :param MaxSize: The maximum size of the Auto Scaling group.\n\n :type DesiredCapacity: integer\n :param DesiredCapacity: The number of EC2 instances that should be running in the Auto Scaling group. 
This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.\n\n :type DefaultCooldown: integer\n :param DefaultCooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.\n For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type AvailabilityZones: list\n :param AvailabilityZones: One or more Availability Zones for the group.\n (string) --\n \n\n :type HealthCheckType: string\n :param HealthCheckType: The service to use for the health checks. The valid values are EC2 and ELB .\n\n :type HealthCheckGracePeriod: integer\n :param HealthCheckGracePeriod: The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default is 0.\n For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type PlacementGroup: string\n :param PlacementGroup: The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide .\n\n :type VPCZoneIdentifier: string\n :param VPCZoneIdentifier: The ID of the subnet, if you are launching into a VPC. You can specify several subnets in a comma-separated list.\n When you specify VPCZoneIdentifier with AvailabilityZones , ensure that the subnets' Availability Zones match the values you specify for AvailabilityZones .\n For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide .\n \n\n :type TerminationPolicies: list\n :param TerminationPolicies: A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.\n For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide .\n (string) --\n \n\n :type NewInstancesProtectedFromScaleIn: boolean\n :param NewInstancesProtectedFromScaleIn: Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.\n\n :type ServiceLinkedRoleARN: string\n :param ServiceLinkedRoleARN: The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf.\n\n :return: response = client.update_auto_scaling_group(\n AutoScalingGroupName='my-auto-scaling-group',\n LaunchConfigurationName='new-launch-config',\n )\n \n print(response)\n \n \n :returns: \n AutoScalingGroupName (string) -- [REQUIRED]\n The name of the Auto Scaling group.\n \n LaunchConfigurationName (string) -- The name of the launch configuration. If you specify this parameter, you can't specify a launch template or a mixed instances policy.\n LaunchTemplate (dict) -- The launch template and version to use to specify the updates. If you specify this parameter, you can't specify a launch configuration or a mixed instances policy.\n \n LaunchTemplateId (string) --The ID of the launch template. You must specify either a template ID or a template name.\n \n LaunchTemplateName (string) --The name of the launch template. You must specify either a template name or a template ID.\n \n Version (string) --The version number, $Latest , or $Default . 
If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n \n \n \n MixedInstancesPolicy (dict) -- The mixed instances policy to use to specify the updates. If you specify this parameter, you can't specify a launch configuration or a launch template.\n \n LaunchTemplate (dict) --The launch template and overrides.\n This parameter is required when creating an Auto Scaling group with a mixed instances policy, but is not required when updating the group.\n \n LaunchTemplateSpecification (dict) --The launch template to use. You must specify either the launch template ID or launch template name in the request.\n \n LaunchTemplateId (string) --The ID of the launch template. You must specify either a template ID or a template name.\n \n LaunchTemplateName (string) --The name of the launch template. You must specify either a template name or a template ID.\n \n Version (string) --The version number, $Latest , or $Default . If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n \n \n \n Overrides (list) --Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type.\n You must specify between 2 and 20 overrides.\n \n (dict) --Describes an override for a launch template.\n \n InstanceType (string) --The instance type.\n For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.\n \n \n \n \n \n \n \n InstancesDistribution (dict) --The instances distribution to use.\n If you leave this parameter unspecified when creating the group, the default values are used.\n \n OnDemandAllocationStrategy (string) --Indicates how to allocate instance types to fulfill On-Demand capacity.\n The only valid value is prioritized , which is also the default value. This strategy uses the order of instance types in the Overrides array of LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.\n \n OnDemandBaseCapacity (integer) --The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.\n The default value is 0. If you leave this parameter set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.\n \n OnDemandPercentageAboveBaseCapacity (integer) --Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity .\n The range is 0 to 100. The default value is 100. 
If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.\n \n SpotAllocationStrategy (string) --Indicates how to allocate Spot capacity across Spot pools.\n The only valid value is lowest-price , which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify.\n \n SpotInstancePools (integer) --The number of Spot pools to use to allocate your Spot capacity. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate .\n The range is 1 to 20 and the default is 2.\n \n SpotMaxPrice (string) --The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave this value blank (which is the default), the maximum Spot price is set at the On-Demand price.\n \n \n \n \n \n MinSize (integer) -- The minimum size of the Auto Scaling group.\n MaxSize (integer) -- The maximum size of the Auto Scaling group.\n DesiredCapacity (integer) -- The number of EC2 instances that should be running in the Auto Scaling group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.\n DefaultCooldown (integer) -- The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.\n For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide .\n \n AvailabilityZones (list) -- One or more Availability Zones for the group.\n \n (string) --\n \n \n HealthCheckType (string) -- The service to use for the health checks. The valid values are EC2 and ELB .\n HealthCheckGracePeriod (integer) -- The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default is 0.\n For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide .\n \n PlacementGroup (string) -- The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide .\n VPCZoneIdentifier (string) -- The ID of the subnet, if you are launching into a VPC. You can specify several subnets in a comma-separated list.\n When you specify VPCZoneIdentifier with AvailabilityZones , ensure that the subnets' Availability Zones match the values you specify for AvailabilityZones .\n For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide .\n \n TerminationPolicies (list) -- A standalone termination policy or a list of termination policies used to select the instance to terminate. 
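A hedged sketch that pulls the MixedInstancesPolicy pieces above together in a single update; all names and values are illustrative assumptions.

    import boto3

    client = boto3.client('autoscaling')  # assumes configured AWS credentials

    # Switch an existing group to a launch template with two instance type
    # overrides and a mixed On-Demand/Spot distribution.
    client.update_auto_scaling_group(
        AutoScalingGroupName='my-asg',  # placeholder group name
        MixedInstancesPolicy={
            'LaunchTemplate': {
                'LaunchTemplateSpecification': {
                    'LaunchTemplateName': 'my-template',  # placeholder
                    'Version': '$Default',
                },
                'Overrides': [
                    {'InstanceType': 'c5.large'},
                    {'InstanceType': 'm5.large'},
                ],
            },
            'InstancesDistribution': {
                'OnDemandBaseCapacity': 1,
                'OnDemandPercentageAboveBaseCapacity': 50,
                'SpotAllocationStrategy': 'lowest-price',
                'SpotInstancePools': 2,
            },
        },
    )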
The policies are executed in the order that they are listed.\n For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide .\n \n (string) --\n \n \n NewInstancesProtectedFromScaleIn (boolean) -- Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.\n ServiceLinkedRoleARN (string) -- The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6395554542541504, "alphanum_fraction": 0.6454609036445618, "avg_line_length": 37.72349166870117, "blob_id": "fedb17a63db906f5ceb23e98602128f8612db695", "content_id": "94468402b3f77c1a82ecd0d99b5a9a26f08d4c6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18627, "license_type": "permissive", "max_line_length": 392, "num_lines": 481, "path": "/pyboto3/transcribeservice.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_vocabulary(VocabularyName=None, LanguageCode=None, Phrases=None):\n \"\"\"\n Creates a new custom vocabulary that you can use to change the way Amazon Transcribe handles transcription of an audio file.\n See also: AWS API Documentation\n \n \n :example: response = client.create_vocabulary(\n VocabularyName='string',\n LanguageCode='en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n Phrases=[\n 'string',\n ]\n )\n \n \n :type VocabularyName: string\n :param VocabularyName: [REQUIRED]\n The name of the vocabulary. The name must be unique within an AWS account. 
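A hedged usage sketch for create_vocabulary; the vocabulary name and phrases are illustrative assumptions.

    import boto3

    transcribe = boto3.client('transcribe')  # assumes configured AWS credentials

    # Register domain-specific terms so they are transcribed correctly.
    transcribe.create_vocabulary(
        VocabularyName='product-terms',    # placeholder, unique per account
        LanguageCode='en-US',
        Phrases=['pyboto3', 'WavyCloud'],  # placeholder phrases
    )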
The name is case-sensitive.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language code of the vocabulary entries.\n \n\n :type Phrases: list\n :param Phrases: [REQUIRED]\n An array of strings that contains the vocabulary entries.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'VocabularyName': 'string',\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'VocabularyState': 'PENDING'|'READY'|'FAILED',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FailureReason': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_transcription_job(TranscriptionJobName=None):\n \"\"\"\n Deletes a previously submitted transcription job along with any other generated results such as the transcription, models, and so on.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_transcription_job(\n TranscriptionJobName='string'\n )\n \n \n :type TranscriptionJobName: string\n :param TranscriptionJobName: [REQUIRED]\n The name of the transcription job to be deleted.\n \n\n \"\"\"\n pass\n\ndef delete_vocabulary(VocabularyName=None):\n \"\"\"\n Deletes a vocabulary from Amazon Transcribe.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_vocabulary(\n VocabularyName='string'\n )\n \n \n :type VocabularyName: string\n :param VocabularyName: [REQUIRED]\n The name of the vocabulary to delete.\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_transcription_job(TranscriptionJobName=None):\n \"\"\"\n Returns information about a transcription job. To see the status of the job, check the TranscriptionJobStatus field. 
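A minimal polling sketch, assuming client is an Amazon Transcribe client created elsewhere and 'my-job' is a placeholder job name:\n import time\n while client.get_transcription_job(TranscriptionJobName='my-job')['TranscriptionJob']['TranscriptionJobStatus'] == 'IN_PROGRESS':\n time.sleep(30) # poll until the job leaves the IN_PROGRESS state\n 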
If the status is COMPLETED , the job is finished and you can find the results at the location specified in the TranscriptFileUri field.\n See also: AWS API Documentation\n \n \n :example: response = client.get_transcription_job(\n TranscriptionJobName='string'\n )\n \n \n :type TranscriptionJobName: string\n :param TranscriptionJobName: [REQUIRED]\n The name of the job.\n \n\n :rtype: dict\n :return: {\n 'TranscriptionJob': {\n 'TranscriptionJobName': 'string',\n 'TranscriptionJobStatus': 'IN_PROGRESS'|'FAILED'|'COMPLETED',\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'MediaSampleRateHertz': 123,\n 'MediaFormat': 'mp3'|'mp4'|'wav'|'flac',\n 'Media': {\n 'MediaFileUri': 'string'\n },\n 'Transcript': {\n 'TranscriptFileUri': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'CompletionTime': datetime(2015, 1, 1),\n 'FailureReason': 'string',\n 'Settings': {\n 'VocabularyName': 'string',\n 'ShowSpeakerLabels': True|False,\n 'MaxSpeakerLabels': 123,\n 'ChannelIdentification': True|False\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_vocabulary(VocabularyName=None):\n \"\"\"\n Gets information about a vocabulary.\n See also: AWS API Documentation\n \n \n :example: response = client.get_vocabulary(\n VocabularyName='string'\n )\n \n \n :type VocabularyName: string\n :param VocabularyName: [REQUIRED]\n The name of the vocabulary to return information about. The name is case-sensitive.\n \n\n :rtype: dict\n :return: {\n 'VocabularyName': 'string',\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'VocabularyState': 'PENDING'|'READY'|'FAILED',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FailureReason': 'string',\n 'DownloadUri': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_transcription_jobs(Status=None, JobNameContains=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists transcription jobs with the specified status.\n See also: AWS API Documentation\n \n \n :example: response = client.list_transcription_jobs(\n Status='IN_PROGRESS'|'FAILED'|'COMPLETED',\n JobNameContains='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Status: string\n :param Status: When specified, returns only transcription jobs with the specified status. Jobs are ordered by creation date, with the newest jobs returned first. If you don't specify a status, Amazon Transcribe returns all transcription jobs ordered by creation date.\n\n :type JobNameContains: string\n :param JobNameContains: When specified, the jobs returned in the list are limited to jobs whose name contains the specified string.\n\n :type NextToken: string\n :param NextToken: If the result of the previous request to ListTranscriptionJobs was truncated, include the NextToken to fetch the next set of jobs.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of jobs to return in the response.
If there are fewer results in the list, this response contains only the actual results.\n\n :rtype: dict\n :return: {\n 'Status': 'IN_PROGRESS'|'FAILED'|'COMPLETED',\n 'NextToken': 'string',\n 'TranscriptionJobSummaries': [\n {\n 'TranscriptionJobName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'CompletionTime': datetime(2015, 1, 1),\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'TranscriptionJobStatus': 'IN_PROGRESS'|'FAILED'|'COMPLETED',\n 'FailureReason': 'string',\n 'OutputLocationType': 'CUSTOMER_BUCKET'|'SERVICE_BUCKET'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_vocabularies(NextToken=None, MaxResults=None, StateEquals=None, NameContains=None):\n \"\"\"\n Returns a list of vocabularies that match the specified criteria. If no criteria are specified, returns the entire list of vocabularies.\n See also: AWS API Documentation\n \n \n :example: response = client.list_vocabularies(\n NextToken='string',\n MaxResults=123,\n StateEquals='PENDING'|'READY'|'FAILED',\n NameContains='string'\n )\n \n \n :type NextToken: string\n :param NextToken: If the result of the previous request to ListVocabularies was truncated, include the NextToken to fetch the next set of jobs.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of vocabularies to return in the response. If there are fewer results in the list, this response contains only the actual results.\n\n :type StateEquals: string\n :param StateEquals: When specified, only returns vocabularies with the VocabularyState field equal to the specified state.\n\n :type NameContains: string\n :param NameContains: When specified, the vocabularies returned in the list are limited to vocabularies whose name contains the specified string. The search is case-insensitive, ListVocabularies will return both 'vocabularyname' and 'VocabularyName' in the response list.\n\n :rtype: dict\n :return: {\n 'Status': 'IN_PROGRESS'|'FAILED'|'COMPLETED',\n 'NextToken': 'string',\n 'Vocabularies': [\n {\n 'VocabularyName': 'string',\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'VocabularyState': 'PENDING'|'READY'|'FAILED'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef start_transcription_job(TranscriptionJobName=None, LanguageCode=None, MediaSampleRateHertz=None, MediaFormat=None, Media=None, OutputBucketName=None, Settings=None):\n \"\"\"\n Starts an asynchronous job to transcribe speech to text.\n See also: AWS API Documentation\n \n \n :example: response = client.start_transcription_job(\n TranscriptionJobName='string',\n LanguageCode='en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n MediaSampleRateHertz=123,\n MediaFormat='mp3'|'mp4'|'wav'|'flac',\n Media={\n 'MediaFileUri': 'string'\n },\n OutputBucketName='string',\n Settings={\n 'VocabularyName': 'string',\n 'ShowSpeakerLabels': True|False,\n 'MaxSpeakerLabels': 123,\n 'ChannelIdentification': True|False\n }\n )\n \n \n :type TranscriptionJobName: string\n :param TranscriptionJobName: [REQUIRED]\n The name of the job. Note that you can't use the strings '.' or '..' by themselves as the job name. 
The name must also be unique within an AWS account.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language code for the language used in the input media file.\n \n\n :type MediaSampleRateHertz: integer\n :param MediaSampleRateHertz: The sample rate, in Hertz, of the audio track in the input media file.\n\n :type MediaFormat: string\n :param MediaFormat: [REQUIRED]\n The format of the input media file.\n \n\n :type Media: dict\n :param Media: [REQUIRED]\n An object that describes the input media for a transcription job.\n MediaFileUri (string) --The S3 location of the input media file. The URI must be in the same region as the API endpoint that you are calling. The general form is:\n https://s3-<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>\n For example:\n https://s3-us-east-1.amazonaws.com/examplebucket/example.mp4https://s3-us-east-1.amazonaws.com/examplebucket/mediadocs/example.mp4\n For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide .\n \n\n :type OutputBucketName: string\n :param OutputBucketName: The location where the transcription is stored.\n If you set the OutputBucketName , Amazon Transcribe puts the transcription in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles .\n If you don't set the OutputBucketName , Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.\n \n\n :type Settings: dict\n :param Settings: A Settings object that provides optional settings for a transcription job.\n VocabularyName (string) --The name of a vocabulary to use when processing the transcription job.\n ShowSpeakerLabels (boolean) --Determines whether the transcription job uses speaker recognition to identify different speakers in the input audio. Speaker recognition labels individual speakers in the audio file. If you set the ShowSpeakerLabels field to true, you must also set the maximum number of speaker labels MaxSpeakerLabels field.\n You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException .\n MaxSpeakerLabels (integer) --The maximum number of speakers to identify in the input audio. If there are more speakers in the audio than this number, multiple speakers will be identified as a single speaker. If you specify the MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true.\n ChannelIdentification (boolean) --Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.\n Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.\n You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. 
If you set both, your request returns a BadRequestException .\n \n\n :rtype: dict\n :return: {\n 'TranscriptionJob': {\n 'TranscriptionJobName': 'string',\n 'TranscriptionJobStatus': 'IN_PROGRESS'|'FAILED'|'COMPLETED',\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'MediaSampleRateHertz': 123,\n 'MediaFormat': 'mp3'|'mp4'|'wav'|'flac',\n 'Media': {\n 'MediaFileUri': 'string'\n },\n 'Transcript': {\n 'TranscriptFileUri': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'CompletionTime': datetime(2015, 1, 1),\n 'FailureReason': 'string',\n 'Settings': {\n 'VocabularyName': 'string',\n 'ShowSpeakerLabels': True|False,\n 'MaxSpeakerLabels': 123,\n 'ChannelIdentification': True|False\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_vocabulary(VocabularyName=None, LanguageCode=None, Phrases=None):\n \"\"\"\n Updates an existing vocabulary with new values. The UpdateVocabulary operation overwrites all of the existing information with the values that you provide in the request.\n See also: AWS API Documentation\n \n \n :example: response = client.update_vocabulary(\n VocabularyName='string',\n LanguageCode='en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n Phrases=[\n 'string',\n ]\n )\n \n \n :type VocabularyName: string\n :param VocabularyName: [REQUIRED]\n The name of the vocabulary to update. The name is case-sensitive.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language code of the vocabulary entries.\n \n\n :type Phrases: list\n :param Phrases: [REQUIRED]\n An array of strings containing the vocabulary entries.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'VocabularyName': 'string',\n 'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'VocabularyState': 'PENDING'|'READY'|'FAILED'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6872178316116333, "alphanum_fraction": 0.6900917887687683, "avg_line_length": 57.433387756347656, "blob_id": "6eefd09b5998a4275e551d6a048284369b93e828", "content_id": "720fea47a9f2edd62c5b56ef64ec1b0ee60b6d9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176762, "license_type": "permissive", "max_line_length": 974, "num_lines": 3025, "path": "/pyboto3/cloudformation.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_update_stack(StackName=None, ClientRequestToken=None):\n \"\"\"\n Cancels an update on the specified stack. If the call completes successfully, the stack rolls back the update and reverts to the previous stack configuration.\n See also: AWS API Documentation\n \n Examples\n This example cancels an update of the specified stack.\n Expected Output:\n \n :example: response = client.cancel_update_stack(\n StackName='string',\n ClientRequestToken='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or the unique stack ID that is associated with the stack.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for this CancelUpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to cancel an update on a stack with the same name. You might retry CancelUpdateStack requests to ensure that AWS CloudFormation successfully received them.\n\n :return: response = client.cancel_update_stack(\n StackName='MyStack',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef continue_update_rollback(StackName=None, RoleARN=None, ResourcesToSkip=None, ClientRequestToken=None):\n \"\"\"\n For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.\n A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation cannot roll back all changes after a failed stack update. For example, you might have a stack that is rolling back to an old database instance that was deleted outside of AWS CloudFormation. Because AWS CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.\n See also: AWS API Documentation\n \n \n :example: response = client.continue_update_rollback(\n StackName='string',\n RoleARN='string',\n ResourcesToSkip=[\n 'string',\n ],\n ClientRequestToken='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or the unique ID of the stack that you want to continue rolling back.\n Note\n Don't specify the name of a nested stack (a stack that was created by using the AWS::CloudFormation::Stack resource). 
Instead, use this operation on the parent stack (the stack that contains the AWS::CloudFormation::Stack resource).\n \n\n :type RoleARN: string\n :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to roll back the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.\n \n\n :type ResourcesToSkip: list\n :param ResourcesToSkip: A list of the logical IDs of the resources that AWS CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was cancelled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.\n Warning\n Specify this property to skip rolling back resources that AWS CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. AWS CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.\n Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.\n To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID . If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack ) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS , DELETE_COMPLETE , or DELETE_FAILED .\n Note\n Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy .\n (string) --\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. 
You might retry ContinueUpdateRollback requests to ensure that AWS CloudFormation successfully received them.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_change_set(StackName=None, TemplateBody=None, TemplateURL=None, UsePreviousTemplate=None, Parameters=None, Capabilities=None, ResourceTypes=None, RoleARN=None, RollbackConfiguration=None, NotificationARNs=None, Tags=None, ChangeSetName=None, ClientToken=None, Description=None, ChangeSetType=None):\n \"\"\"\n Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.\n To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE . To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.\n When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set.\n See also: AWS API Documentation\n \n \n :example: response = client.create_change_set(\n StackName='string',\n TemplateBody='string',\n TemplateURL='string',\n UsePreviousTemplate=True|False,\n Parameters=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n Capabilities=[\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n ResourceTypes=[\n 'string',\n ],\n RoleARN='string',\n RollbackConfiguration={\n 'RollbackTriggers': [\n {\n 'Arn': 'string',\n 'Type': 'string'\n },\n ],\n 'MonitoringTimeInMinutes': 123\n },\n NotificationARNs=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n ChangeSetName='string',\n ClientToken='string',\n Description='string',\n ChangeSetType='CREATE'|'UPDATE'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values.\n \n\n :type TemplateBody: string\n :param TemplateBody: A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by comparing this template with the template of the stack that you specified.\n Conditional: You must specify only TemplateBody or TemplateURL .\n \n\n :type TemplateURL: string\n :param TemplateURL: The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket. 
AWS CloudFormation generates the change set by comparing this template with the stack that you specified.\n Conditional: You must specify only TemplateBody or TemplateURL .\n \n\n :type UsePreviousTemplate: boolean\n :param UsePreviousTemplate: Whether to reuse the template that is associated with the stack to create the change set.\n\n :type Parameters: list\n :param Parameters: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. This field is returned only for ` SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n \n \n\n :type Capabilities: list\n :param Capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.\n If you have IAM resources, you can specify either capability.\n If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM .\n If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.\n If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n AWS::IAM::AccessKey\n AWS::IAM::Group\n AWS::IAM::InstanceProfile\n AWS::IAM::Policy\n AWS::IAM::Role\n AWS::IAM::User\n AWS::IAM::UserToGroupAddition\n For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .\n CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.\n Note\n This capability does not apply to creating change sets, and specifying it when creating change sets has no effect. Also, change sets do not currently support nested stacks.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specify this capability.\n For more information on macros, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates .\n (string) --\n \n\n :type ResourceTypes: list\n :param ResourceTypes: The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance .\n If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for AWS CloudFormation. For more information, see Controlling Access with AWS Identity and Access Management in the AWS CloudFormation User Guide.\n (string) --\n \n\n :type RoleARN: string\n :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes when executing the change set. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.\n \n\n :type RollbackConfiguration: dict\n :param RollbackConfiguration: The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.\n RollbackTriggers (list) --The triggers to monitor during stack creation or update actions.\n By default, AWS CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:\n To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.\n To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.\n To remove all currently specified triggers, specify an empty list for this parameter.\n If a specified trigger is missing, the entire stack operation fails and is rolled back.\n (dict) --A rollback trigger AWS CloudFormation monitors during creation and updating of stacks.
If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.\n Arn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the rollback trigger.\n If a specified trigger is missing, the entire stack operation fails and is rolled back.\n Type (string) -- [REQUIRED]The resource type of the rollback trigger. Currently, AWS::CloudWatch::Alarm is the only supported resource type.\n \n MonitoringTimeInMinutes (integer) --The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.\n The default is 0 minutes.\n If you specify a monitoring period but do not specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack , for example) as necessary.\n If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.\n \n\n :type NotificationARNs: list\n :param NotificationARNs: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that AWS CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list.\n (string) --\n \n\n :type Tags: list\n :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags.\n (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.\n Key (string) -- [REQUIRED]\n Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .\n Value (string) -- [REQUIRED]\n Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.\n \n \n\n :type ChangeSetName: string\n :param ChangeSetName: [REQUIRED]\n The name of the change set. The name must be unique among all change sets that are associated with the specified stack.\n A change set name can contain only alphanumeric, case sensitive characters and hyphens. It must start with an alphabetic character and cannot exceed 128 characters.\n \n\n :type ClientToken: string\n :param ClientToken: A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them.\n\n :type Description: string\n :param Description: A description to help you identify this change set.\n\n :type ChangeSetType: string\n :param ChangeSetType: The type of change set operation. To create a change set for a new stack, specify CREATE . To create a change set for an existing stack, specify UPDATE .\n If you create a change set for a new stack, AWS Cloudformation creates a stack with a unique stack ID, but no template or resources. 
The stack will be in the ` REVIEW_IN_PROGRESS http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#d0e11995`__ state until you execute the change set.\n By default, AWS CloudFormation specifies UPDATE . You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack.\n \n\n :rtype: dict\n :return: {\n 'Id': 'string',\n 'StackId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_stack(StackName=None, TemplateBody=None, TemplateURL=None, Parameters=None, DisableRollback=None, RollbackConfiguration=None, TimeoutInMinutes=None, NotificationARNs=None, Capabilities=None, ResourceTypes=None, RoleARN=None, OnFailure=None, StackPolicyBody=None, StackPolicyURL=None, Tags=None, ClientRequestToken=None, EnableTerminationProtection=None):\n \"\"\"\n Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.\n See also: AWS API Documentation\n \n \n :example: response = client.create_stack(\n StackName='string',\n TemplateBody='string',\n TemplateURL='string',\n Parameters=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n DisableRollback=True|False,\n RollbackConfiguration={\n 'RollbackTriggers': [\n {\n 'Arn': 'string',\n 'Type': 'string'\n },\n ],\n 'MonitoringTimeInMinutes': 123\n },\n TimeoutInMinutes=123,\n NotificationARNs=[\n 'string',\n ],\n Capabilities=[\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n ResourceTypes=[\n 'string',\n ],\n RoleARN='string',\n OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',\n StackPolicyBody='string',\n StackPolicyURL='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n ClientRequestToken='string',\n EnableTerminationProtection=True|False\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.\n Note\n A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.\n \n\n :type TemplateBody: string\n :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.\n \n\n :type TemplateURL: string\n :param TemplateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.\n \n\n :type Parameters: list\n :param Parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. 
If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. This field is returned only for ` SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n \n \n\n :type DisableRollback: boolean\n :param DisableRollback: Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure , but not both.\n Default: false\n \n\n :type RollbackConfiguration: dict\n :param RollbackConfiguration: The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.\n RollbackTriggers (list) --The triggers to monitor during stack creation or update actions.\n By default, AWS CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:\n To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.\n To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.\n To remove all currently specified triggers, specify an empty list for this parameter.\n If a specified trigger is missing, the entire stack operation fails and is rolled back.\n (dict) --A rollback trigger AWS CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.\n Arn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the rollback trigger.\n If a specified trigger is missing, the entire stack operation fails and is rolled back.\n Type (string) -- [REQUIRED]The resource type of the rollback trigger. Currently, AWS::CloudWatch::Alarm is the only supported resource type.\n \n MonitoringTimeInMinutes (integer) --The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.\n The default is 0 minutes.\n If you specify a monitoring period but do not specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations.
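For illustration only, a complete RollbackConfiguration value following the structure above might look like this (the alarm ARN and the 15-minute period are placeholders):\n {\n 'RollbackTriggers': [\n {\n 'Arn': 'arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm',\n 'Type': 'AWS::CloudWatch::Alarm'\n }\n ],\n 'MonitoringTimeInMinutes': 15\n }\n 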
You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack , for example) as necessary.\n If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.\n \n\n :type TimeoutInMinutes: integer\n :param TimeoutInMinutes: The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false , the stack will be rolled back.\n\n :type NotificationARNs: list\n :param NotificationARNs: The Simple Notification Service (SNS) topic ARNs to publish stack related events. You can find your SNS topic ARNs using the SNS console or your Command Line Interface (CLI).\n (string) --\n \n\n :type Capabilities: list\n :param Capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.\n If you have IAM resources, you can specify either capability.\n If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM .\n If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.\n If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n AWS::IAM::AccessKey\n AWS::IAM::Group\n AWS::IAM::InstanceProfile\n AWS::IAM::Policy\n AWS::IAM::Role\n AWS::IAM::User\n AWS::IAM::UserToGroupAddition\n For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .\n CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation. Change sets do not currently support nested stacks. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.\n Warning\n You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates.
Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.\n For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates .\n (string) --\n \n\n :type ResourceTypes: list\n :param ResourceTypes: The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance . Use the following syntax to describe template resource types: AWS::* (for all AWS resource), Custom::* (for all custom resources), Custom::*logical_ID* `` (for a specific custom resource), ``AWS::*service_name* ::* (for all resources of a particular AWS service), and ``AWS::service_name ::resource_logical_ID `` (for a specific AWS resource).\n If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management .\n (string) --\n \n\n :type RoleARN: string\n :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.\n \n\n :type OnFailure: string\n :param OnFailure: Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback , but not both.\n Default: ROLLBACK\n \n\n :type StackPolicyBody: string\n :param StackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide . You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.\n\n :type StackPolicyURL: string\n :param StackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.\n\n :type Tags: list\n :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.\n (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.\n Key (string) -- [REQUIRED]\n Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .\n Value (string) -- [REQUIRED]\n Required . A string containing the value for this tag. 
You can specify a maximum of 256 characters for a tag value.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.\n All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1 , then all the StackEvents generated by that operation will have ClientRequestToken set as token1 .\n In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID , which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002 .\n \n\n :type EnableTerminationProtection: boolean\n :param EnableTerminationProtection: Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the AWS CloudFormation User Guide . Termination protection is disabled on stacks by default.\n For nested stacks , termination protection is set on the root stack and cannot be changed directly on the nested stack.\n \n\n :rtype: dict\n :return: {\n 'StackId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_stack_instances(StackSetName=None, Accounts=None, Regions=None, ParameterOverrides=None, OperationPreferences=None, OperationId=None):\n \"\"\"\n Creates stack instances for the specified accounts, within the specified regions. A stack instance refers to a stack in a specific account and region. 
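For example, a stack set deployed to two accounts across three regions comprises six stack instances. A minimal call sketch (the stack set name, account ID, and regions are illustrative placeholders, and client is assumed to be a CloudFormation client created elsewhere):\n response = client.create_stack_instances(\n StackSetName='my-stack-set',\n Accounts=['123456789012'],\n Regions=['us-east-1', 'eu-west-1']\n )\n 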
Accounts and Regions are required parameters; you must specify at least one account and one region.\n See also: AWS API Documentation\n \n \n :example: response = client.create_stack_instances(\n StackSetName='string',\n Accounts=[\n 'string',\n ],\n Regions=[\n 'string',\n ],\n ParameterOverrides=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n OperationPreferences={\n 'RegionOrder': [\n 'string',\n ],\n 'FailureToleranceCount': 123,\n 'FailureTolerancePercentage': 123,\n 'MaxConcurrentCount': 123,\n 'MaxConcurrentPercentage': 123\n },\n OperationId='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to create stack instances from.\n \n\n :type Accounts: list\n :param Accounts: [REQUIRED]\n The names of one or more AWS accounts that you want to create stack instances in the specified region(s) for.\n (string) --\n \n\n :type Regions: list\n :param Regions: [REQUIRED]\n The names of one or more regions where you want to create stack instances using the specified AWS account(s).\n (string) --\n \n\n :type ParameterOverrides: list\n :param ParameterOverrides: A list of stack set parameters whose values you want to override in the selected stack instances.\n Any overridden parameter values will be applied to all stack instances in the specified accounts and regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance operations:\n To override the current value for a parameter, include the parameter and specify its value.\n To leave a parameter set to its present value, you can do one of the following:\n Do not include the parameter in the list.\n Include the parameter and specify UsePreviousValue as true . (You cannot specify both a value and set UsePreviousValue to true .)\n To set all overridden parameters back to the values specified in the stack set, specify a parameter list but do not include any parameters.\n To leave all parameters set to their present values, do not specify this property at all.\n During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.\n You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key.
This field is returned only for ` SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n \n \n\n :type OperationPreferences: dict\n :param OperationPreferences: Preferences for how AWS CloudFormation performs this stack set operation.\n RegionOrder (list) --The order of the regions in which you want to perform the stack operation.\n (string) --\n FailureToleranceCount (integer) --The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).\n FailureTolerancePercentage (integer) --The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.\n Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage , but not both.\n MaxConcurrentCount (integer) --The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount ; MaxConcurrentCount is at most one more than the FailureToleranceCount .\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n MaxConcurrentPercentage (integer) --The maximum percentage of accounts in which to perform this operation at one time.\n When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n \n\n :type OperationId: string\n :param OperationId: The unique identifier for this stack set operation.\n The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times.
You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.\n If you don't specify an operation ID, the SDK generates one automatically.\n Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED .\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_stack_set(StackSetName=None, Description=None, TemplateBody=None, TemplateURL=None, Parameters=None, Capabilities=None, Tags=None, AdministrationRoleARN=None, ExecutionRoleName=None, ClientRequestToken=None):\n \"\"\"\n Creates a stack set.\n See also: AWS API Documentation\n \n \n :example: response = client.create_stack_set(\n StackSetName='string',\n Description='string',\n TemplateBody='string',\n TemplateURL='string',\n Parameters=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n Capabilities=[\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n AdministrationRoleARN='string',\n ExecutionRoleName='string',\n ClientRequestToken='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name to associate with the stack set. The name must be unique in the region where you create your stack set.\n Note\n A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and can't be longer than 128 characters.\n \n\n :type Description: string\n :param Description: A description of the stack set. You can use the description to identify the stack set's purpose or other important information.\n\n :type TemplateBody: string\n :param TemplateBody: The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.\n \n\n :type TemplateURL: string\n :param TemplateURL: The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.\n \n\n :type Parameters: list\n :param Parameters: The input parameters for the stack set template.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. 
This field is returned only for ` SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n    \n    \n\n    :type Capabilities: list\n    :param Capabilities: In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for AWS CloudFormation to create the stack set and related stack instances.\n    CAPABILITY_IAM and CAPABILITY_NAMED_IAM : Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.\n    If you have IAM resources, you can specify either capability.\n    If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM .\n    If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.\n    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n    AWS::IAM::AccessKey\n    AWS::IAM::Group\n    AWS::IAM::InstanceProfile\n    AWS::IAM::Policy\n    AWS::IAM::Role\n    AWS::IAM::User\n    AWS::IAM::UserToGroupAddition\n    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .\n    CAPABILITY_AUTO_EXPAND : Some templates contain macros. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates .\n    Note\n    Stack sets do not currently support macros in stack templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.) Even if you specify this capability, if you include a macro in your template the stack set operation will fail.\n    (string) --\n    \n\n    :type Tags: list\n    :param Tags: The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified.\n    If you specify tags as part of a CreateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created.\n    (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.\n    Key (string) -- [REQUIRED]\n    Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .\n    Value (string) -- [REQUIRED]\n    Required . A string containing the value for this tag.
You can specify a maximum of 256 characters for a tag value.\n    \n    \n\n    :type AdministrationRoleARN: string\n    :param AdministrationRoleARN: The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.\n    Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide .\n    \n\n    :type ExecutionRoleName: string\n    :param ExecutionRoleName: The name of the IAM execution role to use to create the stack set. If you do not specify an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.\n    Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.\n    \n\n    :type ClientRequestToken: string\n    :param ClientRequestToken: A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that AWS CloudFormation successfully received them.\n    If you don't specify an operation ID, the SDK generates one automatically.\n    This field is autopopulated if not provided.\n    \n\n    :rtype: dict\n    :return: {\n    'StackSetId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef delete_change_set(ChangeSetName=None, StackName=None):\n    \"\"\"\n    Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.\n    If the call successfully completes, AWS CloudFormation successfully deleted the change set.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_change_set(\n    ChangeSetName='string',\n    StackName='string'\n    )\n    \n    \n    :type ChangeSetName: string\n    :param ChangeSetName: [REQUIRED]\n    The name or Amazon Resource Name (ARN) of the change set that you want to delete.\n    \n\n    :type StackName: string\n    :param StackName: If you specified the name of a change set to delete, specify the stack name or ID (ARN) that is associated with it.\n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef delete_stack(StackName=None, RetainResources=None, RoleARN=None, ClientRequestToken=None):\n    \"\"\"\n    Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_stack(\n    StackName='string',\n    RetainResources=[\n    'string',\n    ],\n    RoleARN='string',\n    ClientRequestToken='string'\n    )\n    \n    \n    :type StackName: string\n    :param StackName: [REQUIRED]\n    The name or the unique stack ID that is associated with the stack.\n    \n\n    :type RetainResources: list\n    :param RetainResources: For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. (An illustrative retention sketch follows below.)
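A hedged retention sketch; the stack name and the logical ID are placeholders, and a configured boto3 client is assumed:\n    import boto3\n    client = boto3.client('cloudformation')\n    # 'LogsBucket' is a hypothetical logical ID for a non-empty S3 bucket worth keeping\n    client.delete_stack(\n    StackName='example-stack',\n    RetainResources=['LogsBucket']\n    )\n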
During deletion, AWS CloudFormation deletes the stack but does not delete the retained resources.\n Retaining resources is useful when you cannot delete a resource, such as a non-empty S3 bucket, but you want to delete the stack.\n (string) --\n \n\n :type RoleARN: string\n :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to delete the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf.\n If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that AWS CloudFormation successfully received them.\n All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1 , then all the StackEvents generated by that operation will have ClientRequestToken set as token1 .\n In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID , which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002 .\n \n\n \"\"\"\n pass\n\ndef delete_stack_instances(StackSetName=None, Accounts=None, Regions=None, OperationPreferences=None, RetainStacks=None, OperationId=None):\n \"\"\"\n Deletes stack instances for the specified accounts, in the specified regions.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_stack_instances(\n StackSetName='string',\n Accounts=[\n 'string',\n ],\n Regions=[\n 'string',\n ],\n OperationPreferences={\n 'RegionOrder': [\n 'string',\n ],\n 'FailureToleranceCount': 123,\n 'FailureTolerancePercentage': 123,\n 'MaxConcurrentCount': 123,\n 'MaxConcurrentPercentage': 123\n },\n RetainStacks=True|False,\n OperationId='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to delete stack instances for.\n \n\n :type Accounts: list\n :param Accounts: [REQUIRED]\n The names of the AWS accounts that you want to delete stack instances for.\n (string) --\n \n\n :type Regions: list\n :param Regions: [REQUIRED]\n The regions where you want to delete stack set instances.\n (string) --\n \n\n :type OperationPreferences: dict\n :param OperationPreferences: Preferences for how AWS CloudFormation performs this stack set operation.\n RegionOrder (list) --The order of the regions in where you want to perform the stack operation.\n (string) --\n FailureToleranceCount (integer) --The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. 
If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n    Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).\n    FailureTolerancePercentage (integer) --The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n    When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.\n    Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage , but not both.\n    MaxConcurrentCount (integer) --The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount ; MaxConcurrentCount is at most one more than the FailureToleranceCount .\n    Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n    Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n    MaxConcurrentPercentage (integer) --The maximum percentage of accounts in which to perform this operation at one time.\n    When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.\n    Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n    Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n    \n\n    :type RetainStacks: boolean\n    :param RetainStacks: [REQUIRED]\n    Removes the stack instances from the specified stack set, but doesn't delete the stacks. You can't reassociate a retained stack or add an existing, saved stack to a new stack set.\n    For more information, see Stack set operation options .\n    \n\n    :type OperationId: string\n    :param OperationId: The unique identifier for this stack set operation.\n    If you don't specify an operation ID, the SDK generates one automatically.\n    The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.\n    Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED .\n    This field is autopopulated if not provided.\n    \n\n    :rtype: dict\n    :return: {\n    'OperationId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef delete_stack_set(StackSetName=None):\n    \"\"\"\n    Deletes a stack set. Before you can delete a stack set, all of its member stack instances must be deleted. For more information about how to do this, see DeleteStackInstances .\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_stack_set(\n    StackSetName='string'\n    )\n    \n    \n    :type StackSetName: string\n    :param StackSetName: [REQUIRED]\n    The name or unique ID of the stack set that you're deleting. (An illustrative teardown sketch follows below.)
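An illustrative teardown sketch; names are placeholders and a configured boto3 client is assumed. Stack instances must be deleted, and that operation must finish, before the stack set itself can be deleted:\n    import boto3\n    client = boto3.client('cloudformation')\n    client.delete_stack_instances(\n    StackSetName='example-stack-set',\n    Accounts=['123456789012'],\n    Regions=['us-east-1'],\n    RetainStacks=False\n    )\n    # once the operation above completes, the now-empty stack set can be removed\n    client.delete_stack_set(StackSetName='example-stack-set')\n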
You can obtain this value by running ListStackSets .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_account_limits(NextToken=None):\n \"\"\"\n Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_limits(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: A string that identifies the next page of limits that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'AccountLimits': [\n {\n 'Name': 'string',\n 'Value': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_change_set(ChangeSetName=None, StackName=None, NextToken=None):\n \"\"\"\n Returns the inputs for the change set and a list of changes that AWS CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the AWS CloudFormation User Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_change_set(\n ChangeSetName='string',\n StackName='string',\n NextToken='string'\n )\n \n \n :type ChangeSetName: string\n :param ChangeSetName: [REQUIRED]\n The name or Amazon Resource Name (ARN) of the change set that you want to describe.\n \n\n :type StackName: string\n :param StackName: If you specified the name of a change set, specify the stack name or ID (ARN) of the change set you want to describe.\n\n :type NextToken: string\n :param NextToken: A string (provided by the DescribeChangeSet response output) that identifies the next page of information that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'ChangeSetName': 'string',\n 'ChangeSetId': 'string',\n 'StackId': 'string',\n 'StackName': 'string',\n 'Description': 'string',\n 'Parameters': [\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n 'CreationTime': datetime(2015, 1, 1),\n 'ExecutionStatus': 'UNAVAILABLE'|'AVAILABLE'|'EXECUTE_IN_PROGRESS'|'EXECUTE_COMPLETE'|'EXECUTE_FAILED'|'OBSOLETE',\n 'Status': 'CREATE_PENDING'|'CREATE_IN_PROGRESS'|'CREATE_COMPLETE'|'DELETE_COMPLETE'|'FAILED',\n 'StatusReason': 'string',\n 'NotificationARNs': [\n 'string',\n ],\n 'RollbackConfiguration': {\n 'RollbackTriggers': [\n {\n 'Arn': 'string',\n 'Type': 'string'\n },\n ],\n 'MonitoringTimeInMinutes': 123\n },\n 'Capabilities': [\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'Changes': [\n {\n 'Type': 'Resource',\n 'ResourceChange': {\n 'Action': 'Add'|'Modify'|'Remove',\n 'LogicalResourceId': 'string',\n 'PhysicalResourceId': 'string',\n 'ResourceType': 'string',\n 'Replacement': 'True'|'False'|'Conditional',\n 'Scope': [\n 'Properties'|'Metadata'|'CreationPolicy'|'UpdatePolicy'|'DeletionPolicy'|'Tags',\n ],\n 'Details': [\n {\n 'Target': {\n 'Attribute': 'Properties'|'Metadata'|'CreationPolicy'|'UpdatePolicy'|'DeletionPolicy'|'Tags',\n 'Name': 'string',\n 'RequiresRecreation': 'Never'|'Conditionally'|'Always'\n },\n 'Evaluation': 'Static'|'Dynamic',\n 'ChangeSource': 'ResourceReference'|'ParameterReference'|'ResourceAttribute'|'DirectModification'|'Automatic',\n 'CausingEntity': 'string'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_stack_drift_detection_status(StackDriftDetectionId=None):\n \"\"\"\n Returns information 
about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted , from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information on stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources .\n    Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus . Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stack_drift_detection_status(\n    StackDriftDetectionId='string'\n    )\n    \n    \n    :type StackDriftDetectionId: string\n    :param StackDriftDetectionId: [REQUIRED]\n    The ID of the drift detection results of this operation.\n    AWS CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of drift results AWS CloudFormation retains for any given stack, and for how long, may vary.\n    \n\n    :rtype: dict\n    :return: {\n    'StackId': 'string',\n    'StackDriftDetectionId': 'string',\n    'StackDriftStatus': 'DRIFTED'|'IN_SYNC'|'UNKNOWN'|'NOT_CHECKED',\n    'DetectionStatus': 'DETECTION_IN_PROGRESS'|'DETECTION_FAILED'|'DETECTION_COMPLETE',\n    'DetectionStatusReason': 'string',\n    'DriftedStackResourceCount': 123,\n    'Timestamp': datetime(2015, 1, 1)\n    }\n    \n    \n    :returns: \n    DETECTION_COMPLETE : The stack drift detection operation has successfully completed for all resources in the stack that support drift detection. (Resources that do not currently support drift detection remain unchecked.) If you specified logical resource IDs for AWS CloudFormation to use as a filter for the stack drift detection operation, only the resources with those logical IDs are checked for drift.\n    DETECTION_FAILED : The stack drift detection operation has failed for at least one resource in the stack. Results will be available for resources on which AWS CloudFormation successfully completed drift detection.\n    DETECTION_IN_PROGRESS : The stack drift detection operation is currently in progress.\n    \n    \"\"\"\n    pass\n\ndef describe_stack_events(StackName=None, NextToken=None):\n    \"\"\"\n    Returns all stack related events for a specified stack in reverse chronological order. (An illustrative paging sketch follows below.)
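An illustrative paging sketch for stack events; the stack name is a placeholder, a configured boto3 client is assumed, and the availability of a paginator for this operation is an assumption:\n    import boto3\n    client = boto3.client('cloudformation')\n    paginator = client.get_paginator('describe_stack_events')\n    for page in paginator.paginate(StackName='example-stack'):\n        for event in page['StackEvents']:\n            print(event['Timestamp'], event['LogicalResourceId'], event['ResourceStatus'])\n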
For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_stack_events(\n StackName='string',\n NextToken='string'\n )\n \n \n :type StackName: string\n :param StackName: The name or the unique stack ID that is associated with the stack, which are not always interchangeable:\n Running stacks: You can specify either the stack's name or its unique stack ID.\n Deleted stacks: You must specify the unique stack ID.\n Default: There is no default value.\n \n\n :type NextToken: string\n :param NextToken: A string that identifies the next page of events that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'StackEvents': [\n {\n 'StackId': 'string',\n 'EventId': 'string',\n 'StackName': 'string',\n 'LogicalResourceId': 'string',\n 'PhysicalResourceId': 'string',\n 'ResourceType': 'string',\n 'Timestamp': datetime(2015, 1, 1),\n 'ResourceStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'DELETE_SKIPPED'|'UPDATE_IN_PROGRESS'|'UPDATE_FAILED'|'UPDATE_COMPLETE',\n 'ResourceStatusReason': 'string',\n 'ResourceProperties': 'string',\n 'ClientRequestToken': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_stack_instance(StackSetName=None, StackInstanceAccount=None, StackInstanceRegion=None):\n \"\"\"\n Returns the stack instance that's associated with the specified stack set, AWS account, and region.\n For a list of stack instances that are associated with a specific stack set, use ListStackInstances .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_stack_instance(\n StackSetName='string',\n StackInstanceAccount='string',\n StackInstanceRegion='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or the unique stack ID of the stack set that you want to get stack instance information for.\n \n\n :type StackInstanceAccount: string\n :param StackInstanceAccount: [REQUIRED]\n The ID of an AWS account that's associated with this stack instance.\n \n\n :type StackInstanceRegion: string\n :param StackInstanceRegion: [REQUIRED]\n The name of a region that's associated with this stack instance.\n \n\n :rtype: dict\n :return: {\n 'StackInstance': {\n 'StackSetId': 'string',\n 'Region': 'string',\n 'Account': 'string',\n 'StackId': 'string',\n 'ParameterOverrides': [\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n 'Status': 'CURRENT'|'OUTDATED'|'INOPERABLE',\n 'StatusReason': 'string'\n }\n }\n \n \n :returns: \n INOPERABLE : A DeleteStackInstances operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet operations. 
You might need to perform a DeleteStackInstances operation, with RetainStacks set to true , to delete the stack instance, and then delete the stack manually.\n    OUTDATED : The stack isn't currently up to date with the stack set because:\n    The associated stack failed during a CreateStackSet or UpdateStackSet operation.\n    The stack was part of a CreateStackSet or UpdateStackSet operation that failed or was stopped before the stack was created or updated.\n    \n    \n    CURRENT : The stack is currently up to date with the stack set.\n    \n    \"\"\"\n    pass\n\ndef describe_stack_resource(StackName=None, LogicalResourceId=None):\n    \"\"\"\n    Returns a description of the specified resource in the specified stack.\n    For deleted stacks, DescribeStackResource returns resource information for up to 90 days after the stack has been deleted.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stack_resource(\n    StackName='string',\n    LogicalResourceId='string'\n    )\n    \n    \n    :type StackName: string\n    :param StackName: [REQUIRED]\n    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:\n    Running stacks: You can specify either the stack's name or its unique stack ID.\n    Deleted stacks: You must specify the unique stack ID.\n    Default: There is no default value.\n    \n\n    :type LogicalResourceId: string\n    :param LogicalResourceId: [REQUIRED]\n    The logical name of the resource as specified in the template.\n    Default: There is no default value.\n    \n\n    :rtype: dict\n    :return: {\n    'StackResourceDetail': {\n    'StackName': 'string',\n    'StackId': 'string',\n    'LogicalResourceId': 'string',\n    'PhysicalResourceId': 'string',\n    'ResourceType': 'string',\n    'LastUpdatedTimestamp': datetime(2015, 1, 1),\n    'ResourceStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'DELETE_SKIPPED'|'UPDATE_IN_PROGRESS'|'UPDATE_FAILED'|'UPDATE_COMPLETE',\n    'ResourceStatusReason': 'string',\n    'Description': 'string',\n    'Metadata': 'string',\n    'DriftInformation': {\n    'StackResourceDriftStatus': 'IN_SYNC'|'MODIFIED'|'DELETED'|'NOT_CHECKED',\n    'LastCheckTimestamp': datetime(2015, 1, 1)\n    }\n    }\n    }\n    \n    \n    :returns: \n    DELETED : The resource differs from its expected configuration in that it has been deleted.\n    MODIFIED : The resource differs from its expected configuration.\n    NOT_CHECKED : AWS CloudFormation has not checked if the resource differs from its expected configuration. Any resources that do not currently support drift detection have a status of NOT_CHECKED . For more information, see Resources that Support Drift Detection .\n    IN_SYNC : The resource's actual configuration matches its expected configuration.\n    \n    \"\"\"\n    pass\n\ndef describe_stack_resource_drifts(StackName=None, StackResourceDriftStatusFilters=None, NextToken=None, MaxResults=None):\n    \"\"\"\n    Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where AWS CloudFormation detects configuration drift.\n    For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that have not yet been checked for drift are not included. Resources that do not currently support drift detection are not checked, and so not included. (An illustrative filter sketch follows below.)
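An illustrative filter sketch that requests only resources detected as drifted; the stack name is a placeholder and a configured boto3 client is assumed:\n    import boto3\n    client = boto3.client('cloudformation')\n    resp = client.describe_stack_resource_drifts(\n    StackName='example-stack',\n    StackResourceDriftStatusFilters=['MODIFIED', 'DELETED']\n    )\n    for drift in resp['StackResourceDrifts']:\n        print(drift['LogicalResourceId'], drift['StackResourceDriftStatus'])\n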
For a list of resources that support drift detection, see Resources that Support Drift Detection .\n    Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stack_resource_drifts(\n    StackName='string',\n    StackResourceDriftStatusFilters=[\n    'IN_SYNC'|'MODIFIED'|'DELETED'|'NOT_CHECKED',\n    ],\n    NextToken='string',\n    MaxResults=123\n    )\n    \n    \n    :type StackName: string\n    :param StackName: [REQUIRED]\n    The name of the stack for which you want drift information.\n    \n\n    :type StackResourceDriftStatusFilters: list\n    :param StackResourceDriftStatusFilters: The resource drift status values to use as filters for the resource drift results returned.\n    DELETED : The resource differs from its expected template configuration in that the resource has been deleted.\n    MODIFIED : One or more resource properties differ from their expected template values.\n    IN_SYNC : The resource's actual configuration matches its expected template configuration.\n    NOT_CHECKED : AWS CloudFormation does not currently return this value.\n    (string) --\n    \n\n    :type NextToken: string\n    :param NextToken: A string that identifies the next page of stack resource drift results.\n\n    :type MaxResults: integer\n    :param MaxResults: The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n    :rtype: dict\n    :return: {\n    'StackResourceDrifts': [\n    {\n    'StackId': 'string',\n    'LogicalResourceId': 'string',\n    'PhysicalResourceId': 'string',\n    'PhysicalResourceIdContext': [\n    {\n    'Key': 'string',\n    'Value': 'string'\n    },\n    ],\n    'ResourceType': 'string',\n    'ExpectedProperties': 'string',\n    'ActualProperties': 'string',\n    'PropertyDifferences': [\n    {\n    'PropertyPath': 'string',\n    'ExpectedValue': 'string',\n    'ActualValue': 'string',\n    'DifferenceType': 'ADD'|'REMOVE'|'NOT_EQUAL'\n    },\n    ],\n    'StackResourceDriftStatus': 'IN_SYNC'|'MODIFIED'|'DELETED'|'NOT_CHECKED',\n    'Timestamp': datetime(2015, 1, 1)\n    },\n    ],\n    'NextToken': 'string'\n    }\n    \n    \n    :returns: \n    ADD : A value has been added to a resource property that is an array or list data type.\n    REMOVE : The property has been removed from the current resource configuration.\n    NOT_EQUAL : The current property value differs from its expected value (as defined in the stack template and any values specified as template parameters).\n    \n    \"\"\"\n    pass\n\ndef describe_stack_resources(StackName=None, LogicalResourceId=None, PhysicalResourceId=None):\n    \"\"\"\n    Returns AWS resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.\n    For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.\n    You must specify either StackName or PhysicalResourceId , but not both. In addition, you can specify LogicalResourceId to filter the returned result. (An illustrative lookup sketch follows below.)
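An illustrative lookup sketch: pass an EC2 instance ID as PhysicalResourceId to find the owning stack (the instance ID is a placeholder; a configured boto3 client is assumed):\n    import boto3\n    client = boto3.client('cloudformation')\n    resp = client.describe_stack_resources(PhysicalResourceId='i-0123456789abcdef0')\n    for res in resp['StackResources']:\n        print(res['StackName'], res['LogicalResourceId'], res['ResourceType'])\n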
For more information about resources, the LogicalResourceId and PhysicalResourceId , go to the AWS CloudFormation User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_stack_resources(\n StackName='string',\n LogicalResourceId='string',\n PhysicalResourceId='string'\n )\n \n \n :type StackName: string\n :param StackName: The name or the unique stack ID that is associated with the stack, which are not always interchangeable:\n Running stacks: You can specify either the stack's name or its unique stack ID.\n Deleted stacks: You must specify the unique stack ID.\n Default: There is no default value.\n Required: Conditional. If you do not specify StackName , you must specify PhysicalResourceId .\n \n\n :type LogicalResourceId: string\n :param LogicalResourceId: The logical name of the resource as specified in the template.\n Default: There is no default value.\n \n\n :type PhysicalResourceId: string\n :param PhysicalResourceId: The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.\n For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId corresponds to the InstanceId . You can pass the EC2 InstanceId to DescribeStackResources to find which stack the instance belongs to and what other resources are part of the stack.\n Required: Conditional. If you do not specify PhysicalResourceId , you must specify StackName .\n Default: There is no default value.\n \n\n :rtype: dict\n :return: {\n 'StackResources': [\n {\n 'StackName': 'string',\n 'StackId': 'string',\n 'LogicalResourceId': 'string',\n 'PhysicalResourceId': 'string',\n 'ResourceType': 'string',\n 'Timestamp': datetime(2015, 1, 1),\n 'ResourceStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'DELETE_SKIPPED'|'UPDATE_IN_PROGRESS'|'UPDATE_FAILED'|'UPDATE_COMPLETE',\n 'ResourceStatusReason': 'string',\n 'Description': 'string',\n 'DriftInformation': {\n 'StackResourceDriftStatus': 'IN_SYNC'|'MODIFIED'|'DELETED'|'NOT_CHECKED',\n 'LastCheckTimestamp': datetime(2015, 1, 1)\n }\n },\n ]\n }\n \n \n :returns: \n DELETED : The resource differs from its expected configuration in that it has been deleted.\n MODIFIED : The resource differs from its expected configuration.\n NOT_CHECKED : AWS CloudFormation has not checked if the resource differs from its expected configuration. Any resources that do not currently support drift detection have a status of NOT_CHECKED . 
For more information, see Resources that Support Drift Detection .\n    IN_SYNC : The resource's actual configuration matches its expected configuration.\n    \n    \"\"\"\n    pass\n\ndef describe_stack_set(StackSetName=None):\n    \"\"\"\n    Returns the description of the specified stack set.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stack_set(\n    StackSetName='string'\n    )\n    \n    \n    :type StackSetName: string\n    :param StackSetName: [REQUIRED]\n    The name or unique ID of the stack set whose description you want.\n    \n\n    :rtype: dict\n    :return: {\n    'StackSet': {\n    'StackSetName': 'string',\n    'StackSetId': 'string',\n    'Description': 'string',\n    'Status': 'ACTIVE'|'DELETED',\n    'TemplateBody': 'string',\n    'Parameters': [\n    {\n    'ParameterKey': 'string',\n    'ParameterValue': 'string',\n    'UsePreviousValue': True|False,\n    'ResolvedValue': 'string'\n    },\n    ],\n    'Capabilities': [\n    'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n    ],\n    'Tags': [\n    {\n    'Key': 'string',\n    'Value': 'string'\n    },\n    ],\n    'StackSetARN': 'string',\n    'AdministrationRoleARN': 'string',\n    'ExecutionRoleName': 'string'\n    }\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef describe_stack_set_operation(StackSetName=None, OperationId=None):\n    \"\"\"\n    Returns the description of the specified stack set operation.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stack_set_operation(\n    StackSetName='string',\n    OperationId='string'\n    )\n    \n    \n    :type StackSetName: string\n    :param StackSetName: [REQUIRED]\n    The name or the unique stack ID of the stack set for the stack operation.\n    \n\n    :type OperationId: string\n    :param OperationId: [REQUIRED]\n    The unique ID of the stack set operation.\n    \n\n    :rtype: dict\n    :return: {\n    'StackSetOperation': {\n    'OperationId': 'string',\n    'StackSetId': 'string',\n    'Action': 'CREATE'|'UPDATE'|'DELETE',\n    'Status': 'RUNNING'|'SUCCEEDED'|'FAILED'|'STOPPING'|'STOPPED',\n    'OperationPreferences': {\n    'RegionOrder': [\n    'string',\n    ],\n    'FailureToleranceCount': 123,\n    'FailureTolerancePercentage': 123,\n    'MaxConcurrentCount': 123,\n    'MaxConcurrentPercentage': 123\n    },\n    'RetainStacks': True|False,\n    'AdministrationRoleARN': 'string',\n    'ExecutionRoleName': 'string',\n    'CreationTimestamp': datetime(2015, 1, 1),\n    'EndTimestamp': datetime(2015, 1, 1)\n    }\n    }\n    \n    \n    :returns: \n    FAILED : The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each region during stack create and update operations. If the number of failed stacks within a region exceeds the failure tolerance, the status of the operation in the region is set to FAILED .
This in turn sets the status of the operation as a whole to FAILED , and AWS CloudFormation cancels the operation in any remaining regions.\n    RUNNING : The operation is currently being performed.\n    STOPPED : The user has cancelled the operation.\n    STOPPING : The operation is in the process of stopping, at user request.\n    SUCCEEDED : The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.\n    \n    \"\"\"\n    pass\n\ndef describe_stacks(StackName=None, NextToken=None):\n    \"\"\"\n    Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stacks(\n    StackName='string',\n    NextToken='string'\n    )\n    \n    \n    :type StackName: string\n    :param StackName: The name or the unique stack ID that is associated with the stack, which are not always interchangeable:\n    Running stacks: You can specify either the stack's name or its unique stack ID.\n    Deleted stacks: You must specify the unique stack ID.\n    Default: There is no default value.\n    \n\n    :type NextToken: string\n    :param NextToken: A string that identifies the next page of stacks that you want to retrieve.\n\n    :rtype: dict\n    :return: {\n    'Stacks': [\n    {\n    'StackId': 'string',\n    'StackName': 'string',\n    'ChangeSetId': 'string',\n    'Description': 'string',\n    'Parameters': [\n    {\n    'ParameterKey': 'string',\n    'ParameterValue': 'string',\n    'UsePreviousValue': True|False,\n    'ResolvedValue': 'string'\n    },\n    ],\n    'CreationTime': datetime(2015, 1, 1),\n    'DeletionTime': datetime(2015, 1, 1),\n    'LastUpdatedTime': datetime(2015, 1, 1),\n    'RollbackConfiguration': {\n    'RollbackTriggers': [\n    {\n    'Arn': 'string',\n    'Type': 'string'\n    },\n    ],\n    'MonitoringTimeInMinutes': 123\n    },\n    'StackStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'ROLLBACK_IN_PROGRESS'|'ROLLBACK_FAILED'|'ROLLBACK_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'UPDATE_IN_PROGRESS'|'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_COMPLETE'|'UPDATE_ROLLBACK_IN_PROGRESS'|'UPDATE_ROLLBACK_FAILED'|'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_ROLLBACK_COMPLETE'|'REVIEW_IN_PROGRESS',\n    'StackStatusReason': 'string',\n    'DisableRollback': True|False,\n    'NotificationARNs': [\n    'string',\n    ],\n    'TimeoutInMinutes': 123,\n    'Capabilities': [\n    'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n    ],\n    'Outputs': [\n    {\n    'OutputKey': 'string',\n    'OutputValue': 'string',\n    'Description': 'string',\n    'ExportName': 'string'\n    },\n    ],\n    'RoleARN': 'string',\n    'Tags': [\n    {\n    'Key': 'string',\n    'Value': 'string'\n    },\n    ],\n    'EnableTerminationProtection': True|False,\n    'ParentId': 'string',\n    'RootId': 'string',\n    'DriftInformation': {\n    'StackDriftStatus': 'DRIFTED'|'IN_SYNC'|'UNKNOWN'|'NOT_CHECKED',\n    'LastCheckTimestamp': datetime(2015, 1, 1)\n    }\n    },\n    ],\n    'NextToken': 'string'\n    }\n    \n    \n    :returns: \n    To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.\n    To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update).
Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.\n    To remove all currently specified triggers, specify an empty list for this parameter.\n    \n    \"\"\"\n    pass\n\ndef detect_stack_drift(StackName=None, LogicalResourceIds=None):\n    \"\"\"\n    Detects whether a stack's actual configuration differs, or has drifted , from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, AWS CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources .\n    Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.\n    For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection .\n    When detecting drift on a stack, AWS CloudFormation does not detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.detect_stack_drift(\n    StackName='string',\n    LogicalResourceIds=[\n    'string',\n    ]\n    )\n    \n    \n    :type StackName: string\n    :param StackName: [REQUIRED]\n    The name of the stack for which you want to detect drift.\n    \n\n    :type LogicalResourceIds: list\n    :param LogicalResourceIds: The logical names of any resources you want to use as filters.\n    (string) --\n    \n\n    :rtype: dict\n    :return: {\n    'StackDriftDetectionId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef detect_stack_resource_drift(StackName=None, LogicalResourceId=None):\n    \"\"\"\n    Returns information about whether a resource's actual configuration differs, or has drifted , from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which AWS CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources .\n    Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.\n    Resources that do not currently support drift detection cannot be checked. (An illustrative detect-and-poll sketch follows below.)
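An illustrative detect-and-poll sketch; the stack name is a placeholder, the polling interval is arbitrary, and a configured boto3 client is assumed:\n    import time\n    import boto3\n    client = boto3.client('cloudformation')\n    detection_id = client.detect_stack_drift(StackName='example-stack')['StackDriftDetectionId']\n    while True:\n        status = client.describe_stack_drift_detection_status(StackDriftDetectionId=detection_id)\n        if status['DetectionStatus'] != 'DETECTION_IN_PROGRESS':\n            break\n        time.sleep(5)\n    print(status['StackDriftStatus'])\n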
For a list of resources that support drift detection, see Resources that Support Drift Detection .\n See also: AWS API Documentation\n \n \n :example: response = client.detect_stack_resource_drift(\n StackName='string',\n LogicalResourceId='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name of the stack to which the resource belongs.\n \n\n :type LogicalResourceId: string\n :param LogicalResourceId: [REQUIRED]\n The logical name of the resource for which to return drift information.\n \n\n :rtype: dict\n :return: {\n 'StackResourceDrift': {\n 'StackId': 'string',\n 'LogicalResourceId': 'string',\n 'PhysicalResourceId': 'string',\n 'PhysicalResourceIdContext': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'ResourceType': 'string',\n 'ExpectedProperties': 'string',\n 'ActualProperties': 'string',\n 'PropertyDifferences': [\n {\n 'PropertyPath': 'string',\n 'ExpectedValue': 'string',\n 'ActualValue': 'string',\n 'DifferenceType': 'ADD'|'REMOVE'|'NOT_EQUAL'\n },\n ],\n 'StackResourceDriftStatus': 'IN_SYNC'|'MODIFIED'|'DELETED'|'NOT_CHECKED',\n 'Timestamp': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n ADD : A value has been added to a resource property that is an array or list data type.\n REMOVE : The property has been removed from the current resource configuration.\n NOT_EQUAL : The current property value differs from its expected value (as defined in the stack template and any values specified as template parameters).\n \n \"\"\"\n pass\n\ndef estimate_template_cost(TemplateBody=None, TemplateURL=None, Parameters=None):\n \"\"\"\n Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.\n See also: AWS API Documentation\n \n \n :example: response = client.estimate_template_cost(\n TemplateBody='string',\n TemplateURL='string',\n Parameters=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ]\n )\n \n \n :type TemplateBody: string\n :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)\n Conditional: You must pass TemplateBody or TemplateURL . If both are passed, only TemplateBody is used.\n \n\n :type TemplateURL: string\n :param TemplateURL: Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must pass TemplateURL or TemplateBody . If both are passed, only TemplateBody is used.\n \n\n :type Parameters: list\n :param Parameters: A list of Parameter structures that specify input parameters.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. 
This field is returned only for ` SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n    \n    \n\n    :rtype: dict\n    :return: {\n    'Url': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef execute_change_set(ChangeSetName=None, StackName=None, ClientRequestToken=None):\n    \"\"\"\n    Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.\n    When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.\n    If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.execute_change_set(\n    ChangeSetName='string',\n    StackName='string',\n    ClientRequestToken='string'\n    )\n    \n    \n    :type ChangeSetName: string\n    :param ChangeSetName: [REQUIRED]\n    The name or ARN of the change set that you want to use to update the specified stack.\n    \n\n    :type StackName: string\n    :param StackName: If you specified the name of a change set, specify the stack name or ID (ARN) that is associated with the change set you want to execute.\n\n    :type ClientRequestToken: string\n    :param ClientRequestToken: A unique identifier for this ExecuteChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that AWS CloudFormation successfully received them.\n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n    \"\"\"\n    Generate a presigned url given a client, its method, and arguments\n    \n    :type ClientMethod: string\n    :param ClientMethod: The client method to presign for\n\n    :type Params: dict\n    :param Params: The parameters normally passed to\n    ClientMethod.\n\n    :type ExpiresIn: int\n    :param ExpiresIn: The number of seconds the presigned url is valid\n    for. By default it expires in an hour (3600 seconds)\n\n    :type HttpMethod: string\n    :param HttpMethod: The http method to use on the generated url. By\n    default, the http method is whatever is used in the method's model.\n\n    \"\"\"\n    pass\n\ndef get_paginator(operation_name=None):\n    \"\"\"\n    Create a paginator for an operation.\n    \n    :type operation_name: string\n    :param operation_name: The operation name. This is the same name\n    as the method name on the client. For example, if the\n    method name is create_foo, and you'd normally invoke the\n    operation as client.create_foo(**kwargs), if the\n    create_foo operation can be paginated, you can use the\n    call client.get_paginator('create_foo').\n\n    :rtype: L{botocore.paginate.Paginator}\n    \"\"\"\n    pass\n\ndef get_stack_policy(StackName=None):\n    \"\"\"\n    Returns the stack policy for a specified stack. (An illustrative null-handling sketch follows below.)
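An illustrative null-handling sketch; the stack name is a placeholder and a configured boto3 client is assumed. StackPolicyBody is absent when the stack has no policy attached:\n    import json\n    import boto3\n    client = boto3.client('cloudformation')\n    resp = client.get_stack_policy(StackName='example-stack')\n    body = resp.get('StackPolicyBody')\n    policy = json.loads(body) if body else None\n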
If a stack doesn't have a policy, a null value is returned.\n See also: AWS API Documentation\n \n \n :example: response = client.get_stack_policy(\n StackName='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or unique stack ID that is associated with the stack whose policy you want to get.\n \n\n :rtype: dict\n :return: {\n 'StackPolicyBody': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_template(StackName=None, ChangeSetName=None, TemplateStage=None):\n \"\"\"\n Returns the template body for a specified stack. You can get the template for running or deleted stacks.\n For deleted stacks, GetTemplate returns the template for up to 90 days after the stack has been deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.get_template(\n StackName='string',\n ChangeSetName='string',\n TemplateStage='Original'|'Processed'\n )\n \n \n :type StackName: string\n :param StackName: The name or the unique stack ID that is associated with the stack, which are not always interchangeable:\n Running stacks: You can specify either the stack's name or its unique stack ID.\n Deleted stacks: You must specify the unique stack ID.\n Default: There is no default value.\n \n\n :type ChangeSetName: string\n :param ChangeSetName: The name or Amazon Resource Name (ARN) of a change set for which AWS CloudFormation returns the associated template. If you specify a name, you must also specify the StackName .\n\n :type TemplateStage: string\n :param TemplateStage: For templates that include transforms, the stage of the template that AWS CloudFormation returns. To get the user-submitted template, specify Original . To get the template after AWS CloudFormation has processed all transforms, specify Processed .\n If the template doesn't include transforms, Original and Processed return the same template. By default, AWS CloudFormation specifies Original .\n \n\n :rtype: dict\n :return: {\n 'TemplateBody': {},\n 'StagesAvailable': [\n 'Original'|'Processed',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_template_summary(TemplateBody=None, TemplateURL=None, StackName=None, StackSetName=None):\n \"\"\"\n Returns information about a new or existing template. The GetTemplateSummary action is useful for viewing parameter information, such as default parameter values and parameter types, before you create or update a stack or stack set.\n You can use the GetTemplateSummary action when you submit a template, or you can get template information for a stack set, or a running or deleted stack.\n For deleted stacks, GetTemplateSummary returns the template information for up to 90 days after the stack has been deleted. If the template does not exist, a ValidationError is returned.\n See also: AWS API Documentation\n \n \n :example: response = client.get_template_summary(\n TemplateBody='string',\n TemplateURL='string',\n StackName='string',\n StackSetName='string'\n )\n \n \n :type TemplateBody: string\n :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify only one of the following parameters: StackName , StackSetName , TemplateBody , or TemplateURL .\n \n\n :type TemplateURL: string\n :param TemplateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. 
For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.\n    Conditional: You must specify only one of the following parameters: StackName , StackSetName , TemplateBody , or TemplateURL .\n    \n\n    :type StackName: string\n    :param StackName: The name or the stack ID that is associated with the stack, which are not always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For a deleted stack, you must specify the unique stack ID.\n    Conditional: You must specify only one of the following parameters: StackName , StackSetName , TemplateBody , or TemplateURL .\n    \n\n    :type StackSetName: string\n    :param StackSetName: The name or unique ID of the stack set from which the stack was created.\n    Conditional: You must specify only one of the following parameters: StackName , StackSetName , TemplateBody , or TemplateURL .\n    \n\n    :rtype: dict\n    :return: {\n    'Parameters': [\n    {\n    'ParameterKey': 'string',\n    'DefaultValue': 'string',\n    'ParameterType': 'string',\n    'NoEcho': True|False,\n    'Description': 'string',\n    'ParameterConstraints': {\n    'AllowedValues': [\n    'string',\n    ]\n    }\n    },\n    ],\n    'Description': 'string',\n    'Capabilities': [\n    'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n    ],\n    'CapabilitiesReason': 'string',\n    'ResourceTypes': [\n    'string',\n    ],\n    'Version': 'string',\n    'Metadata': 'string',\n    'DeclaredTransforms': [\n    'string',\n    ]\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef get_waiter(waiter_name=None):\n    \"\"\"\n    Returns an object that can wait for some condition.\n    \n    :type waiter_name: str\n    :param waiter_name: The name of the waiter to get. See the waiters\n    section of the service docs for a list of available waiters.\n\n    :rtype: botocore.waiter.Waiter\n    \"\"\"\n    pass\n\ndef list_change_sets(StackName=None, NextToken=None):\n    \"\"\"\n    Returns the ID and status of each active change set for a stack. For example, AWS CloudFormation lists change sets that are in the CREATE_IN_PROGRESS or CREATE_PENDING state.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_change_sets(\n    StackName='string',\n    NextToken='string'\n    )\n    \n    \n    :type StackName: string\n    :param StackName: [REQUIRED]\n    The name or the Amazon Resource Name (ARN) of the stack for which you want to list change sets.\n    \n\n    :type NextToken: string\n    :param NextToken: A string (provided by the ListChangeSets response output) that identifies the next page of change sets that you want to retrieve.\n\n    :rtype: dict\n    :return: {\n    'Summaries': [\n    {\n    'StackId': 'string',\n    'StackName': 'string',\n    'ChangeSetId': 'string',\n    'ChangeSetName': 'string',\n    'ExecutionStatus': 'UNAVAILABLE'|'AVAILABLE'|'EXECUTE_IN_PROGRESS'|'EXECUTE_COMPLETE'|'EXECUTE_FAILED'|'OBSOLETE',\n    'Status': 'CREATE_PENDING'|'CREATE_IN_PROGRESS'|'CREATE_COMPLETE'|'DELETE_COMPLETE'|'FAILED',\n    'StatusReason': 'string',\n    'CreationTime': datetime(2015, 1, 1),\n    'Description': 'string'\n    },\n    ],\n    'NextToken': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_exports(NextToken=None):\n    \"\"\"\n    Lists all exported output values in the account and region in which you call this action. Use this action to see the exported output values that you can import into other stacks. (An illustrative paging sketch follows below.)
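An illustrative paging sketch that collects every export in the current account and region; a configured boto3 client is assumed, and the availability of a paginator for this operation is an assumption:\n    import boto3\n    client = boto3.client('cloudformation')\n    exports = {}\n    paginator = client.get_paginator('list_exports')\n    for page in paginator.paginate():\n        for exp in page['Exports']:\n            exports[exp['Name']] = exp['Value']\n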
To import values, use the `Fn::ImportValue <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-importvalue.html>`__ function.\n For more information, see AWS CloudFormation Export Stack Output Values .\n See also: AWS API Documentation\n \n \n :example: response = client.list_exports(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: A string (provided by the ListExports response output) that identifies the next page of exported output values that you asked to retrieve.\n\n :rtype: dict\n :return: {\n 'Exports': [\n {\n 'ExportingStackId': 'string',\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_imports(ExportName=None, NextToken=None):\n \"\"\"\n Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports .\n For more information about importing an exported output value, see the `Fn::ImportValue <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-importvalue.html>`__ function.\n See also: AWS API Documentation\n \n \n :example: response = client.list_imports(\n ExportName='string',\n NextToken='string'\n )\n \n \n :type ExportName: string\n :param ExportName: [REQUIRED]\n The name of the exported output value. AWS CloudFormation returns the stack names that are importing this value.\n \n\n :type NextToken: string\n :param NextToken: A string (provided by the ListImports response output) that identifies the next page of stacks that are importing the specified exported output value.\n\n :rtype: dict\n :return: {\n 'Imports': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_stack_instances(StackSetName=None, NextToken=None, MaxResults=None, StackInstanceAccount=None, StackInstanceRegion=None):\n \"\"\"\n Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_stack_instances(\n StackSetName='string',\n NextToken='string',\n MaxResults=123,\n StackInstanceAccount='string',\n StackInstanceRegion='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to list stack instances for.\n \n\n :type NextToken: string\n :param NextToken: If the previous request didn't return all of the remaining results, the response's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackInstances again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to be returned with a single call.
If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :type StackInstanceAccount: string\n :param StackInstanceAccount: The name of the AWS account that you want to list stack instances for.\n\n :type StackInstanceRegion: string\n :param StackInstanceRegion: The name of the region where you want to list stack instances.\n\n :rtype: dict\n :return: {\n 'Summaries': [\n {\n 'StackSetId': 'string',\n 'Region': 'string',\n 'Account': 'string',\n 'StackId': 'string',\n 'Status': 'CURRENT'|'OUTDATED'|'INOPERABLE',\n 'StatusReason': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n INOPERABLE : A DeleteStackInstances operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet operations. You might need to perform a DeleteStackInstances operation, with RetainStacks set to true , to delete the stack instance, and then delete the stack manually.\n OUTDATED : The stack isn't currently up to date with the stack set because:\n The associated stack failed during a CreateStackSet or UpdateStackSet operation.\n The stack was part of a CreateStackSet or UpdateStackSet operation that failed or was stopped before the stack was created or updated.\n \n \n CURRENT : The stack is currently up to date with the stack set.\n \n \"\"\"\n pass\n\ndef list_stack_resources(StackName=None, NextToken=None):\n \"\"\"\n Returns descriptions of all resources of the specified stack.\n For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_stack_resources(\n StackName='string',\n NextToken='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or the unique stack ID that is associated with the stack, which are not always interchangeable:\n Running stacks: You can specify either the stack's name or its unique stack ID.\n Deleted stacks: You must specify the unique stack ID.\n Default: There is no default value.\n \n\n :type NextToken: string\n :param NextToken: A string that identifies the next page of stack resources that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'StackResourceSummaries': [\n {\n 'LogicalResourceId': 'string',\n 'PhysicalResourceId': 'string',\n 'ResourceType': 'string',\n 'LastUpdatedTimestamp': datetime(2015, 1, 1),\n 'ResourceStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'DELETE_SKIPPED'|'UPDATE_IN_PROGRESS'|'UPDATE_FAILED'|'UPDATE_COMPLETE',\n 'ResourceStatusReason': 'string',\n 'DriftInformation': {\n 'StackResourceDriftStatus': 'IN_SYNC'|'MODIFIED'|'DELETED'|'NOT_CHECKED',\n 'LastCheckTimestamp': datetime(2015, 1, 1)\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n DELETED : The resource differs from its expected configuration in that it has been deleted.\n MODIFIED : The resource differs from its expected configuration.\n NOT_CHECKED : AWS CloudFormation has not checked if the resource differs from its expected configuration. Any resources that do not currently support drift detection have a status of NOT_CHECKED . For more information, see Resources that Support Drift Detection . 
If you performed a ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED . For more information on skipping resources during rollback operations, see Continue Rolling Back an Update in the AWS CloudFormation User Guide.\n IN_SYNC : The resource's actual configuration matches its expected configuration.\n \n \"\"\"\n pass\n\ndef list_stack_set_operation_results(StackSetName=None, OperationId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns summary information about the results of a stack set operation.\n See also: AWS API Documentation\n \n \n :example: response = client.list_stack_set_operation_results(\n StackSetName='string',\n OperationId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to get operation results for.\n \n\n :type OperationId: string\n :param OperationId: [REQUIRED]\n The ID of the stack set operation.\n \n\n :type NextToken: string\n :param NextToken: If the previous request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Summaries': [\n {\n 'Account': 'string',\n 'Region': 'string',\n 'Status': 'PENDING'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED',\n 'StatusReason': 'string',\n 'AccountGateResult': {\n 'Status': 'SUCCEEDED'|'FAILED'|'SKIPPED',\n 'StatusReason': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n CANCELLED : The operation in the specified account and region has been cancelled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.\n FAILED : The operation in the specified account and region failed. If the stack set operation fails in enough accounts within a region, the failure tolerance for the stack set operation as a whole might be exceeded.\n RUNNING : The operation in the specified account and region is currently in progress.\n PENDING : The operation in the specified account and region has yet to start.\n SUCCEEDED : The operation in the specified account and region completed successfully.\n \n \"\"\"\n pass\n\ndef list_stack_set_operations(StackSetName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns summary information about operations performed on a stack set.\n See also: AWS API Documentation\n \n \n :example: response = client.list_stack_set_operations(\n StackSetName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to get operation summaries for.\n \n\n :type NextToken: string\n :param NextToken: If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token.
To retrieve the next set of results, call ListStackSetOperations again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Summaries': [\n {\n 'OperationId': 'string',\n 'Action': 'CREATE'|'UPDATE'|'DELETE',\n 'Status': 'RUNNING'|'SUCCEEDED'|'FAILED'|'STOPPING'|'STOPPED',\n 'CreationTimestamp': datetime(2015, 1, 1),\n 'EndTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n FAILED : The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each region during stack create and update operations. If the number of failed stacks within a region exceeds the failure tolerance, the status of the operation in the region is set to FAILED . This in turn sets the status of the operation as a whole to FAILED , and AWS CloudFormation cancels the operation in any remaining regions.\n RUNNING : The operation is currently being performed.\n STOPPED : The user has cancelled the operation.\n STOPPING : The operation is in the process of stopping, at user request.\n SUCCEEDED : The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.\n \n \"\"\"\n pass\n\ndef list_stack_sets(NextToken=None, MaxResults=None, Status=None):\n \"\"\"\n Returns summary information about stack sets that are associated with the user.\n See also: AWS API Documentation\n \n \n :example: response = client.list_stack_sets(\n NextToken='string',\n MaxResults=123,\n Status='ACTIVE'|'DELETED'\n )\n \n \n :type NextToken: string\n :param NextToken: If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSets again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :type Status: string\n :param Status: The status of the stack sets that you want to get summary information about.\n\n :rtype: dict\n :return: {\n 'Summaries': [\n {\n 'StackSetName': 'string',\n 'StackSetId': 'string',\n 'Description': 'string',\n 'Status': 'ACTIVE'|'DELETED'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_stacks(NextToken=None, StackStatusFilter=None):\n \"\"\"\n Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days after the stack is deleted. 
If no StackStatusFilter is specified, summary information for all stacks is returned (including existing stacks and stacks that have been deleted).\n See also: AWS API Documentation\n \n \n :example: response = client.list_stacks(\n NextToken='string',\n StackStatusFilter=[\n 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'ROLLBACK_IN_PROGRESS'|'ROLLBACK_FAILED'|'ROLLBACK_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'UPDATE_IN_PROGRESS'|'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_COMPLETE'|'UPDATE_ROLLBACK_IN_PROGRESS'|'UPDATE_ROLLBACK_FAILED'|'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_ROLLBACK_COMPLETE'|'REVIEW_IN_PROGRESS',\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: A string that identifies the next page of stacks that you want to retrieve.\n\n :type StackStatusFilter: list\n :param StackStatusFilter: Stack status to use as a filter. Specify one or more stack status codes to list only stacks with the specified status codes. For a complete list of stack status codes, see the StackStatus parameter of the Stack data type.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'StackSummaries': [\n {\n 'StackId': 'string',\n 'StackName': 'string',\n 'TemplateDescription': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'DeletionTime': datetime(2015, 1, 1),\n 'StackStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'ROLLBACK_IN_PROGRESS'|'ROLLBACK_FAILED'|'ROLLBACK_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'UPDATE_IN_PROGRESS'|'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_COMPLETE'|'UPDATE_ROLLBACK_IN_PROGRESS'|'UPDATE_ROLLBACK_FAILED'|'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_ROLLBACK_COMPLETE'|'REVIEW_IN_PROGRESS',\n 'StackStatusReason': 'string',\n 'ParentId': 'string',\n 'RootId': 'string',\n 'DriftInformation': {\n 'StackDriftStatus': 'DRIFTED'|'IN_SYNC'|'UNKNOWN'|'NOT_CHECKED',\n 'LastCheckTimestamp': datetime(2015, 1, 1)\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n DRIFTED : The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.\n NOT_CHECKED : AWS CloudFormation has not checked if the stack differs from its expected template configuration.\n IN_SYNC : The stack's actual configuration matches its expected template configuration.\n UNKNOWN : This value is reserved for future use.\n \n \"\"\"\n pass\n\ndef set_stack_policy(StackName=None, StackPolicyBody=None, StackPolicyURL=None):\n \"\"\"\n Sets a stack policy for a specified stack.\n See also: AWS API Documentation\n \n \n :example: response = client.set_stack_policy(\n StackName='string',\n StackPolicyBody='string',\n StackPolicyURL='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or unique stack ID that you want to associate a policy with.\n \n\n :type StackPolicyBody: string\n :param StackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.\n\n :type StackPolicyURL: string\n :param StackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. 
You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.\n\n \"\"\"\n pass\n\ndef signal_resource(StackName=None, LogicalResourceId=None, UniqueId=None, Status=None):\n \"\"\"\n Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.\n See also: AWS API Documentation\n \n \n :example: response = client.signal_resource(\n StackName='string',\n LogicalResourceId='string',\n UniqueId='string',\n Status='SUCCESS'|'FAILURE'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The stack name or unique stack ID that includes the resource that you want to signal.\n \n\n :type LogicalResourceId: string\n :param LogicalResourceId: [REQUIRED]\n The logical ID of the resource that you want to signal. The logical ID is the name of the resource that is given in the template.\n \n\n :type UniqueId: string\n :param UniqueId: [REQUIRED]\n A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling groups, specify the instance ID that you are signaling as the unique ID. If you send multiple signals to a single resource (such as signaling a wait condition), each signal requires a different unique ID.\n \n\n :type Status: string\n :param Status: [REQUIRED]\n The status of the signal, which is either success or failure. A failure signal causes AWS CloudFormation to immediately fail the stack creation or update.\n \n\n \"\"\"\n pass\n\ndef stop_stack_set_operation(StackSetName=None, OperationId=None):\n \"\"\"\n Stops an in-progress operation on a stack set and its associated stack instances.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_stack_set_operation(\n StackSetName='string',\n OperationId='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to stop the operation for.\n \n\n :type OperationId: string\n :param OperationId: [REQUIRED]\n The ID of the stack operation.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_stack(StackName=None, TemplateBody=None, TemplateURL=None, UsePreviousTemplate=None, StackPolicyDuringUpdateBody=None, StackPolicyDuringUpdateURL=None, Parameters=None, Capabilities=None, ResourceTypes=None, RoleARN=None, RollbackConfiguration=None, StackPolicyBody=None, StackPolicyURL=None, NotificationARNs=None, Tags=None, ClientRequestToken=None):\n \"\"\"\n Updates a stack as specified in the template. After the call completes successfully, the stack update starts.
You can check the status of the stack via the DescribeStacks action.\n To get a copy of the template for an existing stack, you can use the GetTemplate action.\n For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack .\n See also: AWS API Documentation\n \n Examples\n This example adds two stack notification topics to the specified stack.\n Expected Output:\n \n :example: response = client.update_stack(\n StackName='string',\n TemplateBody='string',\n TemplateURL='string',\n UsePreviousTemplate=True|False,\n StackPolicyDuringUpdateBody='string',\n StackPolicyDuringUpdateURL='string',\n Parameters=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n Capabilities=[\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n ResourceTypes=[\n 'string',\n ],\n RoleARN='string',\n RollbackConfiguration={\n 'RollbackTriggers': [\n {\n 'Arn': 'string',\n 'Type': 'string'\n },\n ],\n 'MonitoringTimeInMinutes': 123\n },\n StackPolicyBody='string',\n StackPolicyURL='string',\n NotificationARNs=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n ClientRequestToken='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name or unique stack ID of the stack to update.\n \n\n :type TemplateBody: string\n :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)\n Conditional: You must specify only one of the following parameters: TemplateBody , TemplateURL , or set the UsePreviousTemplate to true .\n \n\n :type TemplateURL: string\n :param TemplateURL: Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify only one of the following parameters: TemplateBody , TemplateURL , or set the UsePreviousTemplate to true .\n \n\n :type UsePreviousTemplate: boolean\n :param UsePreviousTemplate: Reuse the existing template that is associated with the stack that you are updating.\n Conditional: You must specify only one of the following parameters: TemplateBody , TemplateURL , or set the UsePreviousTemplate to true .\n \n\n :type StackPolicyDuringUpdateBody: string\n :param StackPolicyDuringUpdateBody: Structure containing the temporary overriding stack policy body. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.\n If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.\n \n\n :type StackPolicyDuringUpdateURL: string\n :param StackPolicyDuringUpdateURL: Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.\n If you want to update protected resources, specify a temporary overriding stack policy during this update. 
If you do not specify a stack policy, the current policy that is associated with the stack will be used.\n \n\n :type Parameters: list\n :param Parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. This field is returned only for `SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n \n \n\n :type Capabilities: list\n :param Capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to update the stack.\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.\n If you have IAM resources, you can specify either capability.\n If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM .\n If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.\n If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n AWS::IAM::AccessKey\n AWS::IAM::Group\n AWS::IAM::InstanceProfile\n AWS::IAM::Policy\n AWS::IAM::Role\n AWS::IAM::User\n AWS::IAM::UserToGroupAddition\n For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .\n CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation. Change sets do not currently support nested stacks. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.\n Warning\n You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.\n For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates .\n (string) --\n \n\n :type ResourceTypes: list\n :param ResourceTypes: The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance .\n If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management .\n (string) --\n \n\n :type RoleARN: string\n :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to update the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.\n \n\n :type RollbackConfiguration: dict\n :param RollbackConfiguration: The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.\n RollbackTriggers (list) --The triggers to monitor during stack creation or update actions.\n By default, AWS CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:\n To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.\n To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.\n To remove all currently specified triggers, specify an empty list for this parameter.\n If a specified trigger is missing, the entire stack operation fails and is rolled back.\n (dict) --A rollback trigger AWS CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.\n Arn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the rollback trigger.\n If a specified trigger is missing, the entire stack operation fails and is rolled back.\n Type (string) -- [REQUIRED]The resource type of the rollback trigger.
Currently, AWS::CloudWatch::Alarm is the only supported resource type.\n \n MonitoringTimeInMinutes (integer) --The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.\n The default is 0 minutes.\n If you specify a monitoring period but do not specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack , for example) as necessary.\n If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.\n \n\n :type StackPolicyBody: string\n :param StackPolicyBody: Structure containing a new stack policy body. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.\n You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.\n \n\n :type StackPolicyURL: string\n :param StackPolicyURL: Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.\n You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.\n \n\n :type NotificationARNs: list\n :param NotificationARNs: Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS CloudFormation associates with the stack. Specify an empty list to remove all notification topics.\n (string) --\n \n\n :type Tags: list\n :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.\n (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.\n Key (string) -- [REQUIRED]\n Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .\n Value (string) -- [REQUIRED]\n Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to update a stack with the same name. 
You might retry UpdateStack requests to ensure that AWS CloudFormation successfully received them.\n All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1 , then all the StackEvents generated by that operation will have ClientRequestToken set as token1 .\n In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID , which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002 .\n \n\n :rtype: dict\n :return: {\n 'StackId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_stack_instances(StackSetName=None, Accounts=None, Regions=None, ParameterOverrides=None, OperationPreferences=None, OperationId=None):\n \"\"\"\n Updates the parameter values for stack instances for the specified accounts, within the specified regions. A stack instance refers to a stack in a specific account and region.\n You can only update stack instances in regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances .\n During stack set updates, any parameters overridden for a stack instance are not updated, but retain their overridden value.\n You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances .\n See also: AWS API Documentation\n \n \n :example: response = client.update_stack_instances(\n StackSetName='string',\n Accounts=[\n 'string',\n ],\n Regions=[\n 'string',\n ],\n ParameterOverrides=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n OperationPreferences={\n 'RegionOrder': [\n 'string',\n ],\n 'FailureToleranceCount': 123,\n 'FailureTolerancePercentage': 123,\n 'MaxConcurrentCount': 123,\n 'MaxConcurrentPercentage': 123\n },\n OperationId='string'\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set associated with the stack instances.\n \n\n :type Accounts: list\n :param Accounts: [REQUIRED]\n The names of one or more AWS accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and regions.\n (string) --\n \n\n :type Regions: list\n :param Regions: [REQUIRED]\n The names of one or more regions in which you want to update parameter values for stack instances. 
The overridden parameter values will be applied to all stack instances in the specified accounts and regions.\n (string) --\n \n\n :type ParameterOverrides: list\n :param ParameterOverrides: A list of input parameters whose values you want to update for the specified stack instances.\n Any overridden parameter values will be applied to all stack instances in the specified accounts and regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance update operations:\n To override the current value for a parameter, include the parameter and specify its value.\n To leave a parameter set to its present value, you can do one of the following:\n Do not include the parameter in the list.\n Include the parameter and specify UsePreviousValue as true . (You cannot specify both a value and set UsePreviousValue to true .)\n To set all overridden parameters back to the values specified in the stack set, specify a parameter list but do not include any parameters.\n To leave all parameters set to their present values, do not specify this property at all.\n During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.\n You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances .\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. This field is returned only for `SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n \n \n\n :type OperationPreferences: dict\n :param OperationPreferences: Preferences for how AWS CloudFormation performs this stack set operation.\n RegionOrder (list) --The order of the regions where you want to perform the stack operation.\n (string) --\n FailureToleranceCount (integer) --The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).\n FailureTolerancePercentage (integer) --The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region.
If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.\n Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage , but not both.\n MaxConcurrentCount (integer) --The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount ; MaxConcurrentCount is at most one more than the FailureToleranceCount .\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n MaxConcurrentPercentage (integer) --The maximum percentage of accounts in which to perform this operation at one time.\n When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n \n\n :type OperationId: string\n :param OperationId: The unique identifier for this stack set operation.\n The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.\n If you don't specify an operation ID, the SDK generates one automatically.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_stack_set(StackSetName=None, Description=None, TemplateBody=None, TemplateURL=None, UsePreviousTemplate=None, Parameters=None, Capabilities=None, Tags=None, OperationPreferences=None, AdministrationRoleARN=None, ExecutionRoleName=None, OperationId=None, Accounts=None, Regions=None):\n \"\"\"\n Updates the stack set, and associated stack instances in the specified accounts and regions.\n Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes.
Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.\n See also: AWS API Documentation\n \n \n :example: response = client.update_stack_set(\n StackSetName='string',\n Description='string',\n TemplateBody='string',\n TemplateURL='string',\n UsePreviousTemplate=True|False,\n Parameters=[\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string',\n 'UsePreviousValue': True|False,\n 'ResolvedValue': 'string'\n },\n ],\n Capabilities=[\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n OperationPreferences={\n 'RegionOrder': [\n 'string',\n ],\n 'FailureToleranceCount': 123,\n 'FailureTolerancePercentage': 123,\n 'MaxConcurrentCount': 123,\n 'MaxConcurrentPercentage': 123\n },\n AdministrationRoleARN='string',\n ExecutionRoleName='string',\n OperationId='string',\n Accounts=[\n 'string',\n ],\n Regions=[\n 'string',\n ]\n )\n \n \n :type StackSetName: string\n :param StackSetName: [REQUIRED]\n The name or unique ID of the stack set that you want to update.\n \n\n :type Description: string\n :param Description: A brief description of updates that you are making.\n\n :type TemplateBody: string\n :param TemplateBody: The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL or set UsePreviousTemplate to true.\n \n\n :type TemplateURL: string\n :param TemplateURL: The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL or set UsePreviousTemplate to true.\n \n\n :type UsePreviousTemplate: boolean\n :param UsePreviousTemplate: Use the existing template that's associated with the stack set that you're updating.\n Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL or set UsePreviousTemplate to true.\n \n\n :type Parameters: list\n :param Parameters: A list of input parameters for the stack set template.\n (dict) --The Parameter data type.\n ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.\n ParameterValue (string) --The input value associated with the parameter.\n UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.\n ResolvedValue (string) --Read-only. The value that corresponds to a Systems Manager parameter key. 
This field is returned only for `SSM parameter types <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types>`__ in the template.\n \n \n\n :type Capabilities: list\n :param Capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to update the stack set and its associated stack instances.\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.\n If you have IAM resources, you can specify either capability.\n If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM .\n If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.\n If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n AWS::IAM::AccessKey\n AWS::IAM::Group\n AWS::IAM::InstanceProfile\n AWS::IAM::Policy\n AWS::IAM::Role\n AWS::IAM::User\n AWS::IAM::UserToGroupAddition\n For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .\n CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates .\n Warning\n Stack sets do not currently support macros in stack templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.) Even if you specify this capability, if you include a macro in your template the stack set operation will fail.\n (string) --\n \n\n :type Tags: list\n :param Tags: The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags.\n If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means:\n If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags.\n If you specify any tags using this parameter, you must specify all the tags that you want associated with this stack set, even tags you've specified before (for example, when creating the stack set or during a previous update of the stack set). Any tags that you don't include in the updated list of tags are removed from the stack set, and therefore from the stacks and resources as well.\n If you specify an empty value, AWS CloudFormation removes all currently associated tags.\n If you specify new tags as part of an UpdateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources.
If you omit tags that are currently associated with the stack set from the list of tags you specify, AWS CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.\n (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.\n Key (string) -- [REQUIRED]\n Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .\n Value (string) -- [REQUIRED]\n Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.\n \n \n\n :type OperationPreferences: dict\n :param OperationPreferences: Preferences for how AWS CloudFormation performs this stack set operation.\n RegionOrder (list) --The order of the regions where you want to perform the stack operation.\n (string) --\n FailureToleranceCount (integer) --The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).\n FailureTolerancePercentage (integer) --The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.\n When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.\n Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage , but not both.\n MaxConcurrentCount (integer) --The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount ; MaxConcurrentCount is at most one more than the FailureToleranceCount .\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n MaxConcurrentPercentage (integer) --The maximum percentage of accounts in which to perform this operation at one time.\n When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.\n Note that this setting lets you specify the maximum for operations.
For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage , but not both.\n \n\n :type AdministrationRoleARN: string\n :param AdministrationRoleARN: The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.\n Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the AWS CloudFormation User Guide .\n If you specify a customized administrator role, AWS CloudFormation uses that role to update the stack. If you do not specify a customized administrator role, AWS CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.\n \n\n :type ExecutionRoleName: string\n :param ExecutionRoleName: The name of the IAM execution role to use to update the stack set. If you do not specify an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.\n Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.\n If you specify a customized execution role, AWS CloudFormation uses that role to update the stack. If you do not specify a customized execution role, AWS CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.\n \n\n :type OperationId: string\n :param OperationId: The unique ID for this stack set operation.\n The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.\n If you don't specify an operation ID, AWS CloudFormation generates one automatically.\n Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED .\n This field is autopopulated if not provided.\n \n\n :type Accounts: list\n :param Accounts: The accounts in which to update associated stack instances. If you specify accounts, you must also specify the regions in which to update stack set instances.\n To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.\n If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.\n (string) --\n \n\n :type Regions: list\n :param Regions: The regions in which to update associated stack instances.
If you specify regions, you must also specify accounts in which to update stack set instances.\n To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.\n If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_termination_protection(EnableTerminationProtection=None, StackName=None):\n \"\"\"\n Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the AWS CloudFormation User Guide .\n For nested stacks , termination protection is set on the root stack and cannot be changed directly on the nested stack.\n See also: AWS API Documentation\n \n \n :example: response = client.update_termination_protection(\n EnableTerminationProtection=True|False,\n StackName='string'\n )\n \n \n :type EnableTerminationProtection: boolean\n :param EnableTerminationProtection: [REQUIRED]\n Whether to enable termination protection on the specified stack.\n \n\n :type StackName: string\n :param StackName: [REQUIRED]\n The name or unique ID of the stack for which you want to set termination protection.\n \n\n :rtype: dict\n :return: {\n 'StackId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef validate_template(TemplateBody=None, TemplateURL=None):\n \"\"\"\n Validates a specified template. AWS CloudFormation first checks if the template is valid JSON. If it isn't, AWS CloudFormation checks if the template is valid YAML. If both these checks fail, AWS CloudFormation returns a template validation error.\n See also: AWS API Documentation\n \n Examples\n This example validates the specified template.\n Expected Output:\n \n :example: response = client.validate_template(\n TemplateBody='string',\n TemplateURL='string'\n )\n \n \n :type TemplateBody: string\n :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must pass TemplateURL or TemplateBody . If both are passed, only TemplateBody is used.\n \n\n :type TemplateURL: string\n :param TemplateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.\n Conditional: You must pass TemplateURL or TemplateBody . 
If both are passed, only TemplateBody is used.\n \n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'ParameterKey': 'string',\n 'DefaultValue': 'string',\n 'NoEcho': True|False,\n 'Description': 'string'\n },\n ],\n 'Description': 'string',\n 'Capabilities': [\n 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',\n ],\n 'CapabilitiesReason': 'string',\n 'DeclaredTransforms': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.7139837741851807, "alphanum_fraction": 0.7196573615074158, "avg_line_length": 59.12374496459961, "blob_id": "9ec46290b5d96f22c5caead5cceee8f7d85cf4c8", "content_id": "2b9d0e51b4358f6b041af35fc1c294e57c6c1de6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17978, "license_type": "permissive", "max_line_length": 821, "num_lines": 299, "path": "/pyboto3/kinesisvideoarchivedmedia.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_hls_streaming_session_url(StreamName=None, StreamARN=None, PlaybackMode=None, HLSFragmentSelector=None, DiscontinuityMode=None, Expires=None, MaxMediaPlaylistFragmentResults=None):\n \"\"\"\n Retrieves an HTTP Live Streaming (HLS) URL for the stream. 
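As an illustrative sketch only (the stream name below is a hypothetical placeholder and credentials are assumed to be configured), a call might look like the following; note that this operation must be sent to the stream's data endpoint, which is discovered through the kinesisvideo control-plane client:

    import boto3

    # Discover the data endpoint that serves GET_HLS_STREAMING_SESSION_URL.
    kv = boto3.client('kinesisvideo')
    endpoint = kv.get_data_endpoint(
        StreamName='my-stream',  # hypothetical stream name
        APIName='GET_HLS_STREAMING_SESSION_URL'
    )['DataEndpoint']

    # Point the archived-media client at that endpoint and request a session URL.
    kvam = boto3.client('kinesis-video-archived-media', endpoint_url=endpoint)
    url = kvam.get_hls_streaming_session_url(
        StreamName='my-stream',
        PlaybackMode='LIVE'
    )['HLSStreamingSessionURL']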
You can then open the URL in a browser or media player to view the stream contents.\n You must specify either the StreamName or the StreamARN .\n An Amazon Kinesis video stream has the following requirements for providing data through HLS:\n Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF), rather than the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification .\n The following procedure shows how to use HLS with Kinesis Video Streams:\n You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams . For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing . Charges for both HLS sessions and outgoing AWS data apply.\n For more information about HLS, see HTTP Live Streaming on the Apple Developer site .\n See also: AWS API Documentation\n \n \n :example: response = client.get_hls_streaming_session_url(\n StreamName='string',\n StreamARN='string',\n PlaybackMode='LIVE'|'ON_DEMAND',\n HLSFragmentSelector={\n 'FragmentSelectorType': 'PRODUCER_TIMESTAMP'|'SERVER_TIMESTAMP',\n 'TimestampRange': {\n 'StartTimestamp': datetime(2015, 1, 1),\n 'EndTimestamp': datetime(2015, 1, 1)\n }\n },\n DiscontinuityMode='ALWAYS'|'NEVER',\n Expires=123,\n MaxMediaPlaylistFragmentResults=123\n )\n \n \n :type StreamName: string\n :param StreamName: The name of the stream for which to retrieve the HLS master playlist URL.\n You must specify either the StreamName or the StreamARN .\n \n\n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the stream for which to retrieve the HLS master playlist URL.\n You must specify either the StreamName or the StreamARN .\n \n\n :type PlaybackMode: string\n :param PlaybackMode: Whether to retrieve live or archived, on-demand data.\n Features of the two types of session include the following:\n **LIVE ** : For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a 'live' notification, with no scrubber control for choosing the position in the playback window to display.\n Note\n In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.\n **ON_DEMAND ** : For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults . The playlist must be retrieved only once for each session. 
When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.\n In both playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP , and if there are multiple fragments with the same start time stamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different time stamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.\n The default is LIVE .\n \n\n :type HLSFragmentSelector: dict\n :param HLSFragmentSelector: The time range of the requested fragment, and the source of the time stamps.\n This parameter is required if PlaybackMode is ON_DEMAND . This parameter is optional if PlaybackMode is LIVE . If PlaybackMode is LIVE , the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND , both FragmentSelectorType and TimestampRange must be set.\n FragmentSelectorType (string) --The source of the time stamps for the requested media.\n When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND , the first fragment ingested with a producer time stamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer time stamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.\n Fragments that have duplicate producer time stamps are deduplicated. This means that if producers are producing a stream of fragments with producer time stamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested time stamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.\n When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE , the producer time stamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server time stamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer time stamps with values now, they are not included in the HLS media playlist.\n The default is SERVER_TIMESTAMP .\n TimestampRange (dict) --The start and end of the time stamp range for the requested media.\n This value should not be present if PlaybackType is LIVE .\n StartTimestamp (datetime) --The start of the time stamp range for the requested media.\n If the HLSTimestampRange value is specified, the StartTimestamp value is required.\n Note\n This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP , the StartTimestamp must be later than the stream head.\n EndTimestamp (datetime) --The end of the time stamp range for the requested media. 
This value must be within 3 hours of the specified StartTimestamp , and it must be later than the StartTimestamp value.\n If FragmentSelectorType for the request is SERVER_TIMESTAMP , this value must be in the past.\n If the HLSTimestampRange value is specified, the EndTimestamp value is required.\n Note\n This value is inclusive. The EndTimestamp is compared to the (starting) time stamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.\n \n \n\n :type DiscontinuityMode: string\n :param DiscontinuityMode: Specifies when flags marking discontinuities between fragments will be added to the media playlists. The default is ALWAYS when HLSFragmentSelector is SERVER_TIMESTAMP , and NEVER when it is PRODUCER_TIMESTAMP .\n Media players typically build a timeline of media content to play, based on the time stamps of each fragment. This means that if there is any overlap between fragments (as is typical if HLSFragmentSelector is SERVER_TIMESTAMP ), the media player timeline has small gaps between fragments in some places, and overwrites frames in other places. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the fragment being played immediately after the previous fragment. We recommend that you always have discontinuity flags between fragments if the fragment time stamps are not accurate or if fragments might be missing. You should not place discontinuity flags between fragments for the player timeline to accurately map to the producer time stamps.\n \n\n :type Expires: integer\n :param Expires: The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).\n When a session expires, no new calls to GetHLSMasterPlaylist , GetHLSMediaPlaylist , GetMP4InitFragment , or GetMP4MediaFragment can be made for that session.\n The default is 300 (5 minutes).\n \n\n :type MaxMediaPlaylistFragmentResults: integer\n :param MaxMediaPlaylistFragmentResults: The maximum number of fragments that are returned in the HLS media playlists.\n When the PlaybackMode is LIVE , the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND , the oldest fragments are returned, up to this maximum number.\n When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.\n The default is 5 fragments if PlaybackMode is LIVE , and 1,000 if PlaybackMode is ON_DEMAND .\n The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.\n \n\n :rtype: dict\n :return: {\n 'HLSStreamingSessionURL': 'string'\n }\n \n \n :returns: \n Get an endpoint using GetDataEndpoint , specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.\n Retrieve the HLS URL using GetHLSStreamingSessionURL . Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. 
GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).\n \n \"\"\"\n pass\n\ndef get_media_for_fragment_list(StreamName=None, Fragments=None):\n \"\"\"\n Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.\n The following limits apply when using the GetMediaForFragmentList API:\n See also: AWS API Documentation\n \n \n :example: response = client.get_media_for_fragment_list(\n StreamName='string',\n Fragments=[\n 'string',\n ]\n )\n \n \n :type StreamName: string\n :param StreamName: [REQUIRED]\n The name of the stream from which to retrieve fragment media.\n \n\n :type Fragments: list\n :param Fragments: [REQUIRED]\n A list of the numbers of fragments for which to retrieve media. You retrieve these values with ListFragments .\n (string) --\n \n\n :rtype: dict\n :return: {\n 'ContentType': 'string',\n 'Payload': StreamingBody()\n }\n \n \n :returns: \n StreamName (string) -- [REQUIRED]\n The name of the stream from which to retrieve fragment media.\n \n Fragments (list) -- [REQUIRED]\n A list of the numbers of fragments for which to retrieve media. You retrieve these values with ListFragments .\n \n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_fragments(StreamName=None, MaxResults=None, NextToken=None, FragmentSelector=None):\n \"\"\"\n Returns a list of Fragment objects from the specified stream and start location within the archived data.\n See also: AWS API Documentation\n \n \n :example: response = client.list_fragments(\n StreamName='string',\n MaxResults=123,\n NextToken='string',\n FragmentSelector={\n 'FragmentSelectorType': 'PRODUCER_TIMESTAMP'|'SERVER_TIMESTAMP',\n 'TimestampRange': {\n 'StartTimestamp': datetime(2015, 1, 1),\n 'EndTimestamp': datetime(2015, 1, 1)\n }\n }\n )\n \n \n :type StreamName: string\n :param StreamName: [REQUIRED]\n The name of the stream from which to retrieve a fragment list.\n \n\n :type MaxResults: integer\n :param MaxResults: The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results , then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.\n\n :type NextToken: string\n :param NextToken: A token to specify where to start paginating. 
This is the ListFragmentsOutput$NextToken from a previously truncated response.\n\n :type FragmentSelector: dict\n :param FragmentSelector: Describes the time stamp range and time stamp origin for the range of fragments to return.\n FragmentSelectorType (string) -- [REQUIRED]The origin of the time stamps to use (Server or Producer).\n TimestampRange (dict) -- [REQUIRED]The range of time stamps to return.\n StartTimestamp (datetime) -- [REQUIRED]The starting time stamp in the range of time stamps for which to return fragments.\n EndTimestamp (datetime) -- [REQUIRED]The ending time stamp in the range of time stamps for which to return fragments.\n \n \n\n :rtype: dict\n :return: {\n 'Fragments': [\n {\n 'FragmentNumber': 'string',\n 'FragmentSizeInBytes': 123,\n 'ProducerTimestamp': datetime(2015, 1, 1),\n 'ServerTimestamp': datetime(2015, 1, 1),\n 'FragmentLengthInMilliseconds': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5901327133178711, "alphanum_fraction": 0.5976133346557617, "avg_line_length": 58.07185363769531, "blob_id": "6f62c2ef26993bfb220ce3ae6b4ecaee6e66d7c7", "content_id": "a7e05e93793275da32f7f32385930a2be3f51eb1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455445, "license_type": "permissive", "max_line_length": 796, "num_lines": 7710, "path": "/pyboto3/dynamodb.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_get_item(RequestItems=None, ReturnConsumedCapacity=None):\n \"\"\"\n The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.\n A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys . You can use this value to retry the operation starting with the next item to get.\n For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. 
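One possible way to drain partial results is to loop until UnprocessedKeys comes back empty. The sketch below assumes a configured boto3 client and a hypothetical Music table; production code would also add exponential backoff between retries:

    import boto3

    client = boto3.client('dynamodb')
    request = {
        'Music': {  # hypothetical table and key
            'Keys': [{'Artist': {'S': 'No One You Know'},
                      'SongTitle': {'S': 'Call Me Today'}}]
        }
    }
    items = []
    while request:
        response = client.batch_get_item(RequestItems=request)
        items.extend(response['Responses'].get('Music', []))
        # Resubmit whatever the service could not process this round.
        request = response.get('UnprocessedKeys', {})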
If desired, your application can include its own logic to assemble the pages of results into one data set.\n If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException . If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys .\n By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.\n In order to minimize response latency, BatchGetItem retrieves items in parallel.\n When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.\n If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide .\n See also: AWS API Documentation\n \n Examples\n This example reads multiple items from the Music table using a batch of three GetItem requests. Only the AlbumTitle attribute is returned.\n Expected Output:\n \n :example: response = client.batch_get_item(\n RequestItems={\n 'string': {\n 'Keys': [\n {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ],\n 'AttributesToGet': [\n 'string',\n ],\n 'ConsistentRead': True|False,\n 'ProjectionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n }\n }\n },\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE'\n )\n \n \n :type RequestItems: dict\n :param RequestItems: [REQUIRED]\n A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.\n Each element in the map of items to retrieve consists of the following:\n ConsistentRead - If true , a strongly consistent read is used; if false (the default), an eventually consistent read is used.\n ExpressionAttributeNames - One or more substitution tokens for attribute names in the ProjectionExpression parameter. The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). 
To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n Keys - An array of primary key attribute values that define specific items in the table. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key value. For a composite key, you must provide both the partition key value and the sort key value.\n ProjectionExpression - A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n AttributesToGet - This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents a set of primary keys and, for each key, the attributes to retrieve from the table.\n For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key. For a composite primary key, you must provide both the partition key and the sort key.\n Keys (list) -- [REQUIRED]The primary key attribute values that define the items and the attributes associated with the items.\n (dict) --\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n AttributesToGet (list) --This is a legacy parameter. Use ProjectionExpression instead. For more information, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide .\n (string) --\n ConsistentRead (boolean) --The consistency of a read operation. If set to true , then a strongly consistent read is used; otherwise, an eventually consistent read is used.\n ProjectionExpression (string) --A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the ProjectionExpression must be separated by commas.\n If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.\n For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n \n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. 
In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :rtype: dict\n :return: {\n 'Responses': {\n 'string': [\n {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ]\n },\n 'UnprocessedKeys': {\n 'string': {\n 'Keys': [\n {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ],\n 'AttributesToGet': [\n 'string',\n ],\n 'ConsistentRead': True|False,\n 'ProjectionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n }\n }\n },\n 'ConsumedCapacity': [\n {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_write_item(RequestItems=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None):\n \"\"\"\n The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.\n The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.\n Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException .\n With BatchWriteItem , you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.\n If you use a programming language that supports concurrency, you can use threads to write items in parallel. 
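For instance, a rough sketch using Python's standard concurrent.futures (illustrative only; the Music table and the generated items are placeholders, and real code would also retry any UnprocessedItems with backoff):

    import boto3
    from concurrent.futures import ThreadPoolExecutor

    client = boto3.client('dynamodb')
    # Split the workload into batches of at most 25 write requests each.
    batches = [
        [{'PutRequest': {'Item': {'Artist': {'S': 'Acme'},
                                  'SongTitle': {'S': 'Track %d' % i}}}}]
        for i in range(4)
    ]

    def write_batch(batch):
        response = client.batch_write_item(RequestItems={'Music': batch})
        return response.get('UnprocessedItems', {})

    # Low-level boto3 clients are safe to share across threads.
    with ThreadPoolExecutor(max_workers=4) as pool:
        leftovers = list(pool.map(write_batch, batches))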
Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.\n Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.\n If one or more of the following is true, DynamoDB rejects the entire batch write operation:\n See also: AWS API Documentation\n \n Examples\n This example adds three new items to the Music table using a batch of three PutItem requests.\n Expected Output:\n \n :example: response = client.batch_write_item(\n RequestItems={\n 'string': [\n {\n 'PutRequest': {\n 'Item': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n },\n 'DeleteRequest': {\n 'Key': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n }\n },\n ]\n },\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ReturnItemCollectionMetrics='SIZE'|'NONE'\n )\n \n \n :type RequestItems: dict\n :param RequestItems: [REQUIRED]\n A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest ). Each element in the map consists of the following:\n DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:\n Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.\n PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:\n Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception. If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.\n \n (string) --\n (list) --\n (dict) --Represents an operation to perform - either DeleteItem or PutItem . You can only request one of these operations, not both, in a single WriteRequest . 
If you do need to perform both of these operations, you will need to provide two separate WriteRequest objects.\n PutRequest (dict) --A request to perform a PutItem operation.\n Item (dict) -- [REQUIRED]A map of attribute name to attribute values, representing the primary key of an item to be processed by PutItem . All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema. If any attributes are present in the item which are part of an index key schema for the table, their types must match the index key schema.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n DeleteRequest (dict) --A request to perform a DeleteItem operation.\n Key (dict) -- [REQUIRED]A map of attribute name to attribute values, representing the primary key of the item to delete. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. 
However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n \n \n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ReturnItemCollectionMetrics: string\n :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.\n\n :rtype: dict\n :return: {\n 'UnprocessedItems': {\n 'string': [\n {\n 'PutRequest': {\n 'Item': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n },\n 'DeleteRequest': {\n 'Key': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... 
recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n }\n },\n ]\n },\n 'ItemCollectionMetrics': {\n 'string': [\n {\n 'ItemCollectionKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'SizeEstimateRangeGB': [\n 123.0,\n ]\n },\n ]\n },\n 'ConsumedCapacity': [\n {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n ]\n }\n \n \n :returns: \n RequestItems (dict) -- [REQUIRED]\n A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest ). Each element in the map consists of the following:\n \n DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:\n Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.\n \n \n PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:\n Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception. If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.\n \n \n \n \n (string) --\n (list) --\n (dict) --Represents an operation to perform - either DeleteItem or PutItem . You can only request one of these operations, not both, in a single WriteRequest . If you do need to perform both of these operations, you will need to provide two separate WriteRequest objects.\n \n PutRequest (dict) --A request to perform a PutItem operation.\n \n Item (dict) -- [REQUIRED]A map of attribute name to attribute values, representing the primary key of an item to be processed by PutItem . All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema. If any attributes are present in the item which are part of an index key schema for the table, their types must match the index key schema.\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n \n \n \n \n \n \n DeleteRequest (dict) --A request to perform a DeleteItem operation.\n \n Key (dict) -- [REQUIRED]A map of attribute name to attribute values, representing the primary key of the item to delete. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema.\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. 
For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ReturnConsumedCapacity (string) -- Determines the level of detail about provisioned throughput consumption that is returned in the response:\n \n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n \n ReturnItemCollectionMetrics (string) -- Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_backup(TableName=None, BackupName=None):\n \"\"\"\n Creates a backup for an existing table.\n Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.\n When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. 
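A minimal invocation sketch (the table and backup names are hypothetical placeholders, and the client is assumed to be configured):

    import boto3

    client = boto3.client('dynamodb')
    backup = client.create_backup(
        TableName='Music',             # hypothetical table
        BackupName='Music-2018-12-14'  # hypothetical backup name
    )['BackupDetails']
    # BackupStatus starts as CREATING; the ARN identifies the new backup.
    print(backup['BackupArn'], backup['BackupStatus'])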
Backup requests are processed instantaneously and become available for restore within minutes.\n You can call CreateBackup at a maximum rate of 50 times per second.\n All backups in DynamoDB work without consuming any provisioned throughput on the table.\n If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be included. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.\n Along with data, the following are also included on the backups:\n See also: AWS API Documentation\n \n \n :example: response = client.create_backup(\n TableName='string',\n BackupName='string'\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table.\n \n\n :type BackupName: string\n :param BackupName: [REQUIRED]\n Specified name for the backup.\n \n\n :rtype: dict\n :return: {\n 'BackupDetails': {\n 'BackupArn': 'string',\n 'BackupName': 'string',\n 'BackupSizeBytes': 123,\n 'BackupStatus': 'CREATING'|'DELETED'|'AVAILABLE',\n 'BackupType': 'USER'|'SYSTEM',\n 'BackupCreationDateTime': datetime(2015, 1, 1),\n 'BackupExpiryDateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n TableName (string) -- [REQUIRED]\n The name of the table.\n \n BackupName (string) -- [REQUIRED]\n Specified name for the backup.\n \n \n \"\"\"\n pass\n\ndef create_global_table(GlobalTableName=None, ReplicationGroup=None):\n \"\"\"\n Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.\n If you want to add a new replica table to a global table, each of the following conditions must be true:\n If global secondary indexes are specified, then the following conditions must also be met:\n See also: AWS API Documentation\n \n \n :example: response = client.create_global_table(\n GlobalTableName='string',\n ReplicationGroup=[\n {\n 'RegionName': 'string'\n },\n ]\n )\n \n \n :type GlobalTableName: string\n :param GlobalTableName: [REQUIRED]\n The global table name.\n \n\n :type ReplicationGroup: list\n :param ReplicationGroup: [REQUIRED]\n The regions where the global table needs to be created.\n (dict) --Represents the properties of a replica.\n RegionName (string) --The region where the replica needs to be created.\n \n \n\n :rtype: dict\n :return: {\n 'GlobalTableDescription': {\n 'ReplicationGroup': [\n {\n 'RegionName': 'string'\n },\n ],\n 'GlobalTableArn': 'string',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'GlobalTableStatus': 'CREATING'|'ACTIVE'|'DELETING'|'UPDATING',\n 'GlobalTableName': 'string'\n }\n }\n \n \n :returns: \n The global secondary indexes must have the same name.\n The global secondary indexes must have the same hash key and sort key (if present).\n \n \"\"\"\n pass\n\ndef create_table(AttributeDefinitions=None, TableName=None, KeySchema=None, LocalSecondaryIndexes=None, GlobalSecondaryIndexes=None, BillingMode=None, ProvisionedThroughput=None, StreamSpecification=None, SSESpecification=None):\n \"\"\"\n The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with the same name if you create the tables in different regions.\n You can optionally define secondary indexes on the new table, as part of the CreateTable operation. 
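As a sketch, a simple table with a composite primary key could be created and then polled with the built-in waiter until it is usable (all names below are placeholders):

    import boto3

    client = boto3.client('dynamodb')
    client.create_table(
        TableName='Music',
        AttributeDefinitions=[
            {'AttributeName': 'Artist', 'AttributeType': 'S'},
            {'AttributeName': 'SongTitle', 'AttributeType': 'S'},
        ],
        KeySchema=[
            {'AttributeName': 'Artist', 'KeyType': 'HASH'},
            {'AttributeName': 'SongTitle', 'KeyType': 'RANGE'},
        ],
        BillingMode='PAY_PER_REQUEST',
    )
    # Block until the table leaves the CREATING state.
    client.get_waiter('table_exists').wait(TableName='Music')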
If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.\n You can use the DescribeTable action to check the table status.\n See also: AWS API Documentation\n \n Examples\n This example creates a table named Music.\n Expected Output:\n \n :example: response = client.create_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n TableName='string',\n KeySchema=[\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n LocalSecondaryIndexes=[\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n }\n },\n ],\n GlobalSecondaryIndexes=[\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n }\n },\n ],\n BillingMode='PROVISIONED'|'PAY_PER_REQUEST',\n ProvisionedThroughput={\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n StreamSpecification={\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n SSESpecification={\n 'Enabled': True|False,\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyId': 'string'\n }\n )\n \n \n :type AttributeDefinitions: list\n :param AttributeDefinitions: [REQUIRED]\n An array of attributes that describe the key schema for the table and indexes.\n (dict) --Represents an attribute for describing the key schema for the table and indexes.\n AttributeName (string) -- [REQUIRED]A name for the attribute.\n AttributeType (string) -- [REQUIRED]The data type for the attribute, where:\n S - the attribute is of type String\n N - the attribute is of type Number\n B - the attribute is of type Binary\n \n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to create.\n \n\n :type KeySchema: list\n :param KeySchema: [REQUIRED]\n Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model in the Amazon DynamoDB Developer Guide .\n Each KeySchemaElement in the array is composed of:\n AttributeName - The name of this key attribute.\n KeyType - The role that the key attribute will assume:\n HASH - partition key\n RANGE - sort key\n \n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . 
The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n For a simple primary key (partition key), you must provide exactly one element with a KeyType of HASH .\n For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order: The first element must have a KeyType of HASH , and the second element must have a KeyType of RANGE .\n For more information, see Specifying the Primary Key in the Amazon DynamoDB Developer Guide .\n (dict) --Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.\n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.\n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.\n AttributeName (string) -- [REQUIRED]The name of a key attribute.\n KeyType (string) -- [REQUIRED]The role that this key attribute will assume:\n HASH - partition key\n RANGE - sort key\n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n \n \n\n :type LocalSecondaryIndexes: list\n :param LocalSecondaryIndexes: One or more local secondary indexes (the maximum is five) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.\n Each local secondary index in the array includes the following:\n IndexName - The name of the local secondary index. Must be unique only for this table.\n KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same partition key as the table.\n Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:\n ProjectionType - One of the following:\n KEYS_ONLY - Only the index and primary keys are projected into the index.\n INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes .\n ALL - All of the table attributes are projected into the index.\n NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes , summed across all of the secondary indexes, must not exceed 20. 
If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.\n \n (dict) --Represents the properties of a local secondary index.\n IndexName (string) -- [REQUIRED]The name of the local secondary index. The name must be unique among all other indexes on this table.\n KeySchema (list) -- [REQUIRED]The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types:\n HASH - partition key\n RANGE - sort key\n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n (dict) --Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.\n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.\n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.\n AttributeName (string) -- [REQUIRED]The name of a key attribute.\n KeyType (string) -- [REQUIRED]The role that this key attribute will assume:\n HASH - partition key\n RANGE - sort key\n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n \n Projection (dict) -- [REQUIRED]Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.\n ProjectionType (string) --The set of attributes that are projected into the index:\n KEYS_ONLY - Only the index and primary keys are projected into the index.\n INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes .\n ALL - All of the table attributes are projected into the index.\n NonKeyAttributes (list) --Represents the non-key attribute names which will be projected into the index.\n For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.\n (string) --\n \n \n\n :type GlobalSecondaryIndexes: list\n :param GlobalSecondaryIndexes: One or more global secondary indexes (the maximum is five) to be created on the table. 
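For orientation, a single-index sketch (index and attribute names are illustrative; the ProvisionedThroughput entry applies when the table uses PROVISIONED billing mode):\n GlobalSecondaryIndexes=[\n {\n 'IndexName': 'GenreIndex',\n 'KeySchema': [{'AttributeName': 'Genre', 'KeyType': 'HASH'}],\n 'Projection': {'ProjectionType': 'KEYS_ONLY'},\n 'ProvisionedThroughput': {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5}\n }\n ]\n 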
Each global secondary index in the array includes the following:\n IndexName - The name of the global secondary index. Must be unique only for this table.\n KeySchema - Specifies the key schema for the global secondary index.\n Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:\n ProjectionType - One of the following:\n KEYS_ONLY - Only the index and primary keys are projected into the index.\n INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes .\n ALL - All of the table attributes are projected into the index.\n NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes , summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.\n ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units.\n (dict) --Represents the properties of a global secondary index.\n IndexName (string) -- [REQUIRED]The name of the global secondary index. The name must be unique among all other indexes on this table.\n KeySchema (list) -- [REQUIRED]The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:\n HASH - partition key\n RANGE - sort key\n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n (dict) --Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.\n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.\n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.\n AttributeName (string) -- [REQUIRED]The name of a key attribute.\n KeyType (string) -- [REQUIRED]The role that this key attribute will assume:\n HASH - partition key\n RANGE - sort key\n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . 
The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n \n Projection (dict) -- [REQUIRED]Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.\n ProjectionType (string) --The set of attributes that are projected into the index:\n KEYS_ONLY - Only the index and primary keys are projected into the index.\n INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes .\n ALL - All of the table attributes are projected into the index.\n NonKeyAttributes (list) --Represents the non-key attribute names which will be projected into the index.\n For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.\n (string) --\n \n ProvisionedThroughput (dict) --Represents the provisioned throughput settings for the specified global secondary index.\n For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide .\n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . 
For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n\n :type StreamSpecification: dict\n :param StreamSpecification: The settings for DynamoDB Streams on the table. These settings consist of:\n StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled (false).\n StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are:\n KEYS_ONLY - Only the key attributes of the modified item are written to the stream.\n NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.\n OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.\n NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.\n \n StreamEnabled (boolean) --Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.\n StreamViewType (string) --When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are:\n KEYS_ONLY - Only the key attributes of the modified item are written to the stream.\n NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.\n OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.\n NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.\n \n\n :type SSESpecification: dict\n :param SSESpecification: Represents the settings used to enable server-side encryption.\n Enabled (boolean) --Indicates whether server-side encryption is enabled (true) or disabled (false) on the table. If enabled (true), server-side encryption type is set to KMS . If disabled (false) or not specified, server-side encryption is set to AWS owned CMK.\n SSEType (string) --Server-side encryption type:\n AES256 - Server-side encryption which uses the AES256 algorithm (not applicable).\n KMS - Server-side encryption which uses AWS Key Management Service. Key is stored in your account and is managed by AWS KMS (KMS charges apply).\n KMSMasterKeyId (string) --The KMS Master Key (CMK) which should be used for the KMS encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. 
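For example, a sketch that opts into a customer managed CMK by alias (the alias here is a hypothetical placeholder):\n SSESpecification={\n 'Enabled': True,\n 'SSEType': 'KMS',\n 'KMSMasterKeyId': 'alias/my-dynamodb-cmk'\n }\n 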
Note that you should only provide this parameter if the key is different from the default DynamoDB KMS Master Key alias/aws/dynamodb.\n \n\n :rtype: dict\n :return: {\n 'TableDescription': {\n 'AttributeDefinitions': [\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n 'TableName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'TableSizeBytes': 123,\n 'ItemCount': 123,\n 'TableArn': 'string',\n 'TableId': 'string',\n 'BillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'Backfilling': True|False,\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'StreamSpecification': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'LatestStreamLabel': 'string',\n 'LatestStreamArn': 'string',\n 'RestoreSummary': {\n 'SourceBackupArn': 'string',\n 'SourceTableArn': 'string',\n 'RestoreDateTime': datetime(2015, 1, 1),\n 'RestoreInProgress': True|False\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n \n \n :returns: \n AttributeName - The name of the attribute.\n AttributeType - The data type for the attribute.\n \n \"\"\"\n pass\n\ndef delete_backup(BackupArn=None):\n \"\"\"\n Deletes an existing backup of a table.\n You can call DeleteBackup at a maximum rate of 10 times per second.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupArn='string'\n )\n \n \n :type BackupArn: string\n :param BackupArn: [REQUIRED]\n The ARN associated with the backup.\n \n\n :rtype: dict\n :return: {\n 'BackupDescription': {\n 'BackupDetails': {\n 'BackupArn': 'string',\n 'BackupName': 'string',\n 'BackupSizeBytes': 123,\n 'BackupStatus': 'CREATING'|'DELETED'|'AVAILABLE',\n 'BackupType': 'USER'|'SYSTEM',\n 'BackupCreationDateTime': datetime(2015, 1, 1),\n 'BackupExpiryDateTime': datetime(2015, 1, 1)\n },\n 'SourceTableDetails': {\n 'TableName': 'string',\n 'TableId': 'string',\n 'TableArn': 'string',\n 'TableSizeBytes': 123,\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 
'TableCreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'ItemCount': 123,\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST'\n },\n 'SourceTableFeatureDetails': {\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n }\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n }\n },\n ],\n 'StreamDescription': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'TimeToLiveDescription': {\n 'TimeToLiveStatus': 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',\n 'AttributeName': 'string'\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n }\n \n \n :returns: \n HASH - partition key\n RANGE - sort key\n \n \"\"\"\n pass\n\ndef delete_item(TableName=None, Key=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):\n \"\"\"\n Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.\n In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.\n Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.\n Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.\n See also: AWS API Documentation\n \n Examples\n This example deletes an item from the Music table.\n Expected Output:\n \n :example: response = client.delete_item(\n TableName='string',\n Key={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n Expected={\n 'string': {\n 'Value': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n 'Exists': True|False,\n 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',\n 'AttributeValueList': [\n {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... 
recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n ]\n }\n },\n ConditionalOperator='AND'|'OR',\n ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ReturnItemCollectionMetrics='SIZE'|'NONE',\n ConditionExpression='string',\n ExpressionAttributeNames={\n 'string': 'string'\n },\n ExpressionAttributeValues={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table from which to delete the item.\n \n\n :type Key: dict\n :param Key: [REQUIRED]\n A map of attribute names to AttributeValue objects, representing the primary key of the item to delete.\n For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :type Expected: dict\n :param Expected: This is a legacy parameter. Use ConditionExpression instead. 
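For instance, a guard that would once have been written with Expected is expressed today as a sketch like this (attribute and table names are illustrative):\n response = client.delete_item(\n TableName='Music',\n Key={'Artist': {'S': 'No One You Know'}, 'SongTitle': {'S': 'Scared of My Shadow'}},\n ConditionExpression='attribute_exists(Artist)'\n )\n 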
For more information, see Expected in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:\n Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.\n Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.\n Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.\n Value (dict) --Represents the data for the expected attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. 
For example:\n 'BOOL': true\n Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:\n If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionCheckFailedException .\n If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionCheckFailedException .\n The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.\n DynamoDB returns a ValidationException if:\n Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)\n Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)\n ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . 
Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.\n NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.\n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. 
AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}\n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n \n\n :type ConditionalOperator: string\n :param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. 
For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .\n\n :type ReturnValues: string\n :param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared before they were deleted. For DeleteItem , the valid values are:\n NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)\n ALL_OLD - The content of the old item is returned.\n Note\n The ReturnValues parameter is used by several DynamoDB operations; however, DeleteItem does not recognize any values other than NONE or ALL_OLD .\n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ReturnItemCollectionMetrics: string\n :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.\n\n :type ConditionExpression: string\n :param ConditionExpression: A condition that must be satisfied in order for a conditional DeleteItem to succeed.\n An expression can contain any of the following:\n Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.\n Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN\n Logical operators: AND | OR | NOT\n For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n \n\n :type ExpressionAttributeNames: dict\n :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). 
To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n\n :type ExpressionAttributeValues: dict\n :param ExpressionAttributeValues: One or more values that can be substituted in an expression.\n Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:\n Available | Backordered | Discontinued\n You would first need to specify ExpressionAttributeValues as follows:\n { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }\n You could then use these values in an expression, such as this:\n ProductStatus IN (:avail, :back, :disc)\n For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. 
For example:\n 'BOOL': true\n \n \n\n :rtype: dict\n :return: {\n 'Attributes': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ConsumedCapacity': {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n 'ItemCollectionMetrics': {\n 'ItemCollectionKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'SizeEstimateRangeGB': [\n 123.0,\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_table(TableName=None):\n \"\"\"\n The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException . If the specified table does not exist, DynamoDB returns a ResourceNotFoundException . 
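Because the deletion completes asynchronously, a typical sketch issues the request and then waits (table name illustrative; assumes a configured boto3 client):\n client.delete_table(TableName='Music')\n client.get_waiter('table_not_exists').wait(TableName='Music')\n 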
If table is already in the DELETING state, no error is returned.\n When you delete a table, any indexes on that table are also deleted.\n If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.\n Use the DescribeTable action to check the status of the table.\n See also: AWS API Documentation\n \n Examples\n This example deletes the Music table.\n Expected Output:\n \n :example: response = client.delete_table(\n TableName='string'\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to delete.\n \n\n :rtype: dict\n :return: {\n 'TableDescription': {\n 'AttributeDefinitions': [\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n 'TableName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'TableSizeBytes': 123,\n 'ItemCount': 123,\n 'TableArn': 'string',\n 'TableId': 'string',\n 'BillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'Backfilling': True|False,\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'StreamSpecification': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'LatestStreamLabel': 'string',\n 'LatestStreamArn': 'string',\n 'RestoreSummary': {\n 'SourceBackupArn': 'string',\n 'SourceTableArn': 'string',\n 'RestoreDateTime': datetime(2015, 1, 1),\n 'RestoreInProgress': True|False\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n \n \n :returns: \n S - the attribute is of type String\n N - the attribute is of type Number\n B - the attribute is of type Binary\n \n \"\"\"\n pass\n\ndef describe_backup(BackupArn=None):\n \"\"\"\n Describes an existing backup of a table.\n You can call DescribeBackup at a maximum rate of 10 times per second.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backup(\n BackupArn='string'\n )\n \n \n :type BackupArn: string\n :param BackupArn: [REQUIRED]\n The ARN associated with the backup.\n \n\n :rtype: dict\n 
:return: {\n 'BackupDescription': {\n 'BackupDetails': {\n 'BackupArn': 'string',\n 'BackupName': 'string',\n 'BackupSizeBytes': 123,\n 'BackupStatus': 'CREATING'|'DELETED'|'AVAILABLE',\n 'BackupType': 'USER'|'SYSTEM',\n 'BackupCreationDateTime': datetime(2015, 1, 1),\n 'BackupExpiryDateTime': datetime(2015, 1, 1)\n },\n 'SourceTableDetails': {\n 'TableName': 'string',\n 'TableId': 'string',\n 'TableArn': 'string',\n 'TableSizeBytes': 123,\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableCreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'ItemCount': 123,\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST'\n },\n 'SourceTableFeatureDetails': {\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n }\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n }\n },\n ],\n 'StreamDescription': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'TimeToLiveDescription': {\n 'TimeToLiveStatus': 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',\n 'AttributeName': 'string'\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n }\n \n \n :returns: \n HASH - partition key\n RANGE - sort key\n \n \"\"\"\n pass\n\ndef describe_continuous_backups(TableName=None):\n \"\"\"\n Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. 
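A quick status-check sketch (table name illustrative; the keys follow the response shape shown below):\n resp = client.describe_continuous_backups(TableName='Music')\n desc = resp['ContinuousBackupsDescription']\n print(desc['PointInTimeRecoveryDescription']['PointInTimeRecoveryStatus'])\n 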
If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.\n Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime .\n You can call DescribeContinuousBackups at a maximum rate of 10 times per second.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_continuous_backups(\n TableName='string'\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n Name of the table for which the customer wants to check the continuous backups and point in time recovery settings.\n \n\n :rtype: dict\n :return: {\n 'ContinuousBackupsDescription': {\n 'ContinuousBackupsStatus': 'ENABLED'|'DISABLED',\n 'PointInTimeRecoveryDescription': {\n 'PointInTimeRecoveryStatus': 'ENABLED'|'DISABLED',\n 'EarliestRestorableDateTime': datetime(2015, 1, 1),\n 'LatestRestorableDateTime': datetime(2015, 1, 1)\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_endpoints():\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.describe_endpoints()\n \n \n :rtype: dict\n :return: {\n 'Endpoints': [\n {\n 'Address': 'string',\n 'CachePeriodInMinutes': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_global_table(GlobalTableName=None):\n \"\"\"\n Returns information about the specified global table.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_global_table(\n GlobalTableName='string'\n )\n \n \n :type GlobalTableName: string\n :param GlobalTableName: [REQUIRED]\n The name of the global table.\n \n\n :rtype: dict\n :return: {\n 'GlobalTableDescription': {\n 'ReplicationGroup': [\n {\n 'RegionName': 'string'\n },\n ],\n 'GlobalTableArn': 'string',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'GlobalTableStatus': 'CREATING'|'ACTIVE'|'DELETING'|'UPDATING',\n 'GlobalTableName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_global_table_settings(GlobalTableName=None):\n \"\"\"\n Describes region specific settings for a global table.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_global_table_settings(\n GlobalTableName='string'\n )\n \n \n :type GlobalTableName: string\n :param GlobalTableName: [REQUIRED]\n The name of the global table to describe.\n \n\n :rtype: dict\n :return: {\n 'GlobalTableName': 'string',\n 'ReplicaSettings': [\n {\n 'RegionName': 'string',\n 'ReplicaStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'ReplicaBillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'ReplicaProvisionedReadCapacityUnits': 123,\n 'ReplicaProvisionedReadCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n },\n 'ReplicaProvisionedWriteCapacityUnits': 123,\n 'ReplicaProvisionedWriteCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n },\n 
'ReplicaGlobalSecondaryIndexSettings': [\n {\n 'IndexName': 'string',\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'ProvisionedReadCapacityUnits': 123,\n 'ProvisionedReadCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n },\n 'ProvisionedWriteCapacityUnits': 123,\n 'ProvisionedWriteCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n }\n },\n ]\n },\n ]\n }\n \n \n :returns: \n PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We recommend using PROVISIONED for predictable workloads.\n PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We recommend using PAY_PER_REQUEST for unpredictable workloads.\n \n \"\"\"\n pass\n\ndef describe_limits():\n \"\"\"\n Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.\n When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see the Limits page in the Amazon DynamoDB Developer Guide .\n Although you can increase these limits by filing a case at AWS Support Center , obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.\n For example, you could use one of the AWS SDKs to do the following:\n Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.\n Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.\n Call ListTables to obtain a list of all your DynamoDB tables.\n For each table name listed by ListTables , call DescribeTable with the table name, and add the read capacity units and write capacity units provisioned for the table itself to your variables. If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.\n Report the sums to your application, to compare with your account-level limits.\n This will let you see whether you are getting close to your account-level limits.\n 
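A minimal sketch of those steps, assuming a configured boto3 client named client (the GSI handling mirrors the step above; error handling is omitted):\n    import boto3\n    client = boto3.client('dynamodb')\n    limits = client.describe_limits()\n    read_units = write_units = 0\n    for page in client.get_paginator('list_tables').paginate():\n        for name in page['TableNames']:\n            table = client.describe_table(TableName=name)['Table']\n            pt = table.get('ProvisionedThroughput', {})\n            read_units += pt.get('ReadCapacityUnits', 0)\n            write_units += pt.get('WriteCapacityUnits', 0)\n            for gsi in table.get('GlobalSecondaryIndexes', []):\n                gpt = gsi.get('ProvisionedThroughput', {})\n                read_units += gpt.get('ReadCapacityUnits', 0)\n                write_units += gpt.get('WriteCapacityUnits', 0)\n    print(read_units, 'of', limits['AccountMaxReadCapacityUnits'], 'read capacity units provisioned')\n    print(write_units, 'of', limits['AccountMaxWriteCapacityUnits'], 'write capacity units provisioned')\n The per-table limits apply only when you are creating a new table. 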
They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.\n For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.\n The DescribeLimits Request element has no content.\n See also: AWS API Documentation\n \n Examples\n The following example returns the maximum read and write capacity units per table, and for the AWS account, in the current AWS region.\n Expected Output:\n \n :example: response = client.describe_limits()\n \n \n :rtype: dict\n :return: {\n 'AccountMaxReadCapacityUnits': 123,\n 'AccountMaxWriteCapacityUnits': 123,\n 'TableMaxReadCapacityUnits': 123,\n 'TableMaxWriteCapacityUnits': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_table(TableName=None):\n \"\"\"\n Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.\n See also: AWS API Documentation\n \n Examples\n This example describes the Music table.\n Expected Output:\n \n :example: response = client.describe_table(\n TableName='string'\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to describe.\n \n\n :rtype: dict\n :return: {\n 'Table': {\n 'AttributeDefinitions': [\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n 'TableName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'TableSizeBytes': 123,\n 'ItemCount': 123,\n 'TableArn': 'string',\n 'TableId': 'string',\n 'BillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'Backfilling': True|False,\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'StreamSpecification': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'LatestStreamLabel': 'string',\n 'LatestStreamArn': 'string',\n 'RestoreSummary': {\n 'SourceBackupArn': 'string',\n 'SourceTableArn': 'string',\n 'RestoreDateTime': datetime(2015, 1, 1),\n 'RestoreInProgress': True|False\n 
},\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n \n \n :returns: \n S - the attribute is of type String\n N - the attribute is of type Number\n B - the attribute is of type Binary\n \n \"\"\"\n pass\n\ndef describe_time_to_live(TableName=None):\n \"\"\"\n Gives a description of the Time to Live (TTL) status on the specified table.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_time_to_live(\n TableName='string'\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to be described.\n \n\n :rtype: dict\n :return: {\n 'TimeToLiveDescription': {\n 'TimeToLiveStatus': 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',\n 'AttributeName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_item(TableName=None, Key=None, AttributesToGet=None, ConsistentRead=None, ReturnConsumedCapacity=None, ProjectionExpression=None, ExpressionAttributeNames=None):\n \"\"\"\n The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.\n See also: AWS API Documentation\n \n Examples\n This example retrieves an item from the Music table. The table has a partition key and a sort key (Artist and SongTitle), so you must specify both of these attributes.\n Expected Output:\n \n :example: response = client.get_item(\n TableName='string',\n Key={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n AttributesToGet=[\n 'string',\n ],\n ConsistentRead=True|False,\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ProjectionExpression='string',\n ExpressionAttributeNames={\n 'string': 'string'\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table containing the requested item.\n \n\n :type Key: dict\n :param Key: [REQUIRED]\n A map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve.\n For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :type AttributesToGet: list\n :param AttributesToGet: This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide .\n (string) --\n \n\n :type ConsistentRead: boolean\n :param ConsistentRead: Determines the read consistency model: If set to true , then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.\n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ProjectionExpression: string\n :param ProjectionExpression: A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.\n If no attribute names are specified, then all attributes will be returned. 
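For example, a minimal sketch (assuming a configured boto3 client named client ; the Music table, its key attributes, and the projected attribute names are placeholder assumptions):\n    import boto3\n    client = boto3.client('dynamodb')\n    resp = client.get_item(\n        TableName='Music',\n        Key={'Artist': {'S': 'Acme Band'}, 'SongTitle': {'S': 'Happy Day'}},\n        ProjectionExpression='AlbumTitle, SongTitle',\n    )\n    item = resp.get('Item')  # the key is absent when no matching item exists\n 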
If any of the requested attributes are not found, they will not appear in the result.\n For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n \n\n :type ExpressionAttributeNames: dict\n :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Item': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ConsumedCapacity': {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_backups(TableName=None, Limit=None, TimeRangeLowerBound=None, TimeRangeUpperBound=None, ExclusiveStartBackupArn=None, BackupType=None):\n \"\"\"\n List backups associated with an AWS account. To list backups for a given table, specify TableName . 
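A minimal pagination sketch, assuming a configured boto3 client named client and a placeholder table name Music :\n    import boto3\n    client = boto3.client('dynamodb')\n    backups = []\n    kwargs = {'TableName': 'Music'}\n    while True:\n        page = client.list_backups(**kwargs)\n        backups.extend(b['BackupArn'] for b in page.get('BackupSummaries', []))\n        last = page.get('LastEvaluatedBackupArn')\n        if not last:\n            break\n        kwargs['ExclusiveStartBackupArn'] = last\n 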
ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.\n In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.\n You can call ListBackups a maximum of 5 times per second.\n See also: AWS API Documentation\n \n \n :example: response = client.list_backups(\n TableName='string',\n Limit=123,\n TimeRangeLowerBound=datetime(2015, 1, 1),\n TimeRangeUpperBound=datetime(2015, 1, 1),\n ExclusiveStartBackupArn='string',\n BackupType='USER'|'SYSTEM'|'ALL'\n )\n \n \n :type TableName: string\n :param TableName: The backups from the table specified by TableName are listed.\n\n :type Limit: integer\n :param Limit: Maximum number of backups to return at once.\n\n :type TimeRangeLowerBound: datetime\n :param TimeRangeLowerBound: Only backups created after this time are listed. TimeRangeLowerBound is inclusive.\n\n :type TimeRangeUpperBound: datetime\n :param TimeRangeUpperBound: Only backups created before this time are listed. TimeRangeUpperBound is exclusive.\n\n :type ExclusiveStartBackupArn: string\n :param ExclusiveStartBackupArn: LastEvaluatedBackupArn is the ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.\n\n :type BackupType: string\n :param BackupType: The backups from the table specified by BackupType are listed.\n Where BackupType can be:\n USER - On-demand backup created by you.\n SYSTEM - On-demand backup automatically created by DynamoDB.\n ALL - All types of on-demand backups (USER and SYSTEM).\n \n\n :rtype: dict\n :return: {\n 'BackupSummaries': [\n {\n 'TableName': 'string',\n 'TableId': 'string',\n 'TableArn': 'string',\n 'BackupArn': 'string',\n 'BackupName': 'string',\n 'BackupCreationDateTime': datetime(2015, 1, 1),\n 'BackupExpiryDateTime': datetime(2015, 1, 1),\n 'BackupStatus': 'CREATING'|'DELETED'|'AVAILABLE',\n 'BackupType': 'USER'|'SYSTEM',\n 'BackupSizeBytes': 123\n },\n ],\n 'LastEvaluatedBackupArn': 'string'\n }\n \n \n :returns: \n USER - You create and manage these using the on-demand backup feature.\n SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). 
System backups allow you to restore the deleted table to the state it was in just before the point of deletion.\n \n \"\"\"\n pass\n\ndef list_global_tables(ExclusiveStartGlobalTableName=None, Limit=None, RegionName=None):\n \"\"\"\n Lists all global tables that have a replica in the specified region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_global_tables(\n ExclusiveStartGlobalTableName='string',\n Limit=123,\n RegionName='string'\n )\n \n \n :type ExclusiveStartGlobalTableName: string\n :param ExclusiveStartGlobalTableName: The first global table name that this operation will evaluate.\n\n :type Limit: integer\n :param Limit: The maximum number of table names to return.\n\n :type RegionName: string\n :param RegionName: Lists the global tables in a specific region.\n\n :rtype: dict\n :return: {\n 'GlobalTables': [\n {\n 'GlobalTableName': 'string',\n 'ReplicationGroup': [\n {\n 'RegionName': 'string'\n },\n ]\n },\n ],\n 'LastEvaluatedGlobalTableName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tables(ExclusiveStartTableName=None, Limit=None):\n \"\"\"\n Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.\n See also: AWS API Documentation\n \n Examples\n This example lists all of the tables associated with the current AWS account and endpoint.\n Expected Output:\n \n :example: response = client.list_tables(\n ExclusiveStartTableName='string',\n Limit=123\n )\n \n \n :type ExclusiveStartTableName: string\n :param ExclusiveStartTableName: The first table name that this operation will evaluate. Use the value that was returned for LastEvaluatedTableName in a previous operation, so that you can obtain the next page of results.\n\n :type Limit: integer\n :param Limit: The maximum number of table names to return. If this parameter is not specified, the limit is 100.\n\n :rtype: dict\n :return: {\n 'TableNames': [\n 'string',\n ],\n 'LastEvaluatedTableName': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_tags_of_resource(ResourceArn=None, NextToken=None):\n \"\"\"\n List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.\n For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_of_resource(\n ResourceArn='string',\n NextToken='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon DynamoDB resource with tags to be listed. This value is an Amazon Resource Name (ARN).\n \n\n :type NextToken: string\n :param NextToken: An optional string that, if supplied, must be copied from the output of a previous call to ListTagsOfResource. When provided in this manner, this API fetches the next page of results.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_item(TableName=None, Item=None, Expected=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, ConditionalOperator=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):\n \"\"\"\n Creates a new item, or replaces an old item with a new item. 
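For example, a minimal conditional-put sketch (assuming a configured boto3 client named client and a hypothetical Music table whose partition key is Artist and sort key is SongTitle ):\n    import boto3\n    client = boto3.client('dynamodb')\n    try:\n        client.put_item(\n            TableName='Music',\n            Item={\n                'Artist': {'S': 'No One You Know'},\n                'SongTitle': {'S': 'Call Me Today'},\n                'AlbumTitle': {'S': 'Somewhat Famous'},\n            },\n            # Succeed only when no item with this primary key exists yet.\n            ConditionExpression='attribute_not_exists(Artist)',\n        )\n    except client.exceptions.ConditionalCheckFailedException:\n        pass  # an item with this key already exists and was left unchanged\n 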
If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.\n When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.\n For more information about PutItem , see Working with Items in the Amazon DynamoDB Developer Guide .\n See also: AWS API Documentation\n \n Examples\n This example adds a new item to the Music table.\n Expected Output:\n \n :example: response = client.put_item(\n TableName='string',\n Item={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n Expected={\n 'string': {\n 'Value': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n 'Exists': True|False,\n 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',\n 'AttributeValueList': [\n {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n ]\n }\n },\n ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ReturnItemCollectionMetrics='SIZE'|'NONE',\n ConditionalOperator='AND'|'OR',\n ConditionExpression='string',\n ExpressionAttributeNames={\n 'string': 'string'\n },\n ExpressionAttributeValues={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to contain the item.\n \n\n :type Item: dict\n :param Item: [REQUIRED]\n A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.\n You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. 
For a composite primary key, you must provide both values for both the partition key and the sort key.\n If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.\n For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide .\n Each element in the Item map is an AttributeValue object.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :type Expected: dict\n :param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:\n Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.\n Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. 
Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.\n Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.\n Value (dict) --Represents the data for the expected attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:\n If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionCheckFailedException .\n If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionCheckFailedException .\n The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.\n DynamoDB returns a ValidationException if:\n Exists is true but there is no Value to check. 
(You expect a value to exist, but don't specify what that value is.)\n Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)\n ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.\n NULL : The attribute does not exist. 
NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.\n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}\n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . 
For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n \n\n :type ReturnValues: string\n :param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem , the valid values are:\n NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)\n ALL_OLD - If PutItem overwrote an attribute name-value pair, then the content of the old item is returned.\n Note\n The ReturnValues parameter is used by several DynamoDB operations; however, PutItem does not recognize any values other than NONE or ALL_OLD .\n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. 
In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ReturnItemCollectionMetrics: string\n :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about any item collections that were modified during the operation. If set to NONE (the default), no statistics are returned.\n\n :type ConditionalOperator: string\n :param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .\n\n :type ConditionExpression: string\n :param ConditionExpression: A condition that must be satisfied in order for a conditional PutItem operation to succeed.\n An expression can contain any of the following:\n Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size . These function names are case-sensitive.\n Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN\n Logical operators: AND | OR | NOT\n For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n \n\n :type ExpressionAttributeNames: dict\n :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n\n :type ExpressionAttributeValues: dict\n :param ExpressionAttributeValues: One or more values that can be substituted in an expression.\n Use the : (colon) character in an expression to dereference an attribute value. 
For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:\n Available | Backordered | Discontinued\n You would first need to specify ExpressionAttributeValues as follows:\n { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }\n You could then use these values in an expression, such as this:\n ProductStatus IN (:avail, :back, :disc)\n For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :rtype: dict\n :return: {\n 'Attributes': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... 
recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ConsumedCapacity': {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n 'ItemCollectionMetrics': {\n 'ItemCollectionKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'SizeEstimateRangeGB': [\n 123.0,\n ]\n }\n }\n \n \n :returns: \n TableName (string) -- [REQUIRED]\n The name of the table to contain the item.\n \n Item (dict) -- [REQUIRED]\n A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.\n You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide both values for both the partition key and the sort key.\n If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.\n For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide .\n Each element in the Item map is an AttributeValue object.\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n \n \n \n \n Expected (dict) -- This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .\n \n (string) --\n (dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:\n \n Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.\n Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.\n \n \n Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.\n \n Value (dict) --Represents the data for the expected attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. 
For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:\n \n If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionCheckFailedException .\n If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionCheckFailedException .\n \n The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.\n DynamoDB returns a ValidationException if:\n \n Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)\n Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)\n \n \n ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n \n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n \n EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]} .\n NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). 
If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n \n \n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a \" is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute \"a \" exists; its data type is not relevant to the NOT_NULL comparison operator.\n \n \n NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.\n \n \n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a \" is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute \"a \" exists; its data type is not relevant to the NULL comparison operator.\n \n \n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS \", \"NS \", or \"BS \"), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating \"a CONTAINS b \", \"a \" can be a list; however, \"b \" cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. 
If the target attribute of the comparison is a set (\"SS \", \"NS \", or \"BS \"), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b \", \"a \" can be a list; however, \"b \" cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"} . Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}\n \n \n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. 
For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n \n \n \n \n \n \n \n \n ReturnValues (string) -- Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem , the valid values are:\n \n NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)\n ALL_OLD - If PutItem overwrote an attribute name-value pair, then the content of the old item is returned.\n \n \n Note\n The ReturnValues parameter is used by several DynamoDB operations; however, PutItem does not recognize any values other than NONE or ALL_OLD .\n \n \n ReturnConsumedCapacity (string) -- Determines the level of detail about provisioned throughput consumption that is returned in the response:\n \n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n \n ReturnItemCollectionMetrics (string) -- Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.\n ConditionalOperator (string) -- This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .\n ConditionExpression (string) -- A condition that must be satisfied in order for a conditional PutItem operation to succeed.\n An expression can contain any of the following:\n \n Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.\n Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN\n Logical operators: AND | OR | NOT\n \n For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n \n ExpressionAttributeNames (dict) -- One or more substitution tokens for attribute names in an expression. 
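For illustration only, the following sketch shows how ConditionExpression and this parameter are typically combined in a conditional PutItem call; the Music table and the Artist and SongTitle attributes are hypothetical names, not part of this API:\n \n # Hypothetical conditional put: succeeds only if no item with this key exists yet.\n response = client.put_item(\n TableName='Music',\n Item={'Artist': {'S': 'No One You Know'}, 'SongTitle': {'S': 'Call Me Today'}},\n ConditionExpression='attribute_not_exists(#A)',\n ExpressionAttributeNames={'#A': 'Artist'}\n )\n 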
The following are some use cases for using ExpressionAttributeNames :\n \n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n \n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n \n Percentile\n \n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n \n {\"#P\":\"Percentile\"}\n \n You could then use this substitution in an expression, as in this example:\n \n #P = :val\n \n \n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n \n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n \n (string) --\n (string) --\n \n \n \n \n ExpressionAttributeValues (dict) -- One or more values that can be substituted in an expression.\n Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:\n \n Available | Backordered | Discontinued\n You would first need to specify ExpressionAttributeValues as follows:\n \n { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }\n You could then use these values in an expression, such as this:\n \n ProductStatus IN (:avail, :back, :disc)\n For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. 
For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef query(TableName=None, IndexName=None, Select=None, AttributesToGet=None, Limit=None, ConsistentRead=None, KeyConditions=None, QueryFilter=None, ConditionalOperator=None, ScanIndexForward=None, ExclusiveStartKey=None, ReturnConsumedCapacity=None, ProjectionExpression=None, FilterExpression=None, KeyConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):\n \"\"\"\n The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).\n Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression . To further refine the Query results, you can optionally provide a FilterExpression . A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.\n A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.\n A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression . If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide .\n You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.\n See also: AWS API Documentation\n \n Examples\n This example queries items in the Music table. The table has a partition key and sort key (Artist and SongTitle), but this query only specifies the partition key value. 
It returns song titles by the artist named \"No One You Know\".\n Expected Output:\n \n :example: response = client.query(\n TableName='string',\n IndexName='string',\n Select='ALL_ATTRIBUTES'|'ALL_PROJECTED_ATTRIBUTES'|'SPECIFIC_ATTRIBUTES'|'COUNT',\n AttributesToGet=[\n 'string',\n ],\n Limit=123,\n ConsistentRead=True|False,\n KeyConditions={\n 'string': {\n 'AttributeValueList': [\n {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n ],\n 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH'\n }\n },\n QueryFilter={\n 'string': {\n 'AttributeValueList': [\n {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n ],\n 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH'\n }\n },\n ConditionalOperator='AND'|'OR',\n ScanIndexForward=True|False,\n ExclusiveStartKey={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ProjectionExpression='string',\n FilterExpression='string',\n KeyConditionExpression='string',\n ExpressionAttributeNames={\n 'string': 'string'\n },\n ExpressionAttributeValues={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table containing the requested items.\n \n\n :type IndexName: string\n :param IndexName: The name of an index to query. This index can be any local secondary index or global secondary index on the table. Note that if you use the IndexName parameter, you must also provide TableName.\n\n :type Select: string\n :param Select: The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.\n ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.\n ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. 
If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES .\n COUNT - Returns the number of matching items, rather than the matching items themselves.\n SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet . This return value is equivalent to specifying AttributesToGet without specifying any value for Select . If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.\n If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES . (This usage is equivalent to specifying AttributesToGet without any value for Select .)\n Note\n If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error.\n \n\n :type AttributesToGet: list\n :param AttributesToGet: This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide .\n (string) --\n \n\n :type Limit: integer\n :param Limit: The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide .\n\n :type ConsistentRead: boolean\n :param ConsistentRead: Determines the read consistency model: If set to true , then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.\n Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true , you will receive a ValidationException .\n \n\n :type KeyConditions: dict\n :param KeyConditions: This is a legacy parameter. Use KeyConditionExpression instead. For more information, see KeyConditions in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the selection criteria for a Query or Scan operation:\n For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. 
For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values.\n For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values.\n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n EQ : Equal. EQ is supported for all data types, including lists and maps. 
AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.\n NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.\n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. 
If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}\n For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide .\n \n \n\n :type QueryFilter: dict\n :param QueryFilter: This is a legacy parameter. Use FilterExpression instead. For more information, see QueryFilter in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the selection criteria for a Query or Scan operation:\n For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values.\n For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values.\n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. 
The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n NE : Not equal. NE is supported for all data types, including lists and maps. 
AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.\n NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.\n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. 
AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}\n For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide .\n \n \n\n :type ConditionalOperator: string\n :param ConditionalOperator: This is a legacy parameter. Use FilterExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .\n\n :type ScanIndexForward: boolean\n :param ScanIndexForward: Specifies the order for index traversal: If true (default), the traversal is performed in ascending order; if false , the traversal is performed in descending order.\n Items with the same partition key value are stored in sorted order by sort key. If the sort key data type is Number, the results are stored in numeric order. For type String, the results are stored in order of UTF-8 bytes. For type Binary, DynamoDB treats each byte of the binary data as unsigned.\n If ScanIndexForward is true , DynamoDB returns the results in the order in which they are stored (by sort key value). This is the default behavior. If ScanIndexForward is false , DynamoDB reads the results in reverse order by sort key value, and then returns the results to the client.\n \n\n :type ExclusiveStartKey: dict\n :param ExclusiveStartKey: The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.\n The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ProjectionExpression: string\n :param ProjectionExpression: A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.\n If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.\n For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n \n\n :type FilterExpression: string\n :param FilterExpression: A string that contains conditions that DynamoDB applies after the Query operation, but before the data is returned to you. 
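For illustration only (the Views attribute and the :minviews token are hypothetical names): FilterExpression='Views >= :minviews' , paired with ExpressionAttributeValues={':minviews': {'N': '100'}} , would discard any already-read item whose Views value is below 100.\n 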
Items that do not satisfy the FilterExpression criteria are not returned.\n A FilterExpression does not allow key attributes. You cannot define a filter expression based on a partition key or a sort key.\n Note\n A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.\n For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide .\n \n\n :type KeyConditionExpression: string\n :param KeyConditionExpression: The condition that specifies the key value(s) for items to be retrieved by the Query action.\n The condition must perform an equality test on a single partition key value.\n The condition can optionally perform one of several comparison tests on a single sort key value. This allows Query to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values.\n The partition key equality test is required, and must be specified in the following format:\n partitionKeyName = :partitionkeyval\n If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the partition key. Following is an example, using the = comparison operator for the sort key:\n partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval\n Valid comparisons for the sort key condition are as follows:\n sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval .\n sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval .\n sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to :sortkeyval .\n sortKeyName > :sortkeyval - true if the sort key value is greater than :sortkeyval .\n sortKeyName >= :sortkeyval - true if the sort key value is greater than or equal to :sortkeyval .\n sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to :sortkeyval1 , and less than or equal to :sortkeyval2 .\n begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name begins_with is case-sensitive.\n Use the ExpressionAttributeValues parameter to replace tokens such as :partitionkeyval and :sortkeyval with actual values at runtime.\n You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:\n Size = :myval\n To work around this, define a placeholder (such as #S ) to represent the attribute name Size . KeyConditionExpression then is as follows:\n #S = :myval\n For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide .\n For more information on ExpressionAttributeNames and ExpressionAttributeValues , see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide .\n \n\n :type ExpressionAttributeNames: dict\n :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. 
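As an illustration of how this parameter combines with KeyConditionExpression and ExpressionAttributeValues , and of paginating with LastEvaluatedKey as described above, consider the following sketch; the Music table, the #A placeholder, and the :artist token are hypothetical names:\n \n # Hypothetical paginated query; '#A' stands in for the partition key name 'Artist'.\n kwargs = {\n 'TableName': 'Music',\n 'KeyConditionExpression': '#A = :artist',\n 'ExpressionAttributeNames': {'#A': 'Artist'},\n 'ExpressionAttributeValues': {':artist': {'S': 'No One You Know'}}\n }\n items = []\n while True:\n response = client.query(**kwargs)\n items.extend(response['Items'])\n if 'LastEvaluatedKey' not in response:\n break # all pages have been read\n kwargs['ExclusiveStartKey'] = response['LastEvaluatedKey'] # resume at the next page\n 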
The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n\n :type ExpressionAttributeValues: dict\n :param ExpressionAttributeValues: One or more values that can be substituted in an expression.\n Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:\n Available | Backordered | Discontinued\n You would first need to specify ExpressionAttributeValues as follows:\n { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }\n You could then use these values in an expression, such as this:\n ProductStatus IN (:avail, :back, :disc)\n For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :rtype: dict\n :return: {\n 'Items': [\n {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ],\n 'Count': 123,\n 'ScannedCount': 123,\n 'LastEvaluatedKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ConsumedCapacity': {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef restore_table_from_backup(TargetTableName=None, BackupArn=None):\n \"\"\"\n Creates a new table from an existing backup. 
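A minimal call sketch (the target table name and backup ARN below are placeholders, not values defined by this API); the built-in table_exists waiter can then be used to block until the restored table becomes ACTIVE:\n response = client.restore_table_from_backup(\n TargetTableName='MusicRestored',\n BackupArn='arn:aws:dynamodb:us-east-1:123456789012:table/Music/backup/01489602797149-73d8d5bc'\n )\n # Wait for the restore to finish before reading or writing the new table.\n client.get_waiter('table_exists').wait(TableName='MusicRestored')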
Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.\n You can call RestoreTableFromBackup at a maximum rate of 10 times per second.\n You must manually set up the following on the restored table:\n See also: AWS API Documentation\n \n \n :example: response = client.restore_table_from_backup(\n TargetTableName='string',\n BackupArn='string'\n )\n \n \n :type TargetTableName: string\n :param TargetTableName: [REQUIRED]\n The name of the new table to which the backup must be restored.\n \n\n :type BackupArn: string\n :param BackupArn: [REQUIRED]\n The ARN associated with the backup.\n \n\n :rtype: dict\n :return: {\n 'TableDescription': {\n 'AttributeDefinitions': [\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n 'TableName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'TableSizeBytes': 123,\n 'ItemCount': 123,\n 'TableArn': 'string',\n 'TableId': 'string',\n 'BillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'Backfilling': True|False,\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'StreamSpecification': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'LatestStreamLabel': 'string',\n 'LatestStreamArn': 'string',\n 'RestoreSummary': {\n 'SourceBackupArn': 'string',\n 'SourceTableArn': 'string',\n 'RestoreDateTime': datetime(2015, 1, 1),\n 'RestoreInProgress': True|False\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n \n \n :returns: \n TargetTableName (string) -- [REQUIRED]\n The name of the new table to which the backup must be restored.\n \n BackupArn (string) -- [REQUIRED]\n The ARN associated with the backup.\n \n \n \"\"\"\n pass\n\ndef restore_table_to_point_in_time(SourceTableName=None, TargetTableName=None, UseLatestRestorableTime=None, RestoreDateTime=None):\n \"\"\"\n Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime . You can restore your table to any point in time during the last 35 days. 
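A minimal call sketch (the table names and timestamp are placeholders; alternatively, pass UseLatestRestorableTime=True instead of RestoreDateTime to restore to the most recent restorable time):\n from datetime import datetime\n response = client.restore_table_to_point_in_time(\n SourceTableName='Music',\n TargetTableName='MusicRestored',\n RestoreDateTime=datetime(2015, 1, 1, 12, 30, 0)\n )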
Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.\n When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.\n Along with data, the following are also included on the new restored table using point in time recovery:\n Global secondary indexes (GSIs)\n Local secondary indexes (LSIs)\n Provisioned read and write capacity\n Encryption settings\n All these settings come from the current settings of the source table at the time of restore.\n You must manually set up the following on the restored table:\n Auto scaling policies\n IAM policies\n CloudWatch metrics and alarms\n Tags\n Stream settings\n Time to Live (TTL) settings\n Point in time recovery settings\n See also: AWS API Documentation\n \n \n :example: response = client.restore_table_to_point_in_time(\n SourceTableName='string',\n TargetTableName='string',\n UseLatestRestorableTime=True|False,\n RestoreDateTime=datetime(2015, 1, 1)\n )\n \n \n :type SourceTableName: string\n :param SourceTableName: [REQUIRED]\n Name of the source table that is being restored.\n \n\n :type TargetTableName: string\n :param TargetTableName: [REQUIRED]\n The name of the new table to which it must be restored.\n \n\n :type UseLatestRestorableTime: boolean\n :param UseLatestRestorableTime: Restore the table to the latest possible time. LatestRestorableDateTime is typically 5 minutes before the current time.\n\n :type RestoreDateTime: datetime\n :param RestoreDateTime: Time in the past to restore the table to.\n\n :rtype: dict\n :return: {\n 'TableDescription': {\n 'AttributeDefinitions': [\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n 'TableName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'TableSizeBytes': 123,\n 'ItemCount': 123,\n 'TableArn': 'string',\n 'TableId': 'string',\n 'BillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'Backfilling': True|False,\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'StreamSpecification': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'LatestStreamLabel': 'string',\n 'LatestStreamArn': 'string',\n 'RestoreSummary': {\n 'SourceBackupArn': 'string',\n 'SourceTableArn': 'string',\n 'RestoreDateTime': datetime(2015, 1, 1),\n 'RestoreInProgress': True|False\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 
'KMSMasterKeyArn': 'string'\n }\n }\n }\n \n \n :returns: \n Auto scaling policies\n IAM policies\n CloudWatch metrics and alarms\n Tags\n Stream settings\n Time to Live (TTL) settings\n Point in time recovery settings\n \n \"\"\"\n pass\n\ndef scan(TableName=None, IndexName=None, AttributesToGet=None, Limit=None, Select=None, ScanFilter=None, ConditionalOperator=None, ExclusiveStartKey=None, ReturnConsumedCapacity=None, TotalSegments=None, Segment=None, ProjectionExpression=None, FilterExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None, ConsistentRead=None):\n \"\"\"\n The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression parameter.\n If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and the results returned to the user include a LastEvaluatedKey value that can be used to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.\n A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression . If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide .\n See also: AWS API Documentation\n \n Examples\n This example scans the entire Music table, and then narrows the results to songs by the artist \"No One You Know\". For each item, only the album title and song title are returned.\n Expected Output:\n \n :example: response = client.scan(\n TableName='string',\n IndexName='string',\n AttributesToGet=[\n 'string',\n ],\n Limit=123,\n Select='ALL_ATTRIBUTES'|'ALL_PROJECTED_ATTRIBUTES'|'SPECIFIC_ATTRIBUTES'|'COUNT',\n ScanFilter={\n 'string': {\n 'AttributeValueList': [\n {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n ],\n 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH'\n }\n },\n ConditionalOperator='AND'|'OR',\n ExclusiveStartKey={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n TotalSegments=123,\n Segment=123,\n ProjectionExpression='string',\n FilterExpression='string',\n ExpressionAttributeNames={\n 'string': 'string'\n },\n ExpressionAttributeValues={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... 
recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ConsistentRead=True|False\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table containing the requested items; or, if you provide IndexName , the name of the table to which that index belongs.\n \n\n :type IndexName: string\n :param IndexName: The name of a secondary index to scan. This index can be any local secondary index or global secondary index. Note that if you use the IndexName parameter, you must also provide TableName .\n\n :type AttributesToGet: list\n :param AttributesToGet: This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide .\n (string) --\n \n\n :type Limit: integer\n :param Limit: The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide .\n\n :type Select: string\n :param Select: The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.\n ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.\n ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES .\n COUNT - Returns the number of matching items, rather than the matching items themselves.\n SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet . This return value is equivalent to specifying AttributesToGet without specifying any value for Select . If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.\n If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES . 
(This usage is equivalent to specifying AttributesToGet without any value for Select .)\n Note\n If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error.\n \n\n :type ScanFilter: dict\n :param ScanFilter: This is a legacy parameter. Use FilterExpression instead. For more information, see ScanFilter in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the selection criteria for a Query or Scan operation:\n For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values.\n For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values.\n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.\n NULL : The attribute does not exist. 
NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.\n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}\n For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide .\n \n \n\n :type ConditionalOperator: string\n :param ConditionalOperator: This is a legacy parameter. Use FilterExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .\n\n :type ExclusiveStartKey: dict\n :param ExclusiveStartKey: The primary key of the first item that this operation will evaluate. 
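A minimal pagination sketch (the table name and the process function are placeholders) showing how ExclusiveStartKey and LastEvaluatedKey work together:\n kwargs = {'TableName': 'Music'}\n while True:\n page = client.scan(**kwargs)\n process(page['Items']) # placeholder for application-specific handling\n if 'LastEvaluatedKey' not in page:\n break # the entire table (or index) has been scanned\n kwargs['ExclusiveStartKey'] = page['LastEvaluatedKey']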
Use the value that was returned for LastEvaluatedKey in the previous operation.\n The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.\n In a parallel scan, a Scan request that includes ExclusiveStartKey must specify the same segment whose previous Scan returned the corresponding value of LastEvaluatedKey .\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type TotalSegments: integer\n :param TotalSegments: For a parallel Scan request, TotalSegments represents the total number of segments into which the Scan operation will be divided. The value of TotalSegments corresponds to the number of application workers that will perform the parallel scan. 
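A minimal per-worker sketch of a parallel scan (the table name, worker_id , and total_workers are assumptions; each worker runs this independently with its own worker_id ):\n def scan_segment(client, worker_id, total_workers):\n kwargs = {'TableName': 'Music', 'Segment': worker_id, 'TotalSegments': total_workers}\n items = []\n while True:\n page = client.scan(**kwargs)\n items.extend(page['Items'])\n if 'LastEvaluatedKey' not in page:\n return items # this segment is exhausted\n kwargs['ExclusiveStartKey'] = page['LastEvaluatedKey']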
For example, if you want to use four application threads to scan a table or an index, specify a TotalSegments value of 4.\n The value for TotalSegments must be greater than or equal to 1, and less than or equal to 1000000. If you specify a TotalSegments value of 1, the Scan operation will be sequential rather than parallel.\n If you specify TotalSegments , you must also specify Segment .\n \n\n :type Segment: integer\n :param Segment: For a parallel Scan request, Segment identifies an individual segment to be scanned by an application worker.\n Segment IDs are zero-based, so the first segment is always 0. For example, if you want to use four application threads to scan a table or an index, then the first thread specifies a Segment value of 0, the second thread specifies 1, and so on.\n The value of LastEvaluatedKey returned from a parallel Scan request must be used as ExclusiveStartKey with the same segment ID in a subsequent Scan operation.\n The value for Segment must be greater than or equal to 0, and less than the value provided for TotalSegments .\n If you provide Segment , you must also provide TotalSegments .\n \n\n :type ProjectionExpression: string\n :param ProjectionExpression: A string that identifies one or more attributes to retrieve from the specified table or index. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.\n If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.\n For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n \n\n :type FilterExpression: string\n :param FilterExpression: A string that contains conditions that DynamoDB applies after the Scan operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.\n Note\n A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.\n For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide .\n \n\n :type ExpressionAttributeNames: dict\n :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). 
To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n\n :type ExpressionAttributeValues: dict\n :param ExpressionAttributeValues: One or more values that can be substituted in an expression.\n Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:\n Available | Backordered | Discontinued\n You would first need to specify ExpressionAttributeValues as follows:\n { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }\n You could then use these values in an expression, such as this:\n ProductStatus IN (:avail, :back, :disc)\n For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. 
For example:\n 'BOOL': true\n \n \n\n :type ConsistentRead: boolean\n :param ConsistentRead: A Boolean value that determines the read consistency model during the scan:\n If ConsistentRead is false , then the data returned from Scan might not contain the results from other recently completed write operations (PutItem, UpdateItem or DeleteItem).\n If ConsistentRead is true , then all of the write operations that completed before the Scan began are guaranteed to be contained in the Scan response.\n The default setting for ConsistentRead is false .\n The ConsistentRead parameter is not supported on global secondary indexes. If you scan a global secondary index with ConsistentRead set to true, you will receive a ValidationException .\n \n\n :rtype: dict\n :return: {\n 'Items': [\n {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n ],\n 'Count': 123,\n 'ScannedCount': 123,\n 'LastEvaluatedKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ConsumedCapacity': {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef tag_resource(ResourceArn=None, Tags=None):\n \"\"\"\n Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.\n For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n ResourceArn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n Identifies the Amazon DynamoDB resource to which tags should be added. This value is an Amazon Resource Name (ARN).\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n The tags to be assigned to the Amazon DynamoDB resource.\n (dict) --Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table.\n AWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag.\n For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide .\n Key (string) -- [REQUIRED]The key of the tag. Tag keys are case-sensitive.
Each DynamoDB table can only have up to one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.\n Value (string) -- [REQUIRED]The value of the tag. Tag values are case-sensitive and can be null.\n \n \n\n \"\"\"\n pass\n\ndef transact_get_items(TransactItems=None, ReturnConsumedCapacity=None):\n \"\"\"\n DynamoDB rejects the entire TransactGetItems request if any of the following is true:\n See also: AWS API Documentation\n \n \n :example: response = client.transact_get_items(\n TransactItems=[\n {\n 'Get': {\n 'Key': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'TableName': 'string',\n 'ProjectionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n }\n }\n },\n ],\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE'\n )\n \n \n :type TransactItems: list\n :param TransactItems: [REQUIRED]\n An ordered array of up to 10 TransactGetItem objects, each of which contains a Get structure.\n (dict) --Specifies an item to be retrieved as part of the transaction.\n Get (dict) -- [REQUIRED]Contains the primary key that identifies the item to get, together with the name of the table that contains the item, and optionally the specific attributes of the item to retrieve.\n Key (dict) -- [REQUIRED]A map of attribute names to AttributeValue objects that specifies the primary key of the item to retrieve.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n TableName (string) -- [REQUIRED]The name of the table from which to retrieve the specified item.\n ProjectionExpression (string) --A string that identifies one or more attributes of the specified item to retrieve from the table. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes of the specified item are returned. If any of the requested attributes are not found, they do not appear in the result.\n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in the ProjectionExpression parameter.\n (string) --\n (string) --\n \n \n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: A value of TOTAL causes consumed capacity information to be returned, and a value of NONE prevents that information from being returned. No other value is valid.\n\n :rtype: dict\n :return: {\n 'ConsumedCapacity': [\n {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n ],\n 'Responses': [\n {\n 'Item': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n },\n ]\n }\n \n \n :returns: \n TransactItems (list) -- [REQUIRED]\n An ordered array of up to 10 TransactGetItem objects, each of which contains a Get structure.\n \n (dict) --Specifies an item to be retrieved as part of the transaction.\n \n Get (dict) -- [REQUIRED]Contains the primary key that identifies the item to get, together with the name of the table that contains the item, and optionally the specific attributes of the item to retrieve.\n \n Key (dict) -- [REQUIRED]A map of attribute names to AttributeValue objects that specifies the primary key of the item to retrieve.\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n S (string) --An attribute of type String. For example:\n \n \"S\": \"Hello\"\n \n N (string) --An attribute of type Number. For example:\n \n \"N\": \"123.45\"\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n B (bytes) --An attribute of type Binary. For example:\n \n \"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"\n \n SS (list) --An attribute of type String Set. 
For example:\n \n \"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]\n \n (string) --\n \n \n NS (list) --An attribute of type Number Set. For example:\n \n \"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n \n (string) --\n \n \n BS (list) --An attribute of type Binary Set. For example:\n \n \"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]\n \n (bytes) --\n \n \n M (dict) --An attribute of type Map. For example:\n \n \"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}\n \n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n \n \n L (list) --An attribute of type List. For example:\n \n \"L\": [\"Cookies\", \"Coffee\", 3.14159]\n \n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n \n \n NULL (boolean) --An attribute of type Null. For example:\n \n \"NULL\": true\n \n BOOL (boolean) --An attribute of type Boolean. For example:\n \n \"BOOL\": true\n \n \n \n \n \n \n \n TableName (string) -- [REQUIRED]The name of the table from which to retrieve the specified item.\n \n ProjectionExpression (string) --A string that identifies one or more attributes of the specified item to retrieve from the table. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes of the specified item are returned. If any of the requested attributes are not found, they do not appear in the result.\n \n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in the ProjectionExpression parameter.\n \n (string) --\n (string) --\n \n \n \n \n \n \n \n \n \n \n ReturnConsumedCapacity (string) -- A value of TOTAL causes consumed capacity information to be returned, and a value of NONE prevents that information from being returned. No other value is valid.\n \n \"\"\"\n pass\n\ndef transact_write_items(TransactItems=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, ClientRequestToken=None):\n \"\"\"\n The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:\n DynamoDB rejects the entire TransactWriteItems request if any of the following is true:\n See also: AWS API Documentation\n \n \n :example: response = client.transact_write_items(\n TransactItems=[\n {\n 'ConditionCheck': {\n 'Key': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'TableName': 'string',\n 'ConditionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n },\n 'ExpressionAttributeValues': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... 
recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ReturnValuesOnConditionCheckFailure': 'ALL_OLD'|'NONE'\n },\n 'Put': {\n 'Item': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'TableName': 'string',\n 'ConditionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n },\n 'ExpressionAttributeValues': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ReturnValuesOnConditionCheckFailure': 'ALL_OLD'|'NONE'\n },\n 'Delete': {\n 'Key': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'TableName': 'string',\n 'ConditionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n },\n 'ExpressionAttributeValues': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ReturnValuesOnConditionCheckFailure': 'ALL_OLD'|'NONE'\n },\n 'Update': {\n 'Key': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'UpdateExpression': 'string',\n 'TableName': 'string',\n 'ConditionExpression': 'string',\n 'ExpressionAttributeNames': {\n 'string': 'string'\n },\n 'ExpressionAttributeValues': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ReturnValuesOnConditionCheckFailure': 'ALL_OLD'|'NONE'\n }\n },\n ],\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ReturnItemCollectionMetrics='SIZE'|'NONE',\n ClientRequestToken='string'\n )\n \n \n :type TransactItems: list\n :param TransactItems: [REQUIRED]\n An ordered array of up to 10 TransactWriteItem objects, each of which contains a ConditionCheck , Put , Update , or Delete object. These can operate on items in different tables, but the tables must reside in the same AWS account and region, and no two of them can operate on the same item.\n (dict) --A list of requests that can perform update, put, delete, or check operations on multiple items in one or more tables atomically.\n ConditionCheck (dict) --A request to perform a check item operation.\n Key (dict) -- [REQUIRED]The primary key of the item to be checked. 
Each element consists of an attribute name and a value for that attribute.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n TableName (string) -- [REQUIRED]Name of the table for the check item request.\n ConditionExpression (string) -- [REQUIRED]A condition that must be satisfied in order for a conditional update to succeed.\n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in an expression.\n (string) --\n (string) --\n \n ExpressionAttributeValues (dict) --One or more values that can be substituted in an expression.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. 
For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ReturnValuesOnConditionCheckFailure (string) --Use ReturnValuesOnConditionCheckFailure to get the item attributes if the ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are: NONE and ALL_OLD.\n Put (dict) --A request to perform a PutItem operation.\n Item (dict) -- [REQUIRED]A map of attribute name to attribute values, representing the primary key of the item to be written by PutItem . All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema. If any attributes are present in the item that are part of an index key schema for the table, their types must match the index key schema.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n TableName (string) -- [REQUIRED]Name of the table in which to write the item.\n ConditionExpression (string) --A condition that must be satisfied in order for a conditional update to succeed.\n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in an expression.\n (string) --\n (string) --\n \n ExpressionAttributeValues (dict) --One or more values that can be substituted in an expression.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ReturnValuesOnConditionCheckFailure (string) --Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Put condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are: NONE and ALL_OLD.\n Delete (dict) --A request to perform a DeleteItem operation.\n Key (dict) -- [REQUIRED]The primary key of the item to be deleted. 
Each element consists of an attribute name and a value for that attribute.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n TableName (string) -- [REQUIRED]Name of the table in which the item to be deleted resides.\n ConditionExpression (string) --A condition that must be satisfied in order for a conditional delete to succeed.\n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in an expression.\n (string) --\n (string) --\n \n ExpressionAttributeValues (dict) --One or more values that can be substituted in an expression.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. 
For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ReturnValuesOnConditionCheckFailure (string) --Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Delete condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are: NONE and ALL_OLD.\n Update (dict) --A request to perform an UpdateItem operation.\n Key (dict) -- [REQUIRED]The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n UpdateExpression (string) -- [REQUIRED]An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.\n TableName (string) -- [REQUIRED]Name of the table for the UpdateItem request.\n ConditionExpression (string) --A condition that must be satisfied in order for a conditional update to succeed.\n ExpressionAttributeNames (dict) --One or more substitution tokens for attribute names in an expression.\n (string) --\n (string) --\n \n ExpressionAttributeValues (dict) --One or more values that can be substituted in an expression.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n ReturnValuesOnConditionCheckFailure (string) --Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. 
For ReturnValuesOnConditionCheckFailure , the valid values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.\n \n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ReturnItemCollectionMetrics: string\n :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections (if any) that were modified during the operation and are returned in the response. If set to NONE (the default), no statistics are returned.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning that multiple identical calls have the same effect as one single call.\n Although multiple identical calls using the same client request token produce the same result on the server (no side effects), the responses to the calls may not be the same. If the ReturnConsumedCapacity parameter is set, then the initial TransactWriteItems call returns the amount of write capacity units consumed in making the changes, and subsequent TransactWriteItems calls with the same client token return the amount of read capacity units consumed in reading the item.\n A client request token is valid for 10 minutes after the first request that uses it completes. After 10 minutes, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 10 minutes or the result may not be idempotent.\n If you submit a request with the same client token but a change in other parameters within the 10 minute idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ConsumedCapacity': [\n {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n ],\n 'ItemCollectionMetrics': {\n 'string': [\n {\n 'ItemCollectionKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'...
recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'SizeEstimateRangeGB': [\n 123.0,\n ]\n },\n ]\n }\n }\n \n \n :returns: \n A condition in one of the condition expressions is not met.\n A conflicting operation is in the process of updating the same item.\n There is insufficient provisioned capacity for the transaction to be completed.\n An item size becomes too large (bigger than 400 KB), a Local Secondary Index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.\n There is a user error, such as an invalid data format.\n \n \"\"\"\n pass\n\ndef untag_resource(ResourceArn=None, TagKeys=None):\n \"\"\"\n Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.\n For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n ResourceArn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon DynamoDB resource the tags will be removed from. This value is an Amazon Resource Name (ARN).\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n A list of tag keys. Existing tags of the resource whose keys are members of this list will be removed from the Amazon DynamoDB resource.\n (string) --\n \n\n \"\"\"\n pass\n\ndef update_continuous_backups(TableName=None, PointInTimeRecoverySpecification=None):\n \"\"\"\n Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime .\n See also: AWS API Documentation\n \n \n :example: response = client.update_continuous_backups(\n TableName='string',\n PointInTimeRecoverySpecification={\n 'PointInTimeRecoveryEnabled': True|False\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table.\n \n\n :type PointInTimeRecoverySpecification: dict\n :param PointInTimeRecoverySpecification: [REQUIRED]\n Represents the settings used to enable point in time recovery.\n PointInTimeRecoveryEnabled (boolean) -- [REQUIRED]Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.\n \n\n :rtype: dict\n :return: {\n 'ContinuousBackupsDescription': {\n 'ContinuousBackupsStatus': 'ENABLED'|'DISABLED',\n 'PointInTimeRecoveryDescription': {\n 'PointInTimeRecoveryStatus': 'ENABLED'|'DISABLED',\n 'EarliestRestorableDateTime': datetime(2015, 1, 1),\n 'LatestRestorableDateTime': datetime(2015, 1, 1)\n }\n }\n }\n \n \n :returns: \n ENABLING - Point in time recovery is being enabled.\n ENABLED - Point in time recovery is enabled.\n DISABLED - Point in time recovery is disabled.\n \n \"\"\"\n pass\n\ndef update_global_table(GlobalTableName=None, ReplicaUpdates=None):\n \"\"\"\n Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation.
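\n For example, a minimal sketch (not part of the original documentation) of adding one replica region, assuming client is a boto3 DynamoDB client and using hypothetical table and region names:\n response = client.update_global_table(\n GlobalTableName='Music', # hypothetical table name\n ReplicaUpdates=[\n {\n 'Create': {\n 'RegionName': 'us-east-2' # hypothetical region\n }\n },\n ]\n )\n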
Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, must have DynamoDB Streams enabled, and must have the same provisioned and maximum write capacity units.\n If global secondary indexes are specified, then the following conditions must also be met:\n See also: AWS API Documentation\n \n \n :example: response = client.update_global_table(\n GlobalTableName='string',\n ReplicaUpdates=[\n {\n 'Create': {\n 'RegionName': 'string'\n },\n 'Delete': {\n 'RegionName': 'string'\n }\n },\n ]\n )\n \n \n :type GlobalTableName: string\n :param GlobalTableName: [REQUIRED]\n The global table name.\n \n\n :type ReplicaUpdates: list\n :param ReplicaUpdates: [REQUIRED]\n A list of regions that should be added or removed from the global table.\n (dict) --Represents one of the following:\n A new replica to be added to an existing global table.\n New parameters for an existing replica.\n An existing replica to be removed from an existing global table.\n Create (dict) --The parameters required for creating a replica on an existing global table.\n RegionName (string) -- [REQUIRED]The region of the replica to be added.\n Delete (dict) --The name of the existing replica to be removed.\n RegionName (string) -- [REQUIRED]The region of the replica to be removed.\n \n \n\n :rtype: dict\n :return: {\n 'GlobalTableDescription': {\n 'ReplicationGroup': [\n {\n 'RegionName': 'string'\n },\n ],\n 'GlobalTableArn': 'string',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'GlobalTableStatus': 'CREATING'|'ACTIVE'|'DELETING'|'UPDATING',\n 'GlobalTableName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_global_table_settings(GlobalTableName=None, GlobalTableBillingMode=None, GlobalTableProvisionedWriteCapacityUnits=None, GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate=None, GlobalTableGlobalSecondaryIndexSettingsUpdate=None, ReplicaSettingsUpdate=None):\n \"\"\"\n Updates settings for a global table.\n See also: AWS API Documentation\n \n \n :example: response = client.update_global_table_settings(\n GlobalTableName='string',\n GlobalTableBillingMode='PROVISIONED'|'PAY_PER_REQUEST',\n GlobalTableProvisionedWriteCapacityUnits=123,\n GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate={\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicyUpdate': {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n }\n },\n GlobalTableGlobalSecondaryIndexSettingsUpdate=[\n {\n 'IndexName': 'string',\n 'ProvisionedWriteCapacityUnits': 123,\n
'ProvisionedWriteCapacityAutoScalingSettingsUpdate': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicyUpdate': {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n }\n }\n },\n ],\n ReplicaSettingsUpdate=[\n {\n 'RegionName': 'string',\n 'ReplicaProvisionedReadCapacityUnits': 123,\n 'ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicyUpdate': {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n }\n },\n 'ReplicaGlobalSecondaryIndexSettingsUpdate': [\n {\n 'IndexName': 'string',\n 'ProvisionedReadCapacityUnits': 123,\n 'ProvisionedReadCapacityAutoScalingSettingsUpdate': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicyUpdate': {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n }\n }\n },\n ]\n },\n ]\n )\n \n \n :type GlobalTableName: string\n :param GlobalTableName: [REQUIRED]\n The name of the global table\n \n\n :type GlobalTableBillingMode: string\n :param GlobalTableBillingMode: The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode.\n\n :type GlobalTableProvisionedWriteCapacityUnits: integer\n :param GlobalTableProvisionedWriteCapacityUnits: The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.\n\n :type GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate: dict\n :param GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate: AutoScaling settings for managing provisioned write capacity for the global table.\n MinimumUnits (integer) --The minimum capacity units that a global table or global secondary index should be scaled down to.\n MaximumUnits (integer) --The maximum capacity units that a global table or global secondary index should be scaled up to.\n AutoScalingDisabled (boolean) --Disabled autoscaling for this global table or global secondary index.\n AutoScalingRoleArn (string) --Role ARN used for configuring autoscaling policy.\n ScalingPolicyUpdate (dict) --The scaling policy to apply for scaling target global table or global secondary index capacity units.\n PolicyName (string) --The name of the scaling policy.\n TargetTrackingScalingPolicyConfiguration (dict) -- [REQUIRED]Represents a target tracking scaling policy configuration.\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. The cooldown period is used to block subsequent scale in requests until it has expired. 
You should scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, application autoscaling scales out your scalable target immediately.\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. You should continuously (but not excessively) scale out.\n TargetValue (float) -- [REQUIRED]The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n \n \n\n :type GlobalTableGlobalSecondaryIndexSettingsUpdate: list\n :param GlobalTableGlobalSecondaryIndexSettingsUpdate: Represents the settings of a global secondary index for a global table that will be modified.\n (dict) --Represents the settings of a global secondary index for a global table that will be modified.\n IndexName (string) -- [REQUIRED]The name of the global secondary index. The name must be unique among all other indexes on this table.\n ProvisionedWriteCapacityUnits (integer) --The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.\n ProvisionedWriteCapacityAutoScalingSettingsUpdate (dict) --AutoScaling settings for managing a global secondary index's write capacity units.\n MinimumUnits (integer) --The minimum capacity units that a global table or global secondary index should be scaled down to.\n MaximumUnits (integer) --The maximum capacity units that a global table or global secondary index should be scaled up to.\n AutoScalingDisabled (boolean) --Disabled autoscaling for this global table or global secondary index.\n AutoScalingRoleArn (string) --Role ARN used for configuring autoscaling policy.\n ScalingPolicyUpdate (dict) --The scaling policy to apply for scaling target global table or global secondary index capacity units.\n PolicyName (string) --The name of the scaling policy.\n TargetTrackingScalingPolicyConfiguration (dict) -- [REQUIRED]Represents a target tracking scaling policy configuration.\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. The cooldown period is used to block subsequent scale in requests until it has expired. You should scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, application autoscaling scales out your scalable target immediately.\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. 
You should continuously (but not excessively) scale out.\n TargetValue (float) -- [REQUIRED]The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n \n \n \n\n :type ReplicaSettingsUpdate: list\n :param ReplicaSettingsUpdate: Represents the settings for a global table in a region that will be modified.\n (dict) --Represents the settings for a global table in a region that will be modified.\n RegionName (string) -- [REQUIRED]The region of the replica to be added.\n ReplicaProvisionedReadCapacityUnits (integer) --The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate (dict) --Autoscaling settings for managing a global table replica's read capacity units.\n MinimumUnits (integer) --The minimum capacity units that a global table or global secondary index should be scaled down to.\n MaximumUnits (integer) --The maximum capacity units that a global table or global secondary index should be scaled up to.\n AutoScalingDisabled (boolean) --Disabled autoscaling for this global table or global secondary index.\n AutoScalingRoleArn (string) --Role ARN used for configuring autoscaling policy.\n ScalingPolicyUpdate (dict) --The scaling policy to apply for scaling target global table or global secondary index capacity units.\n PolicyName (string) --The name of the scaling policy.\n TargetTrackingScalingPolicyConfiguration (dict) -- [REQUIRED]Represents a target tracking scaling policy configuration.\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. The cooldown period is used to block subsequent scale in requests until it has expired. You should scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, application autoscaling scales out your scalable target immediately.\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. You should continuously (but not excessively) scale out.\n TargetValue (float) -- [REQUIRED]The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n \n ReplicaGlobalSecondaryIndexSettingsUpdate (list) --Represents the settings of a global secondary index for a global table that will be modified.\n (dict) --Represents the settings of a global secondary index for a global table that will be modified.\n IndexName (string) -- [REQUIRED]The name of the global secondary index. 
The name must be unique among all other indexes on this table.\n ProvisionedReadCapacityUnits (integer) --The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException .\n ProvisionedReadCapacityAutoScalingSettingsUpdate (dict) --Autoscaling settings for managing a global secondary index replica's read capacity units.\n MinimumUnits (integer) --The minimum capacity units that a global table or global secondary index should be scaled down to.\n MaximumUnits (integer) --The maximum capacity units that a global table or global secondary index should be scaled up to.\n AutoScalingDisabled (boolean) --Disabled autoscaling for this global table or global secondary index.\n AutoScalingRoleArn (string) --Role ARN used for configuring autoscaling policy.\n ScalingPolicyUpdate (dict) --The scaling policy to apply for scaling target global table or global secondary index capacity units.\n PolicyName (string) --The name of the scaling policy.\n TargetTrackingScalingPolicyConfiguration (dict) -- [REQUIRED]Represents a target tracking scaling policy configuration.\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. The cooldown period is used to block subsequent scale in requests until it has expired. You should scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, application autoscaling scales out your scalable target immediately.\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. You should continuously (but not excessively) scale out.\n TargetValue (float) -- [REQUIRED]The target value for the metric. 
The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n \n \n \n \n\n :rtype: dict\n :return: {\n 'GlobalTableName': 'string',\n 'ReplicaSettings': [\n {\n 'RegionName': 'string',\n 'ReplicaStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'ReplicaBillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'ReplicaProvisionedReadCapacityUnits': 123,\n 'ReplicaProvisionedReadCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n },\n 'ReplicaProvisionedWriteCapacityUnits': 123,\n 'ReplicaProvisionedWriteCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n },\n 'ReplicaGlobalSecondaryIndexSettings': [\n {\n 'IndexName': 'string',\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'ProvisionedReadCapacityUnits': 123,\n 'ProvisionedReadCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n },\n 'ProvisionedWriteCapacityUnits': 123,\n 'ProvisionedWriteCapacityAutoScalingSettings': {\n 'MinimumUnits': 123,\n 'MaximumUnits': 123,\n 'AutoScalingDisabled': True|False,\n 'AutoScalingRoleArn': 'string',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'TargetTrackingScalingPolicyConfiguration': {\n 'DisableScaleIn': True|False,\n 'ScaleInCooldown': 123,\n 'ScaleOutCooldown': 123,\n 'TargetValue': 123.0\n }\n },\n ]\n }\n },\n ]\n },\n ]\n }\n \n \n :returns: \n CREATING - The region is being created.\n UPDATING - The region is being updated.\n DELETING - The region is being deleted.\n ACTIVE - The region is ready for use.\n \n \"\"\"\n pass\n\ndef update_item(TableName=None, Key=None, AttributeUpdates=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, UpdateExpression=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):\n \"\"\"\n Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).\n You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.\n See also: AWS API Documentation\n \n Examples\n This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. 
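\n A minimal sketch (not part of the original documentation) of the call this example describes, assuming client is a boto3 DynamoDB client and a hypothetical Artist/SongTitle key schema with hypothetical values; it uses the UpdateExpression parameter rather than the legacy AttributeUpdates parameter, with an ExpressionAttributeNames placeholder because Year is a reserved word in DynamoDB:\n response = client.update_item(\n TableName='Music',\n Key={\n 'Artist': {'S': 'Acme Band'}, # hypothetical key values\n 'SongTitle': {'S': 'Happy Day'}\n },\n UpdateExpression='SET AlbumTitle = :t, #Y = :y',\n ExpressionAttributeNames={'#Y': 'Year'},\n ExpressionAttributeValues={\n ':t': {'S': 'Louder Than Ever'},\n ':y': {'N': '2015'}\n },\n ReturnValues='ALL_NEW'\n )\n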
All of the attributes in the item, as they appear after the update, are returned in the response.\n Expected Output:\n \n :example: response = client.update_item(\n TableName='string',\n Key={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n AttributeUpdates={\n 'string': {\n 'Value': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n 'Action': 'ADD'|'PUT'|'DELETE'\n }\n },\n Expected={\n 'string': {\n 'Value': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n 'Exists': True|False,\n 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',\n 'AttributeValueList': [\n {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n },\n ]\n }\n },\n ConditionalOperator='AND'|'OR',\n ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',\n ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',\n ReturnItemCollectionMetrics='SIZE'|'NONE',\n UpdateExpression='string',\n ConditionExpression='string',\n ExpressionAttributeNames={\n 'string': 'string'\n },\n ExpressionAttributeValues={\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table containing the item to update.\n \n\n :type Key: dict\n :param Key: [REQUIRED]\n The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.\n For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. 
For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :type AttributeUpdates: dict\n :param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.\n Note\n You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.\n Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.\n Value (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. 
For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table.\n If an item with the specified *Key* is found in the table:\n PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.\n DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error.\n ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:\n If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.\n Note\n If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .\n If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.\n This action is only valid for an existing attribute whose data type is number or is a set. 
Do not use ADD for any other data types.\n If no item with the specified *Key* is found:\n PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.\n DELETE - Nothing happens; there is no attribute to delete.\n ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.\n \n \n\n :type Expected: dict\n :param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:\n Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.\n Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.\n Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.\n Value (dict) --Represents the data for the expected attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. 
For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:\n If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionCheckFailedException .\n If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionCheckFailedException .\n The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.\n DynamoDB returns a ValidationException if:\n Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)\n Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)\n ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.\n The following comparison operators are available:\n EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN\n The following are descriptions of each comparison operator.\n EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .\n LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . 
Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .\n NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.\n NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.\n Note\n This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.\n CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.\n BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).\n IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. 
These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.\n BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}\n AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.\n For type Number, value comparisons are numeric.\n String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .\n For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.\n For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. 
For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n \n\n :type ConditionalOperator: string\n :param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .\n\n :type ReturnValues: string\n :param ReturnValues: Use ReturnValues if you want to get the item attributes as they appear before or after they are updated. For UpdateItem , the valid values are:\n NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)\n ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation.\n UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation.\n ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation.\n UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation.\n There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.\n The values returned are strongly consistent.\n \n\n :type ReturnConsumedCapacity: string\n :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\n TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\n NONE - No ConsumedCapacity details are included in the response.\n \n\n :type ReturnItemCollectionMetrics: string\n :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.\n\n :type UpdateExpression: string\n :param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.\n The following action values are available for UpdateExpression .\n SET - Adds one or more attributes and values to an item. If any of these attributes already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val SET supports the following functions:\n if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.\n list_append (operand, operand) - evaluates to a list with a new element added to it.
You can append the new element to the start or the end of the list by reversing the order of the operands.\n These function names are case-sensitive.\n REMOVE - Removes one or more attributes from an item.\n ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:\n If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.\n Note\n If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .\n If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.\n \n Warning\n The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.\n DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error.\n Warning\n The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.\n You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5\n For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide .\n \n\n :type ConditionExpression: string\n :param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed.\n An expression can contain any of the following:\n Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.\n Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN\n Logical operators: AND | OR | NOT\n For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n \n\n :type ExpressionAttributeNames: dict\n :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. 
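Before the ExpressionAttributeNames use cases that follow, a minimal sketch tying UpdateExpression , ConditionExpression , and both substitution parameters together (the table, key, and attribute names are illustrative):

import boto3

client = boto3.client('dynamodb')

# SET a reserved-word attribute and ADD to a counter in one expression,
# guarded by a condition on the item's existence.
response = client.update_item(
    TableName='Music',                               # hypothetical table
    Key={
        'Artist': {'S': 'Acme Band'},                # hypothetical composite key
        'SongTitle': {'S': 'Happy Day'}
    },
    UpdateExpression='SET #P = :val ADD Plays :inc',
    ConditionExpression='attribute_exists(Artist)',  # fail if the item is missing
    ExpressionAttributeNames={'#P': 'Percentile'},   # 'Percentile' is a reserved word
    ExpressionAttributeValues={
        ':val': {'N': '90'},
        ':inc': {'N': '1'}
    },
    ReturnValues='UPDATED_NEW'
)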
The following are some use cases for using ExpressionAttributeNames :\n To access an attribute whose name conflicts with a DynamoDB reserved word.\n To create a placeholder for repeating occurrences of an attribute name in an expression.\n To prevent special characters in an attribute name from being misinterpreted in an expression.\n Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n Percentile\n The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n {'#P':'Percentile'}\n You could then use this substitution in an expression, as in this example:\n #P = :val\n Note\n Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .\n (string) --\n (string) --\n \n\n :type ExpressionAttributeValues: dict\n :param ExpressionAttributeValues: One or more values that can be substituted in an expression.\n Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:\n Available | Backordered | Discontinued\n You would first need to specify ExpressionAttributeValues as follows:\n { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }\n You could then use these values in an expression, such as this:\n ProductStatus IN (:avail, :back, :disc)\n For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n S (string) --An attribute of type String. For example:\n 'S': 'Hello'\n N (string) --An attribute of type Number. For example:\n 'N': '123.45'\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n B (bytes) --An attribute of type Binary. For example:\n 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n SS (list) --An attribute of type String Set. For example:\n 'SS': ['Giraffe', 'Hippo' ,'Zebra']\n (string) --\n NS (list) --An attribute of type Number Set. For example:\n 'NS': ['42.2', '-19', '7.5', '3.14']\n Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n (string) --\n BS (list) --An attribute of type Binary Set. For example:\n 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n (bytes) --\n M (dict) --An attribute of type Map. For example:\n 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n (string) --\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n \n L (list) --An attribute of type List. For example:\n 'L': ['Cookies', 'Coffee', 3.14159]\n (dict) --Represents the data for an attribute.\n Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\n For more information, see Data Types in the Amazon DynamoDB Developer Guide .\n NULL (boolean) --An attribute of type Null. For example:\n 'NULL': true\n BOOL (boolean) --An attribute of type Boolean. For example:\n 'BOOL': true\n \n \n\n :rtype: dict\n :return: {\n 'Attributes': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'ConsumedCapacity': {\n 'TableName': 'string',\n 'CapacityUnits': 123.0,\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'Table': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n },\n 'LocalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n },\n 'GlobalSecondaryIndexes': {\n 'string': {\n 'ReadCapacityUnits': 123.0,\n 'WriteCapacityUnits': 123.0,\n 'CapacityUnits': 123.0\n }\n }\n },\n 'ItemCollectionMetrics': {\n 'ItemCollectionKey': {\n 'string': {\n 'S': 'string',\n 'N': 'string',\n 'B': b'bytes',\n 'SS': [\n 'string',\n ],\n 'NS': [\n 'string',\n ],\n 'BS': [\n b'bytes',\n ],\n 'M': {\n 'string': {'... recursive ...'}\n },\n 'L': [\n {'... recursive ...'},\n ],\n 'NULL': True|False,\n 'BOOL': True|False\n }\n },\n 'SizeEstimateRangeGB': [\n 123.0,\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_table(AttributeDefinitions=None, TableName=None, BillingMode=None, ProvisionedThroughput=None, GlobalSecondaryIndexUpdates=None, StreamSpecification=None, SSESpecification=None):\n \"\"\"\n Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.\n You can only perform one of the following operations at once:\n See also: AWS API Documentation\n \n Examples\n This example increases the provisioned read and write capacity on the Music table.\n Expected Output:\n \n :example: response = client.update_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n TableName='string',\n BillingMode='PROVISIONED'|'PAY_PER_REQUEST',\n ProvisionedThroughput={\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n GlobalSecondaryIndexUpdates=[\n {\n 'Update': {\n 'IndexName': 'string',\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n }\n },\n 'Create': {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n }\n },\n 'Delete': {\n 'IndexName': 'string'\n }\n },\n ],\n StreamSpecification={\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n SSESpecification={\n 'Enabled': True|False,\n 'SSEType': 'AES256'|'KMS',\n 
'KMSMasterKeyId': 'string'\n }\n )\n \n \n :type AttributeDefinitions: list\n :param AttributeDefinitions: An array of attributes that describe the key schema for the table and indexes. If you are adding a new global secondary index to the table, AttributeDefinitions must include the key element(s) of the new index.\n (dict) --Represents an attribute for describing the key schema for the table and indexes.\n AttributeName (string) -- [REQUIRED]A name for the attribute.\n AttributeType (string) -- [REQUIRED]The data type for the attribute, where:\n S - the attribute is of type String\n N - the attribute is of type Number\n B - the attribute is of type Binary\n \n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to be updated.\n \n\n :type BillingMode: string\n :param BillingMode: Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes.\n PROVISIONED - Sets the billing mode to PROVISIONED . We recommend using PROVISIONED for predictable workloads.\n PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST . We recommend using PAY_PER_REQUEST for unpredictable workloads.\n \n\n :type ProvisionedThroughput: dict\n :param ProvisionedThroughput: The new provisioned throughput settings for the specified table or index.\n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n\n :type GlobalSecondaryIndexUpdates: list\n :param GlobalSecondaryIndexUpdates: An array of one or more global secondary indexes for the table. 
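A minimal sketch of update_table combining the throughput parameters above with one Update entry of the kind described next (the table and index names are illustrative):

import boto3

client = boto3.client('dynamodb')

# Raise the table's provisioned throughput and one global secondary
# index's throughput in a single call.
response = client.update_table(
    TableName='Music',                            # hypothetical table
    ProvisionedThroughput={
        'ReadCapacityUnits': 10,
        'WriteCapacityUnits': 10
    },
    GlobalSecondaryIndexUpdates=[
        {
            'Update': {
                'IndexName': 'AlbumTitle-index',  # hypothetical index
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 10,
                    'WriteCapacityUnits': 10
                }
            }
        }
    ]
)
print(response['TableDescription']['TableStatus'])  # typically 'UPDATING'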
For each index in the array, you can request one action:\n Create - add a new global secondary index to the table.\n Update - modify the provisioned throughput settings of an existing global secondary index.\n Delete - remove a global secondary index from the table.\n For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide .\n (dict) --Represents one of the following:\n A new global secondary index to be added to an existing table.\n New provisioned throughput parameters for an existing global secondary index.\n An existing global secondary index to be removed from an existing table.\n Update (dict) --The name of an existing global secondary index, along with new provisioned throughput settings to be applied to that index.\n IndexName (string) -- [REQUIRED]The name of the global secondary index to be updated.\n ProvisionedThroughput (dict) -- [REQUIRED]Represents the provisioned throughput settings for the specified global secondary index.\n For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide .\n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n Create (dict) --The parameters required for creating a global secondary index on an existing table:\n IndexName\n KeySchema\n AttributeDefinitions\n Projection\n ProvisionedThroughput\n IndexName (string) -- [REQUIRED]The name of the global secondary index to be created.\n KeySchema (list) -- [REQUIRED]The key schema for the global secondary index.\n (dict) --Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.\n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.\n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.\n AttributeName (string) -- [REQUIRED]The name of a key attribute.\n KeyType (string) -- [REQUIRED]The role that this key attribute will assume:\n HASH - partition key\n RANGE - sort key\n Note\n The partition key of an item is also known as its hash attribute . The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute .
The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n \n Projection (dict) -- [REQUIRED]Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.\n ProjectionType (string) --The set of attributes that are projected into the index:\n KEYS_ONLY - Only the index and primary keys are projected into the index.\n INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes .\n ALL - All of the table attributes are projected into the index.\n NonKeyAttributes (list) --Represents the non-key attribute names which will be projected into the index.\n For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.\n (string) --\n \n ProvisionedThroughput (dict) --Represents the provisioned throughput settings for the specified global secondary index.\n For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide .\n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n Delete (dict) --The name of an existing global secondary index to be removed.\n IndexName (string) -- [REQUIRED]The name of the global secondary index to be deleted.\n \n \n\n :type StreamSpecification: dict\n :param StreamSpecification: Represents the DynamoDB Streams configuration for the table.\n Note\n You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table which does not have a stream.\n StreamEnabled (boolean) --Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.\n StreamViewType (string) --When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are:\n KEYS_ONLY - Only the key attributes of the modified item are written to the stream.\n NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.\n OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.\n NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.\n \n\n :type SSESpecification: dict\n :param SSESpecification: The new server-side encryption settings for the specified table.\n Enabled (boolean) --Indicates whether server-side encryption is enabled (true) or disabled (false) on the table. If enabled (true), server-side encryption type is set to KMS .
If disabled (false) or not specified, server-side encryption is set to AWS owned CMK.\n SSEType (string) --Server-side encryption type:\n AES256 - Server-side encryption which uses the AES256 algorithm (not applicable).\n KMS - Server-side encryption which uses AWS Key Management Service. Key is stored in your account and is managed by AWS KMS (KMS charges apply).\n KMSMasterKeyId (string) --The KMS Master Key (CMK) which should be used for the KMS encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS Master Key alias/aws/dynamodb.\n \n\n :rtype: dict\n :return: {\n 'TableDescription': {\n 'AttributeDefinitions': [\n {\n 'AttributeName': 'string',\n 'AttributeType': 'S'|'N'|'B'\n },\n ],\n 'TableName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'TableStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'CreationDateTime': datetime(2015, 1, 1),\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'TableSizeBytes': 123,\n 'ItemCount': 123,\n 'TableArn': 'string',\n 'TableId': 'string',\n 'BillingModeSummary': {\n 'BillingMode': 'PROVISIONED'|'PAY_PER_REQUEST',\n 'LastUpdateToPayPerRequestDateTime': datetime(2015, 1, 1)\n },\n 'LocalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'GlobalSecondaryIndexes': [\n {\n 'IndexName': 'string',\n 'KeySchema': [\n {\n 'AttributeName': 'string',\n 'KeyType': 'HASH'|'RANGE'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',\n 'NonKeyAttributes': [\n 'string',\n ]\n },\n 'IndexStatus': 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE',\n 'Backfilling': True|False,\n 'ProvisionedThroughput': {\n 'LastIncreaseDateTime': datetime(2015, 1, 1),\n 'LastDecreaseDateTime': datetime(2015, 1, 1),\n 'NumberOfDecreasesToday': 123,\n 'ReadCapacityUnits': 123,\n 'WriteCapacityUnits': 123\n },\n 'IndexSizeBytes': 123,\n 'ItemCount': 123,\n 'IndexArn': 'string'\n },\n ],\n 'StreamSpecification': {\n 'StreamEnabled': True|False,\n 'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'\n },\n 'LatestStreamLabel': 'string',\n 'LatestStreamArn': 'string',\n 'RestoreSummary': {\n 'SourceBackupArn': 'string',\n 'SourceTableArn': 'string',\n 'RestoreDateTime': datetime(2015, 1, 1),\n 'RestoreInProgress': True|False\n },\n 'SSEDescription': {\n 'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'|'UPDATING',\n 'SSEType': 'AES256'|'KMS',\n 'KMSMasterKeyArn': 'string'\n }\n }\n }\n \n \n :returns: \n AttributeDefinitions (list) -- An array of attributes that describe the key schema for the table and indexes. 
If you are adding a new global secondary index to the table, AttributeDefinitions must include the key element(s) of the new index.\n \n (dict) --Represents an attribute for describing the key schema for the table and indexes.\n \n AttributeName (string) -- [REQUIRED]A name for the attribute.\n \n AttributeType (string) -- [REQUIRED]The data type for the attribute, where:\n \n S - the attribute is of type String\n N - the attribute is of type Number\n B - the attribute is of type Binary\n \n \n \n \n \n \n TableName (string) -- [REQUIRED]\n The name of the table to be updated.\n \n BillingMode (string) -- Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes.\n \n PROVISIONED - Sets the billing mode to PROVISIONED . We recommend using PROVISIONED for predictable workloads.\n PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST . We recommend using PAY_PER_REQUEST for unpredictable workloads.\n \n \n ProvisionedThroughput (dict) -- The new provisioned throughput settings for the specified table or index.\n \n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n \n \n GlobalSecondaryIndexUpdates (list) -- An array of one or more global secondary indexes for the table. For each index in the array, you can request one action:\n \n Create - add a new global secondary index to the table.\n Update - modify the provisioned throughput settings of an existing global secondary index.\n Delete - remove a global secondary index from the table.\n \n For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide .\n \n (dict) --Represents one of the following:\n \n A new global secondary index to be added to an existing table.\n New provisioned throughput parameters for an existing global secondary index.\n An existing global secondary index to be removed from an existing table.\n \n \n Update (dict) --The name of an existing global secondary index, along with new provisioned throughput settings to be applied to that index.\n \n IndexName (string) -- [REQUIRED]The name of the global secondary index to be updated.\n \n ProvisionedThroughput (dict) -- [REQUIRED]Represents the provisioned throughput settings for the specified global secondary index.\n For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide .\n \n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . 
For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n \n \n \n \n Create (dict) --The parameters required for creating a global secondary index on an existing table:\n \n IndexName\n KeySchema\n AttributeDefinitions\n Projection\n ProvisionedThroughput\n \n \n IndexName (string) -- [REQUIRED]The name of the global secondary index to be created.\n \n KeySchema (list) -- [REQUIRED]The key schema for the global secondary index.\n \n (dict) --Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.\n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.\n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.\n \n AttributeName (string) -- [REQUIRED]The name of a key attribute.\n \n KeyType (string) -- [REQUIRED]The role that this key attribute will assume:\n \n HASH - partition key\n RANGE - sort key\n \n \n Note\n The partition key of an item is also known as its hash attribute . The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n The sort key of an item is also known as its range attribute . The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.\n \n \n \n \n \n \n Projection (dict) -- [REQUIRED]Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.\n \n ProjectionType (string) --The set of attributes that are projected into the index:\n \n KEYS_ONLY - Only the index and primary keys are projected into the index.\n INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes .\n ALL - All of the table attributes are projected into the index.\n \n \n NonKeyAttributes (list) --Represents the non-key attribute names which will be projected into the index.\n For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes must not exceed 20.
If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.\n \n (string) --\n \n \n \n \n ProvisionedThroughput (dict) --Represents the provisioned throughput settings for the specified global secondary index.\n For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide .\n \n ReadCapacityUnits (integer) -- [REQUIRED]The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n WriteCapacityUnits (integer) -- [REQUIRED]The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException . For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide .\n If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.\n \n \n \n \n \n Delete (dict) --The name of an existing global secondary index to be removed.\n \n IndexName (string) -- [REQUIRED]The name of the global secondary index to be deleted.\n \n \n \n \n \n \n \n StreamSpecification (dict) -- Represents the DynamoDB Streams configuration for the table.\n \n Note\n You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table which does not have a stream.\n \n \n StreamEnabled (boolean) --Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.\n \n StreamViewType (string) --When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are:\n \n KEYS_ONLY - Only the key attributes of the modified item are written to the stream.\n NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.\n OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.\n NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.\n \n \n \n \n SSESpecification (dict) -- The new server-side encryption settings for the specified table.\n \n Enabled (boolean) --Indicates whether server-side encryption is enabled (true) or disabled (false) on the table. If enabled (true), server-side encryption type is set to KMS . If disabled (false) or not specified, server-side encryption is set to AWS owned CMK.\n \n SSEType (string) --Server-side encryption type:\n \n AES256 - Server-side encryption which uses the AES256 algorithm (not applicable).\n KMS - Server-side encryption which uses AWS Key Management Service. Key is stored in your account and is managed by AWS KMS (KMS charges apply).\n \n \n KMSMasterKeyId (string) --The KMS Master Key (CMK) which should be used for the KMS encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS Master Key alias/aws/dynamodb.\n \n \n \n \n \"\"\"\n pass\n\ndef update_time_to_live(TableName=None, TimeToLiveSpecification=None):\n \"\"\"\n The UpdateTimeToLive method will enable or disable TTL for the specified table. 
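A minimal sketch of a typical call (the table and attribute names are illustrative); the expiry semantics are spelled out below:

import time
import boto3

client = boto3.client('dynamodb')

# Enable TTL on an epoch-seconds attribute; items whose 'ExpiresAt'
# value is in the past become eligible for deletion.
response = client.update_time_to_live(
    TableName='SessionData',          # hypothetical table
    TimeToLiveSpecification={
        'Enabled': True,
        'AttributeName': 'ExpiresAt'  # hypothetical attribute
    }
)
print(response['TimeToLiveSpecification'])

# Items would then be written with a future expiry, e.g. 24 hours out:
expires_at = int(time.time()) + 24 * 60 * 60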
A successful UpdateTimeToLive call returns the current TimeToLiveSpecification ; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException .\n TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.\n DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.\n As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.\n For more information, see Time To Live in the Amazon DynamoDB Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.update_time_to_live(\n TableName='string',\n TimeToLiveSpecification={\n 'Enabled': True|False,\n 'AttributeName': 'string'\n }\n )\n \n \n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table to be configured.\n \n\n :type TimeToLiveSpecification: dict\n :param TimeToLiveSpecification: [REQUIRED]\n Represents the settings used to enable or disable Time to Live for the specified table.\n Enabled (boolean) -- [REQUIRED]Indicates whether Time To Live is to be enabled (true) or disabled (false) on the table.\n AttributeName (string) -- [REQUIRED]The name of the Time to Live attribute used to store the expiration time for items in the table.\n \n\n :rtype: dict\n :return: {\n 'TimeToLiveSpecification': {\n 'Enabled': True|False,\n 'AttributeName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5944454073905945, "alphanum_fraction": 0.5961862206459045, "avg_line_length": 27.674549102783203, "blob_id": "95b496d93ca4cf825ddb38a3c0850e43b0dc4e5e", "content_id": "f0425071efa7496d8432fa1479cc12d3f00f445f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87315, "license_type": "permissive", "max_line_length": 325, "num_lines": 3045, "path": "/pyboto3/alexaforbusiness.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef approve_skill(SkillId=None):\n \"\"\"\n Associates a skill with the organization under the customer's AWS account. 
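A minimal sketch (the skill ID is a placeholder, not a real skill):

import boto3

client = boto3.client('alexaforbusiness')

# Approve a skill for the organization; the response body is an empty dict.
response = client.approve_skill(
    SkillId='amzn1.ask.skill.11111111-2222-3333-4444-555555555555'  # placeholder
)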
If a skill is private, the user implicitly accepts access to this skill during enablement.\n See also: AWS API Documentation\n \n \n :example: response = client.approve_skill(\n SkillId='string'\n )\n \n \n :type SkillId: string\n :param SkillId: [REQUIRED]\n The unique identifier of the skill.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef associate_contact_with_address_book(ContactArn=None, AddressBookArn=None):\n \"\"\"\n Associates a contact with a given address book.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_contact_with_address_book(\n ContactArn='string',\n AddressBookArn='string'\n )\n \n \n :type ContactArn: string\n :param ContactArn: [REQUIRED]\n The ARN of the contact to associate with an address book.\n \n\n :type AddressBookArn: string\n :param AddressBookArn: [REQUIRED]\n The ARN of the address book with which to associate the contact.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_device_with_room(DeviceArn=None, RoomArn=None):\n \"\"\"\n Associates a device with a given room. This applies all the settings from the room profile to the device, and all the skills in any skill groups added to that room. This operation requires the device to be online, or else a manual sync is required.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_device_with_room(\n DeviceArn='string',\n RoomArn='string'\n )\n \n \n :type DeviceArn: string\n :param DeviceArn: The ARN of the device to associate with a room. Required.\n\n :type RoomArn: string\n :param RoomArn: The ARN of the room with which to associate the device. Required.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_skill_group_with_room(SkillGroupArn=None, RoomArn=None):\n \"\"\"\n Associates a skill group with a given room. This enables all skills in the associated skill group on all devices in the room.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_skill_group_with_room(\n SkillGroupArn='string',\n RoomArn='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group to associate with a room. Required.\n\n :type RoomArn: string\n :param RoomArn: The ARN of the room with which to associate the skill group. Required.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_skill_with_skill_group(SkillGroupArn=None, SkillId=None):\n \"\"\"\n Associates a skill with a skill group.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_skill_with_skill_group(\n SkillGroupArn='string',\n SkillId='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group with which to associate the skill.
Required.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The unique identifier of the skill.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_skill_with_users(OrganizationArn=None, SkillId=None):\n \"\"\"\n Makes a private skill available for enrolled users to enable on their devices.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_skill_with_users(\n OrganizationArn='string',\n SkillId='string'\n )\n \n \n :type OrganizationArn: string\n :param OrganizationArn: The ARN of the organization.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The private skill ID you want to make available to enrolled users.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_address_book(Name=None, Description=None, ClientRequestToken=None):\n \"\"\"\n Creates an address book with the specified details.\n See also: AWS API Documentation\n \n \n :example: response = client.create_address_book(\n Name='string',\n Description='string',\n ClientRequestToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the address book.\n \n\n :type Description: string\n :param Description: The description of the address book.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique, user-specified identifier for the request that ensures idempotency.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'AddressBookArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_business_report_schedule(ScheduleName=None, S3BucketName=None, S3KeyPrefix=None, Format=None, ContentRange=None, Recurrence=None, ClientRequestToken=None):\n \"\"\"\n Creates a recurring schedule for usage reports to deliver to the specified S3 location with a specified daily or weekly interval.\n See also: AWS API Documentation\n \n \n :example: response = client.create_business_report_schedule(\n ScheduleName='string',\n S3BucketName='string',\n S3KeyPrefix='string',\n Format='CSV'|'CSV_ZIP',\n ContentRange={\n 'Interval': 'ONE_DAY'|'ONE_WEEK'\n },\n Recurrence={\n 'StartDate': 'string'\n },\n ClientRequestToken='string'\n )\n \n \n :type ScheduleName: string\n :param ScheduleName: The name identifier of the schedule.\n\n :type S3BucketName: string\n :param S3BucketName: The S3 bucket name of the output reports.\n\n :type S3KeyPrefix: string\n :param S3KeyPrefix: The S3 key where the report is delivered.\n\n :type Format: string\n :param Format: [REQUIRED]\n The format of the generated report (individual CSV files or zipped files of individual files).\n \n\n :type ContentRange: dict\n :param ContentRange: [REQUIRED]\n The content range of the reports.\n Interval (string) --The interval of the content range.\n \n\n :type Recurrence: dict\n :param Recurrence: The recurrence of the reports.\n StartDate (string) --The start date.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: The client request token.\n This field is autopopulated if not provided.\n 
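A minimal sketch of a weekly zipped-CSV schedule (the bucket, prefix, and start date are placeholders):

import boto3

client = boto3.client('alexaforbusiness')

# Deliver zipped CSV usage reports to S3 once a week.
response = client.create_business_report_schedule(
    ScheduleName='weekly-usage',            # placeholder name
    S3BucketName='example-a4b-reports',     # placeholder bucket
    S3KeyPrefix='usage/',                   # placeholder prefix
    Format='CSV_ZIP',
    ContentRange={'Interval': 'ONE_WEEK'},
    Recurrence={'StartDate': '2019-03-01'}  # placeholder start date
)
print(response['ScheduleArn'])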
\n\n :rtype: dict\n :return: {\n 'ScheduleArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_conference_provider(ConferenceProviderName=None, ConferenceProviderType=None, IPDialIn=None, PSTNDialIn=None, MeetingSetting=None, ClientRequestToken=None):\n \"\"\"\n Adds a new conference provider under the user's AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.create_conference_provider(\n ConferenceProviderName='string',\n ConferenceProviderType='CHIME'|'BLUEJEANS'|'FUZE'|'GOOGLE_HANGOUTS'|'POLYCOM'|'RINGCENTRAL'|'SKYPE_FOR_BUSINESS'|'WEBEX'|'ZOOM'|'CUSTOM',\n IPDialIn={\n 'Endpoint': 'string',\n 'CommsProtocol': 'SIP'|'SIPS'|'H323'\n },\n PSTNDialIn={\n 'CountryCode': 'string',\n 'PhoneNumber': 'string',\n 'OneClickIdDelay': 'string',\n 'OneClickPinDelay': 'string'\n },\n MeetingSetting={\n 'RequirePin': 'YES'|'NO'|'OPTIONAL'\n },\n ClientRequestToken='string'\n )\n \n \n :type ConferenceProviderName: string\n :param ConferenceProviderName: [REQUIRED]\n The name of the conference provider.\n \n\n :type ConferenceProviderType: string\n :param ConferenceProviderType: [REQUIRED]\n Represents a type within a list of predefined types.\n \n\n :type IPDialIn: dict\n :param IPDialIn: The IP endpoint and protocol for calling.\n Endpoint (string) -- [REQUIRED]The IP address.\n CommsProtocol (string) -- [REQUIRED]The protocol, including SIP, SIPS, and H323.\n \n\n :type PSTNDialIn: dict\n :param PSTNDialIn: The information for PSTN conferencing.\n CountryCode (string) -- [REQUIRED]The zip code.\n PhoneNumber (string) -- [REQUIRED]The phone number to call to join the conference.\n OneClickIdDelay (string) -- [REQUIRED]The delay duration before Alexa enters the conference ID with dual-tone multi-frequency (DTMF). Each number on the dial pad corresponds to a DTMF tone, which is how we send data over the telephone network.\n OneClickPinDelay (string) -- [REQUIRED]The delay duration before Alexa enters the conference pin with dual-tone multi-frequency (DTMF). 
Each number on the dial pad corresponds to a DTMF tone, which is how we send data over the telephone network.\n \n\n :type MeetingSetting: dict\n :param MeetingSetting: [REQUIRED]\n The meeting settings for the conference provider.\n RequirePin (string) -- [REQUIRED]The values that indicate whether the pin is always required.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: The request token of the client.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ConferenceProviderArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_contact(DisplayName=None, FirstName=None, LastName=None, PhoneNumber=None, ClientRequestToken=None):\n \"\"\"\n Creates a contact with the specified details.\n See also: AWS API Documentation\n \n \n :example: response = client.create_contact(\n DisplayName='string',\n FirstName='string',\n LastName='string',\n PhoneNumber='string',\n ClientRequestToken='string'\n )\n \n \n :type DisplayName: string\n :param DisplayName: The name of the contact to display on the console.\n\n :type FirstName: string\n :param FirstName: [REQUIRED]\n The first name of the contact that is used to call the contact on the device.\n \n\n :type LastName: string\n :param LastName: The last name of the contact that is used to call the contact on the device.\n\n :type PhoneNumber: string\n :param PhoneNumber: The phone number of the contact in E.164 format.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique, user-specified identifier for this request that ensures idempotency.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ContactArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_profile(ProfileName=None, Timezone=None, Address=None, DistanceUnit=None, TemperatureUnit=None, WakeWord=None, ClientRequestToken=None, SetupModeDisabled=None, MaxVolumeLimit=None, PSTNEnabled=None):\n \"\"\"\n Creates a new room profile with the specified details.\n See also: AWS API Documentation\n \n \n :example: response = client.create_profile(\n ProfileName='string',\n Timezone='string',\n Address='string',\n DistanceUnit='METRIC'|'IMPERIAL',\n TemperatureUnit='FAHRENHEIT'|'CELSIUS',\n WakeWord='ALEXA'|'AMAZON'|'ECHO'|'COMPUTER',\n ClientRequestToken='string',\n SetupModeDisabled=True|False,\n MaxVolumeLimit=123,\n PSTNEnabled=True|False\n )\n \n \n :type ProfileName: string\n :param ProfileName: [REQUIRED]\n The name of a room profile.\n \n\n :type Timezone: string\n :param Timezone: [REQUIRED]\n The time zone used by a room profile.\n \n\n :type Address: string\n :param Address: [REQUIRED]\n The valid address for the room.\n \n\n :type DistanceUnit: string\n :param DistanceUnit: [REQUIRED]\n The distance unit to be used by devices in the profile.\n \n\n :type TemperatureUnit: string\n :param TemperatureUnit: [REQUIRED]\n The temperature unit to be used by devices in the profile.\n \n\n :type WakeWord: string\n :param WakeWord: [REQUIRED]\n A wake word for Alexa, Echo, Amazon, or a computer.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: The user-specified token that is used during the creation of a profile.\n This field is autopopulated if not provided.\n \n\n :type SetupModeDisabled: boolean\n :param SetupModeDisabled: Whether room profile setup is enabled.\n\n :type MaxVolumeLimit: integer\n :param MaxVolumeLimit: The maximum volume limit for a room profile.\n\n :type PSTNEnabled: boolean\n :param PSTNEnabled: Whether PSTN calling is enabled.\n\n :rtype: dict\n :return: {\n 
'ProfileArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_room(RoomName=None, Description=None, ProfileArn=None, ProviderCalendarId=None, ClientRequestToken=None, Tags=None):\n \"\"\"\n Creates a room with the specified details.\n See also: AWS API Documentation\n \n \n :example: response = client.create_room(\n RoomName='string',\n Description='string',\n ProfileArn='string',\n ProviderCalendarId='string',\n ClientRequestToken='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type RoomName: string\n :param RoomName: [REQUIRED]\n The name for the room.\n \n\n :type Description: string\n :param Description: The description for the room.\n\n :type ProfileArn: string\n :param ProfileArn: The profile ARN for the room.\n\n :type ProviderCalendarId: string\n :param ProviderCalendarId: The calendar ARN for the room.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique, user-specified identifier for this request that ensures idempotency.\n This field is autopopulated if not provided.\n \n\n :type Tags: list\n :param Tags: The tags for the room.\n (dict) --A key-value pair that can be associated with a resource.\n Key (string) -- [REQUIRED]The key of a tag. Tag keys are case-sensitive.\n Value (string) -- [REQUIRED]The value of a tag. Tag values are case-sensitive and can be null.\n \n \n\n :rtype: dict\n :return: {\n 'RoomArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_skill_group(SkillGroupName=None, Description=None, ClientRequestToken=None):\n \"\"\"\n Creates a skill group with a specified name and description.\n See also: AWS API Documentation\n \n \n :example: response = client.create_skill_group(\n SkillGroupName='string',\n Description='string',\n ClientRequestToken='string'\n )\n \n \n :type SkillGroupName: string\n :param SkillGroupName: [REQUIRED]\n The name for the skill group.\n \n\n :type Description: string\n :param Description: The description for the skill group.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique, user-specified identifier for this request that ensures idempotency.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'SkillGroupArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_user(UserId=None, FirstName=None, LastName=None, Email=None, ClientRequestToken=None, Tags=None):\n \"\"\"\n Creates a user.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user(\n UserId='string',\n FirstName='string',\n LastName='string',\n Email='string',\n ClientRequestToken='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type UserId: string\n :param UserId: [REQUIRED]\n The ARN for the user.\n \n\n :type FirstName: string\n :param FirstName: The first name for the user.\n\n :type LastName: string\n :param LastName: The last name for the user.\n\n :type Email: string\n :param Email: The email address for the user.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique, user-specified identifier for this request that ensures idempotency.\n This field is autopopulated if not provided.\n \n\n :type Tags: list\n :param Tags: The tags for the user.\n (dict) --A key-value pair that can be associated with a resource.\n Key (string) -- [REQUIRED]The key of a tag. Tag keys are case-sensitive.\n Value (string) -- [REQUIRED]The value of a tag. 
Tag values are case-sensitive and can be null.\n \n \n\n :rtype: dict\n :return: {\n 'UserArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_address_book(AddressBookArn=None):\n \"\"\"\n Deletes an address book by the address book ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_address_book(\n AddressBookArn='string'\n )\n \n \n :type AddressBookArn: string\n :param AddressBookArn: [REQUIRED]\n The ARN of the address book to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_business_report_schedule(ScheduleArn=None):\n \"\"\"\n Deletes the recurring report delivery schedule with the specified schedule ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_business_report_schedule(\n ScheduleArn='string'\n )\n \n \n :type ScheduleArn: string\n :param ScheduleArn: [REQUIRED]\n The ARN of the business report schedule.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_conference_provider(ConferenceProviderArn=None):\n \"\"\"\n Deletes a conference provider.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_conference_provider(\n ConferenceProviderArn='string'\n )\n \n \n :type ConferenceProviderArn: string\n :param ConferenceProviderArn: [REQUIRED]\n The ARN of the conference provider.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_contact(ContactArn=None):\n \"\"\"\n Deletes a contact by the contact ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_contact(\n ContactArn='string'\n )\n \n \n :type ContactArn: string\n :param ContactArn: [REQUIRED]\n The ARN of the contact to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_device(DeviceArn=None):\n \"\"\"\n Removes a device from Alexa For Business.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_device(\n DeviceArn='string'\n )\n \n \n :type DeviceArn: string\n :param DeviceArn: [REQUIRED]\n The ARN of the device for which to request details.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_profile(ProfileArn=None):\n \"\"\"\n Deletes a room profile by the profile ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_profile(\n ProfileArn='string'\n )\n \n \n :type ProfileArn: string\n :param ProfileArn: The ARN of the room profile to delete. Required.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_room(RoomArn=None):\n \"\"\"\n Deletes a room by the room ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_room(\n RoomArn='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room to delete. 
Required.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_room_skill_parameter(RoomArn=None, SkillId=None, ParameterKey=None):\n \"\"\"\n Deletes room skill parameter details by room, skill, and parameter key ID.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_room_skill_parameter(\n RoomArn='string',\n SkillId='string',\n ParameterKey='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room from which to remove the room skill parameter details.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The ID of the skill from which to remove the room skill parameter details.\n \n\n :type ParameterKey: string\n :param ParameterKey: [REQUIRED]\n The room skill parameter key for which to remove details.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_skill_authorization(SkillId=None, RoomArn=None):\n \"\"\"\n Unlinks a third-party account from a skill.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_skill_authorization(\n SkillId='string',\n RoomArn='string'\n )\n \n \n :type SkillId: string\n :param SkillId: [REQUIRED]\n The unique identifier of a skill.\n \n\n :type RoomArn: string\n :param RoomArn: The room that the skill is authorized for.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_skill_group(SkillGroupArn=None):\n \"\"\"\n Deletes a skill group by skill group ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_skill_group(\n SkillGroupArn='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group to delete. Required.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_user(UserArn=None, EnrollmentId=None):\n \"\"\"\n Deletes a specified user by user ARN and enrollment ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user(\n UserArn='string',\n EnrollmentId='string'\n )\n \n \n :type UserArn: string\n :param UserArn: The ARN of the user to delete in the organization. Required.\n\n :type EnrollmentId: string\n :param EnrollmentId: [REQUIRED]\n The ARN of the user's enrollment in the organization. Required.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_contact_from_address_book(ContactArn=None, AddressBookArn=None):\n \"\"\"\n Disassociates a contact from a given address book.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_contact_from_address_book(\n ContactArn='string',\n AddressBookArn='string'\n )\n \n \n :type ContactArn: string\n :param ContactArn: [REQUIRED]\n The ARN of the contact to disassociate from an address book.\n \n\n :type AddressBookArn: string\n :param AddressBookArn: [REQUIRED]\n The ARN of the address from which to disassociate the contact.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_device_from_room(DeviceArn=None):\n \"\"\"\n Disassociates a device from its current room. The device continues to be connected to the Wi-Fi network and is still registered to the account. The device settings and skills are removed from the room.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_device_from_room(\n DeviceArn='string'\n )\n \n \n :type DeviceArn: string\n :param DeviceArn: The ARN of the device to disassociate from a room. 
Required.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disassociate_skill_from_skill_group(SkillGroupArn=None, SkillId=None):\n \"\"\"\n Disassociates a skill from a skill group.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_skill_from_skill_group(\n SkillGroupArn='string',\n SkillId='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group from which to disassociate the skill. Required.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The unique identifier of the skill to disassociate from the skill group.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_skill_from_users(OrganizationArn=None, SkillId=None):\n \"\"\"\n Makes a private skill unavailable for enrolled users and prevents them from enabling it on their devices.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_skill_from_users(\n OrganizationArn='string',\n SkillId='string'\n )\n \n \n :type OrganizationArn: string\n :param OrganizationArn: The ARN of the organization.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The private skill ID you want to make unavailable for enrolled users.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_skill_group_from_room(SkillGroupArn=None, RoomArn=None):\n \"\"\"\n Disassociates a skill group from a specified room. This disables all skills in the skill group on all devices in the room.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_skill_group_from_room(\n SkillGroupArn='string',\n RoomArn='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group to disassociate from a room. Required.\n\n :type RoomArn: string\n :param RoomArn: The ARN of the room from which the skill group is to be disassociated. Required.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef forget_smart_home_appliances(RoomArn=None):\n \"\"\"\n Forgets smart home appliances associated with a room.\n See also: AWS API Documentation\n \n \n :example: response = client.forget_smart_home_appliances(\n RoomArn='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: [REQUIRED]\n The room that the appliances are associated with.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_address_book(AddressBookArn=None):\n \"\"\"\n Gets the address book details by the address book ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_address_book(\n AddressBookArn='string'\n )\n \n \n :type AddressBookArn: string\n :param AddressBookArn: [REQUIRED]\n The ARN of the address book for which to request details.\n \n\n :rtype: dict\n :return: {\n 'AddressBook': {\n 'AddressBookArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_conference_preference():\n \"\"\"\n Retrieves the existing conference preferences.\n See also: AWS API Documentation\n \n \n :example: response = client.get_conference_preference()\n \n \n :rtype: dict\n :return: {\n 'Preference': {\n 'DefaultConferenceProviderArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_conference_provider(ConferenceProviderArn=None):\n \"\"\"\n Gets details about a specific conference provider.\n See also: AWS API Documentation\n \n \n :example: response = client.get_conference_provider(\n ConferenceProviderArn='string'\n )\n \n \n :type ConferenceProviderArn: string\n :param ConferenceProviderArn: [REQUIRED]\n The ARN of the newly created conference provider.\n \n\n :rtype: dict\n :return: {\n 'ConferenceProvider': {\n 'Arn': 'string',\n 'Name': 'string',\n 'Type': 'CHIME'|'BLUEJEANS'|'FUZE'|'GOOGLE_HANGOUTS'|'POLYCOM'|'RINGCENTRAL'|'SKYPE_FOR_BUSINESS'|'WEBEX'|'ZOOM'|'CUSTOM',\n 'IPDialIn': {\n 'Endpoint': 'string',\n 'CommsProtocol': 'SIP'|'SIPS'|'H323'\n },\n 'PSTNDialIn': {\n 'CountryCode': 'string',\n 'PhoneNumber': 'string',\n 'OneClickIdDelay': 'string',\n 'OneClickPinDelay': 'string'\n },\n 'MeetingSetting': {\n 'RequirePin': 'YES'|'NO'|'OPTIONAL'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_contact(ContactArn=None):\n \"\"\"\n Gets the contact details by the contact ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_contact(\n ContactArn='string'\n )\n \n \n :type ContactArn: string\n :param ContactArn: [REQUIRED]\n The ARN of the contact for which to request details.\n \n\n :rtype: dict\n :return: {\n 'Contact': {\n 'ContactArn': 'string',\n 'DisplayName': 'string',\n 'FirstName': 'string',\n 'LastName': 'string',\n 'PhoneNumber': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_device(DeviceArn=None):\n \"\"\"\n Gets the details of a device by device ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_device(\n DeviceArn='string'\n )\n \n \n :type DeviceArn: string\n :param DeviceArn: The ARN of the device for which to request details. Required.\n\n :rtype: dict\n :return: {\n 'Device': {\n 'DeviceArn': 'string',\n 'DeviceSerialNumber': 'string',\n 'DeviceType': 'string',\n 'DeviceName': 'string',\n 'SoftwareVersion': 'string',\n 'MacAddress': 'string',\n 'RoomArn': 'string',\n 'DeviceStatus': 'READY'|'PENDING'|'WAS_OFFLINE'|'DEREGISTERED',\n 'DeviceStatusInfo': {\n 'DeviceStatusDetails': [\n {\n 'Code': 'DEVICE_SOFTWARE_UPDATE_NEEDED'|'DEVICE_WAS_OFFLINE'\n },\n ],\n 'ConnectionStatus': 'ONLINE'|'OFFLINE'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_profile(ProfileArn=None):\n \"\"\"\n Gets the details of a room profile by profile ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_profile(\n ProfileArn='string'\n )\n \n \n :type ProfileArn: string\n :param ProfileArn: The ARN of the room profile for which to request details. Required.\n\n :rtype: dict\n :return: {\n 'Profile': {\n 'ProfileArn': 'string',\n 'ProfileName': 'string',\n 'IsDefault': True|False,\n 'Address': 'string',\n 'Timezone': 'string',\n 'DistanceUnit': 'METRIC'|'IMPERIAL',\n 'TemperatureUnit': 'FAHRENHEIT'|'CELSIUS',\n 'WakeWord': 'ALEXA'|'AMAZON'|'ECHO'|'COMPUTER',\n 'SetupModeDisabled': True|False,\n 'MaxVolumeLimit': 123,\n 'PSTNEnabled': True|False,\n 'AddressBookArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_room(RoomArn=None):\n \"\"\"\n Gets room details by room ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_room(\n RoomArn='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room for which to request details. Required.\n\n :rtype: dict\n :return: {\n 'Room': {\n 'RoomArn': 'string',\n 'RoomName': 'string',\n 'Description': 'string',\n 'ProviderCalendarId': 'string',\n 'ProfileArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_room_skill_parameter(RoomArn=None, SkillId=None, ParameterKey=None):\n \"\"\"\n Gets room skill parameter details by room, skill, and parameter key ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_room_skill_parameter(\n RoomArn='string',\n SkillId='string',\n ParameterKey='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room from which to get the room skill parameter details.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The ARN of the skill from which to get the room skill parameter details. Required.\n \n\n :type ParameterKey: string\n :param ParameterKey: [REQUIRED]\n The room skill parameter key for which to get details. Required.\n \n\n :rtype: dict\n :return: {\n 'RoomSkillParameter': {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_skill_group(SkillGroupArn=None):\n \"\"\"\n Gets skill group details by skill group ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_skill_group(\n SkillGroupArn='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group for which to get details. Required.\n\n :rtype: dict\n :return: {\n 'SkillGroup': {\n 'SkillGroupArn': 'string',\n 'SkillGroupName': 'string',\n 'Description': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_business_report_schedules(NextToken=None, MaxResults=None):\n \"\"\"\n Lists the details of the schedules that a user configured.\n See also: AWS API Documentation\n \n \n :example: response = client.list_business_report_schedules(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: The token used to list the remaining schedules from the previous API call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of schedules listed in the call.\n\n :rtype: dict\n :return: {\n 'BusinessReportSchedules': [\n {\n 'ScheduleArn': 'string',\n 'ScheduleName': 'string',\n 'S3BucketName': 'string',\n 'S3KeyPrefix': 'string',\n 'Format': 'CSV'|'CSV_ZIP',\n 'ContentRange': {\n 'Interval': 'ONE_DAY'|'ONE_WEEK'\n },\n 'Recurrence': {\n 'StartDate': 'string'\n },\n 'LastBusinessReport': {\n 'Status': 'RUNNING'|'SUCCEEDED'|'FAILED',\n 'FailureCode': 'ACCESS_DENIED'|'NO_SUCH_BUCKET'|'INTERNAL_FAILURE',\n 'S3Location': {\n 'Path': 'string',\n 'BucketName': 'string'\n },\n 'DeliveryTime': datetime(2015, 1, 1),\n 'DownloadUrl': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_conference_providers(NextToken=None, MaxResults=None):\n \"\"\"\n Lists conference providers under a specific AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_conference_providers(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: The tokens used for pagination.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of conference providers to be returned, per paginated calls.\n\n :rtype: dict\n :return: {\n 'ConferenceProviders': [\n {\n 'Arn': 'string',\n 'Name': 'string',\n 'Type': 'CHIME'|'BLUEJEANS'|'FUZE'|'GOOGLE_HANGOUTS'|'POLYCOM'|'RINGCENTRAL'|'SKYPE_FOR_BUSINESS'|'WEBEX'|'ZOOM'|'CUSTOM',\n 'IPDialIn': {\n 'Endpoint': 'string',\n 'CommsProtocol': 'SIP'|'SIPS'|'H323'\n },\n 'PSTNDialIn': {\n 'CountryCode': 'string',\n 'PhoneNumber': 'string',\n 'OneClickIdDelay': 'string',\n 'OneClickPinDelay': 'string'\n },\n 'MeetingSetting': {\n 'RequirePin': 'YES'|'NO'|'OPTIONAL'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_device_events(DeviceArn=None, EventType=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists the device event history, including device connection status, for up to 30 days.\n See also: AWS API Documentation\n \n \n :example: response = client.list_device_events(\n DeviceArn='string',\n EventType='CONNECTION_STATUS'|'DEVICE_STATUS',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type DeviceArn: string\n :param DeviceArn: [REQUIRED]\n The ARN of a device.\n \n\n :type EventType: string\n :param EventType: The event type to filter device events. If EventType isn't specified, this returns a list of all device events in reverse chronological order. If EventType is specified, this returns a list of device events for that EventType in reverse chronological order.\n\n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults. 
When the end of results is reached, the response has a value of null.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. The default value is 50. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :rtype: dict\n :return: {\n 'DeviceEvents': [\n {\n 'Type': 'CONNECTION_STATUS'|'DEVICE_STATUS',\n 'Value': 'string',\n 'Timestamp': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_skills(SkillGroupArn=None, EnablementType=None, SkillType=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists all enabled skills in a specific skill group.\n See also: AWS API Documentation\n \n \n :example: response = client.list_skills(\n SkillGroupArn='string',\n EnablementType='ENABLED'|'PENDING',\n SkillType='PUBLIC'|'PRIVATE'|'ALL',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group for which to list enabled skills.\n\n :type EnablementType: string\n :param EnablementType: Whether the skill is enabled under the user's account, or if it requires linking to be used.\n\n :type SkillType: string\n :param SkillType: Whether the skill is publicly available or is a private skill.\n\n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :rtype: dict\n :return: {\n 'SkillSummaries': [\n {\n 'SkillId': 'string',\n 'SkillName': 'string',\n 'SupportsLinking': True|False,\n 'EnablementType': 'ENABLED'|'PENDING',\n 'SkillType': 'PUBLIC'|'PRIVATE'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_skills_store_categories(NextToken=None, MaxResults=None):\n \"\"\"\n Lists all categories in the Alexa skill store.\n See also: AWS API Documentation\n \n \n :example: response = client.list_skills_store_categories(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: The tokens used for pagination.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of categories returned, per paginated calls.\n\n :rtype: dict\n :return: {\n 'CategoryList': [\n {\n 'CategoryId': 123,\n 'CategoryName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_skills_store_skills_by_category(CategoryId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists all skills in the Alexa skill store by category.\n See also: AWS API Documentation\n \n \n :example: response = client.list_skills_store_skills_by_category(\n CategoryId=123,\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CategoryId: integer\n :param CategoryId: [REQUIRED]\n The category ID for which the skills are being retrieved from the skill store.\n \n\n :type NextToken: string\n :param NextToken: The tokens used for pagination.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of skills returned per paginated calls.\n\n :rtype: dict\n :return: {\n 'SkillsStoreSkills': [\n {\n 'SkillId': 'string',\n 'SkillName': 
'string',\n 'ShortDescription': 'string',\n 'IconUrl': 'string',\n 'SampleUtterances': [\n 'string',\n ],\n 'SkillDetails': {\n 'ProductDescription': 'string',\n 'InvocationPhrase': 'string',\n 'ReleaseDate': 'string',\n 'EndUserLicenseAgreement': 'string',\n 'GenericKeywords': [\n 'string',\n ],\n 'BulletPoints': [\n 'string',\n ],\n 'NewInThisVersionBulletPoints': [\n 'string',\n ],\n 'SkillTypes': [\n 'string',\n ],\n 'Reviews': {\n 'string': 'string'\n },\n 'DeveloperInfo': {\n 'DeveloperName': 'string',\n 'PrivacyPolicy': 'string',\n 'Email': 'string',\n 'Url': 'string'\n }\n },\n 'SupportsLinking': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_smart_home_appliances(RoomArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists all of the smart home appliances associated with a room.\n See also: AWS API Documentation\n \n \n :example: response = client.list_smart_home_appliances(\n RoomArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: [REQUIRED]\n The room that the appliances are associated with.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of appliances to be returned, per paginated calls.\n\n :type NextToken: string\n :param NextToken: The tokens used for pagination.\n\n :rtype: dict\n :return: {\n 'SmartHomeAppliances': [\n {\n 'FriendlyName': 'string',\n 'Description': 'string',\n 'ManufacturerName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags(Arn=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists all tags for the specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags(\n Arn='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n The ARN of the specified resource for which to list tags.\n \n\n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_conference_preference(ConferencePreference=None):\n \"\"\"\n Sets the conference preferences on a specific conference provider at the account level.\n See also: AWS API Documentation\n \n \n :example: response = client.put_conference_preference(\n ConferencePreference={\n 'DefaultConferenceProviderArn': 'string'\n }\n )\n \n \n :type ConferencePreference: dict\n :param ConferencePreference: [REQUIRED]\n The conference preference of a specific conference provider.\n DefaultConferenceProviderArn (string) --The ARN of the default conference provider.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_room_skill_parameter(RoomArn=None, SkillId=None, RoomSkillParameter=None):\n \"\"\"\n Updates room skill parameter details by room, skill, and parameter key ID. 
Not all skills have a room skill parameter.\n See also: AWS API Documentation\n \n \n :example: response = client.put_room_skill_parameter(\n RoomArn='string',\n SkillId='string',\n RoomSkillParameter={\n 'ParameterKey': 'string',\n 'ParameterValue': 'string'\n }\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room associated with the room skill parameter. Required.\n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The ARN of the skill associated with the room skill parameter. Required.\n \n\n :type RoomSkillParameter: dict\n :param RoomSkillParameter: [REQUIRED]\n The updated room skill parameter. Required.\n ParameterKey (string) -- [REQUIRED]The parameter key of a room skill parameter. ParameterKey is an enumerated type that only takes DEFAULT or SCOPE as valid values.\n ParameterValue (string) -- [REQUIRED]The parameter value of a room skill parameter.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_skill_authorization(AuthorizationResult=None, SkillId=None, RoomArn=None):\n \"\"\"\n Links a user's account to a third-party skill provider. If this API operation is called by an assumed IAM role, the skill being linked must be a private skill. Also, the skill must be owned by the AWS account that assumed the IAM role.\n See also: AWS API Documentation\n \n \n :example: response = client.put_skill_authorization(\n AuthorizationResult={\n 'string': 'string'\n },\n SkillId='string',\n RoomArn='string'\n )\n \n \n :type AuthorizationResult: dict\n :param AuthorizationResult: [REQUIRED]\n The authorization result specific to OAUTH code grant output. 'Code must be populated in the AuthorizationResult map to establish the authorization.\n (string) --\n (string) --\n \n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The unique identifier of a skill.\n \n\n :type RoomArn: string\n :param RoomArn: The room that the skill is authorized for.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef register_avs_device(ClientId=None, UserCode=None, ProductId=None, DeviceSerialNumber=None, AmazonId=None):\n \"\"\"\n Registers an Alexa-enabled device built by an Original Equipment Manufacturer (OEM) using Alexa Voice Service (AVS).\n See also: AWS API Documentation\n \n \n :example: response = client.register_avs_device(\n ClientId='string',\n UserCode='string',\n ProductId='string',\n DeviceSerialNumber='string',\n AmazonId='string'\n )\n \n \n :type ClientId: string\n :param ClientId: [REQUIRED]\n The client ID of the OEM used for code-based linking authorization on an AVS device.\n \n\n :type UserCode: string\n :param UserCode: [REQUIRED]\n The code that is obtained after your AVS device has made a POST request to LWA as a part of the Device Authorization Request component of the OAuth code-based linking specification.\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product ID used to identify your AVS device during authorization.\n \n\n :type DeviceSerialNumber: string\n :param DeviceSerialNumber: [REQUIRED]\n The key generated by the OEM that uniquely identifies a specified instance of your AVS device.\n \n\n :type AmazonId: string\n :param AmazonId: [REQUIRED]\n The device type ID for your AVS device generated by Amazon when the OEM creates a new product on Amazon's Developer Console.\n \n\n :rtype: dict\n :return: {\n 'DeviceArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef reject_skill(SkillId=None):\n \"\"\"\n Disassociates a skill from the organization under a user's 
AWS account. If the skill is a private skill, it moves to an AcceptStatus of PENDING. Any private or public skill that is rejected can be added later by calling the ApproveSkill API.\n See also: AWS API Documentation\n \n \n :example: response = client.reject_skill(\n SkillId='string'\n )\n \n \n :type SkillId: string\n :param SkillId: [REQUIRED]\n The unique identifier of the skill.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef resolve_room(UserId=None, SkillId=None):\n \"\"\"\n Determines the details for the room from which a skill request was invoked. This operation is used by skill developers.\n See also: AWS API Documentation\n \n \n :example: response = client.resolve_room(\n UserId='string',\n SkillId='string'\n )\n \n \n :type UserId: string\n :param UserId: [REQUIRED]\n The ARN of the user. Required.\n \n\n :type SkillId: string\n :param SkillId: [REQUIRED]\n The ARN of the skill that was requested. Required.\n \n\n :rtype: dict\n :return: {\n 'RoomArn': 'string',\n 'RoomName': 'string',\n 'RoomSkillParameters': [\n {\n 'ParameterKey': 'string',\n 'ParameterValue': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef revoke_invitation(UserArn=None, EnrollmentId=None):\n \"\"\"\n Revokes an invitation and invalidates the enrollment URL.\n See also: AWS API Documentation\n \n \n :example: response = client.revoke_invitation(\n UserArn='string',\n EnrollmentId='string'\n )\n \n \n :type UserArn: string\n :param UserArn: The ARN of the user for whom to revoke an enrollment invitation. Required.\n\n :type EnrollmentId: string\n :param EnrollmentId: The ARN of the enrollment invitation to revoke. Required.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef search_address_books(Filters=None, SortCriteria=None, NextToken=None, MaxResults=None):\n \"\"\"\n Searches address books and lists the ones that meet a set of filter and sort criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_address_books(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: list\n :param Filters: The filters to use to list a specified set of address books. The supported filter key is AddressBookName.\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the specified set of address books. The supported sort key is AddressBookName.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. 
If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :rtype: dict\n :return: {\n 'AddressBooks': [\n {\n 'AddressBookArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef search_contacts(Filters=None, SortCriteria=None, NextToken=None, MaxResults=None):\n \"\"\"\n Searches contacts and lists the ones that meet a set of filter and sort criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_contacts(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: list\n :param Filters: The filters to use to list a specified set of address books. The supported filter keys are DisplayName, FirstName, LastName, and AddressBookArns.\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the specified set of contacts. The supported sort keys are DisplayName, FirstName, and LastName.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :rtype: dict\n :return: {\n 'Contacts': [\n {\n 'ContactArn': 'string',\n 'DisplayName': 'string',\n 'FirstName': 'string',\n 'LastName': 'string',\n 'PhoneNumber': 'string'\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef search_devices(NextToken=None, MaxResults=None, Filters=None, SortCriteria=None):\n \"\"\"\n Searches devices and lists the ones that meet a set of filter criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_devices(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :type Filters: list\n :param Filters: The filters to use to list a specified set of devices. 
Supported filter keys are DeviceName, DeviceStatus, DeviceStatusDetailCode, RoomName, DeviceType, DeviceSerialNumber, UnassociatedOnly, and ConnectionStatus (ONLINE and OFFLINE).\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the specified set of devices. Supported sort keys are DeviceName, DeviceStatus, RoomName, DeviceType, DeviceSerialNumber, and ConnectionStatus.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :rtype: dict\n :return: {\n 'Devices': [\n {\n 'DeviceArn': 'string',\n 'DeviceSerialNumber': 'string',\n 'DeviceType': 'string',\n 'DeviceName': 'string',\n 'SoftwareVersion': 'string',\n 'MacAddress': 'string',\n 'DeviceStatus': 'READY'|'PENDING'|'WAS_OFFLINE'|'DEREGISTERED',\n 'RoomArn': 'string',\n 'RoomName': 'string',\n 'DeviceStatusInfo': {\n 'DeviceStatusDetails': [\n {\n 'Code': 'DEVICE_SOFTWARE_UPDATE_NEEDED'|'DEVICE_WAS_OFFLINE'\n },\n ],\n 'ConnectionStatus': 'ONLINE'|'OFFLINE'\n }\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef search_profiles(NextToken=None, MaxResults=None, Filters=None, SortCriteria=None):\n \"\"\"\n Searches room profiles and lists the ones that meet a set of filter criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_profiles(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :type Filters: list\n :param Filters: The filters to use to list a specified set of room profiles. Supported filter keys are ProfileName and Address. Required.\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the specified set of room profiles. 
Supported sort keys are ProfileName and Address.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :rtype: dict\n :return: {\n 'Profiles': [\n {\n 'ProfileArn': 'string',\n 'ProfileName': 'string',\n 'IsDefault': True|False,\n 'Address': 'string',\n 'Timezone': 'string',\n 'DistanceUnit': 'METRIC'|'IMPERIAL',\n 'TemperatureUnit': 'FAHRENHEIT'|'CELSIUS',\n 'WakeWord': 'ALEXA'|'AMAZON'|'ECHO'|'COMPUTER'\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef search_rooms(NextToken=None, MaxResults=None, Filters=None, SortCriteria=None):\n \"\"\"\n Searches rooms and lists the ones that meet a set of filter and sort criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_rooms(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :type Filters: list\n :param Filters: The filters to use to list a specified set of rooms. The supported filter keys are RoomName and ProfileName.\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the specified set of rooms. The supported sort keys are RoomName and ProfileName.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :rtype: dict\n :return: {\n 'Rooms': [\n {\n 'RoomArn': 'string',\n 'RoomName': 'string',\n 'Description': 'string',\n 'ProviderCalendarId': 'string',\n 'ProfileArn': 'string',\n 'ProfileName': 'string'\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef search_skill_groups(NextToken=None, MaxResults=None, Filters=None, SortCriteria=None):\n \"\"\"\n Searches skill groups and lists the ones that meet a set of filter and sort criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_skill_groups(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults . 
Required.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\n\n :type Filters: list\n :param Filters: The filters to use to list a specified set of skill groups. The supported filter key is SkillGroupName.\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the specified set of skill groups. The supported sort key is SkillGroupName.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :rtype: dict\n :return: {\n 'SkillGroups': [\n {\n 'SkillGroupArn': 'string',\n 'SkillGroupName': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef search_users(NextToken=None, MaxResults=None, Filters=None, SortCriteria=None):\n \"\"\"\n Searches users and lists the ones that meet a set of filter and sort criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_users(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n SortCriteria=[\n {\n 'Key': 'string',\n 'Value': 'ASC'|'DESC'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults . Required.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. Required.\n\n :type Filters: list\n :param Filters: The filters to use for listing a specific set of users. Required. Supported filter keys are UserId, FirstName, LastName, Email, and EnrollmentStatus.\n (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.\n Key (string) -- [REQUIRED]The key of a filter.\n Values (list) -- [REQUIRED]The values of a filter.\n (string) --\n \n \n\n :type SortCriteria: list\n :param SortCriteria: The sort order to use in listing the filtered set of users. Required. 
Supported sort keys are UserId, FirstName, LastName, Email, and EnrollmentStatus.\n (dict) --An object representing a sort criteria.\n Key (string) -- [REQUIRED]The sort key of a sort object.\n Value (string) -- [REQUIRED]The sort value of a sort object.\n \n \n\n :rtype: dict\n :return: {\n 'Users': [\n {\n 'UserArn': 'string',\n 'FirstName': 'string',\n 'LastName': 'string',\n 'Email': 'string',\n 'EnrollmentStatus': 'INITIALIZED'|'PENDING'|'REGISTERED'|'DISASSOCIATING'|'DEREGISTERING',\n 'EnrollmentId': 'string'\n },\n ],\n 'NextToken': 'string',\n 'TotalCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef send_invitation(UserArn=None):\n \"\"\"\n Sends an enrollment invitation email with a URL to a user. The URL is valid for 72 hours or until you call this operation again, whichever comes first.\n See also: AWS API Documentation\n \n \n :example: response = client.send_invitation(\n UserArn='string'\n )\n \n \n :type UserArn: string\n :param UserArn: The ARN of the user to whom to send an invitation. Required.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef start_device_sync(RoomArn=None, DeviceArn=None, Features=None):\n \"\"\"\n Resets a device and its account to the known default settings, by clearing all information and settings set by previous users.\n See also: AWS API Documentation\n \n \n :example: response = client.start_device_sync(\n RoomArn='string',\n DeviceArn='string',\n Features=[\n 'BLUETOOTH'|'VOLUME'|'NOTIFICATIONS'|'LISTS'|'SKILLS'|'ALL',\n ]\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room with which the device to sync is associated. Required.\n\n :type DeviceArn: string\n :param DeviceArn: The ARN of the device to sync. Required.\n\n :type Features: list\n :param Features: [REQUIRED]\n Request structure to start the device sync. Required.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef start_smart_home_appliance_discovery(RoomArn=None):\n \"\"\"\n Initiates the discovery of any smart home appliances associated with the room.\n See also: AWS API Documentation\n \n \n :example: response = client.start_smart_home_appliance_discovery(\n RoomArn='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: [REQUIRED]\n The room where smart home appliance discovery was initiated.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef tag_resource(Arn=None, Tags=None):\n \"\"\"\n Adds metadata tags to a specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n Arn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n The ARN of the resource to which to add metadata tags. Required.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n The tags to be added to the specified resource. Do not provide system tags. Required.\n (dict) --A key-value pair that can be associated with a resource.\n Key (string) -- [REQUIRED]The key of a tag. Tag keys are case-sensitive.\n Value (string) -- [REQUIRED]The value of a tag. 
Tag values are case-sensitive and can be null.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(Arn=None, TagKeys=None):\n \"\"\"\n Removes metadata tags from a specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n Arn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n The ARN of the resource from which to remove metadata tags. Required.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n The tags to be removed from the specified resource. Do not provide system tags. Required.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_address_book(AddressBookArn=None, Name=None, Description=None):\n \"\"\"\n Updates address book details by the address book ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_address_book(\n AddressBookArn='string',\n Name='string',\n Description='string'\n )\n \n \n :type AddressBookArn: string\n :param AddressBookArn: [REQUIRED]\n The ARN of the room to update.\n \n\n :type Name: string\n :param Name: The updated name of the room.\n\n :type Description: string\n :param Description: The updated description of the room.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_business_report_schedule(ScheduleArn=None, S3BucketName=None, S3KeyPrefix=None, Format=None, ScheduleName=None, Recurrence=None):\n \"\"\"\n Updates the configuration of the report delivery schedule with the specified schedule ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_business_report_schedule(\n ScheduleArn='string',\n S3BucketName='string',\n S3KeyPrefix='string',\n Format='CSV'|'CSV_ZIP',\n ScheduleName='string',\n Recurrence={\n 'StartDate': 'string'\n }\n )\n \n \n :type ScheduleArn: string\n :param ScheduleArn: [REQUIRED]\n The ARN of the business report schedule.\n \n\n :type S3BucketName: string\n :param S3BucketName: The S3 location of the output reports.\n\n :type S3KeyPrefix: string\n :param S3KeyPrefix: The S3 key where the report is delivered.\n\n :type Format: string\n :param Format: The format of the generated report (individual CSV files or zipped files of individual files).\n\n :type ScheduleName: string\n :param ScheduleName: The name identifier of the schedule.\n\n :type Recurrence: dict\n :param Recurrence: The recurrence of the reports.\n StartDate (string) --The start date.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_conference_provider(ConferenceProviderArn=None, ConferenceProviderType=None, IPDialIn=None, PSTNDialIn=None, MeetingSetting=None):\n \"\"\"\n Updates an existing conference provider's settings.\n See also: AWS API Documentation\n \n \n :example: response = client.update_conference_provider(\n ConferenceProviderArn='string',\n ConferenceProviderType='CHIME'|'BLUEJEANS'|'FUZE'|'GOOGLE_HANGOUTS'|'POLYCOM'|'RINGCENTRAL'|'SKYPE_FOR_BUSINESS'|'WEBEX'|'ZOOM'|'CUSTOM',\n IPDialIn={\n 'Endpoint': 'string',\n 'CommsProtocol': 'SIP'|'SIPS'|'H323'\n },\n PSTNDialIn={\n 'CountryCode': 'string',\n 'PhoneNumber': 'string',\n 'OneClickIdDelay': 'string',\n 'OneClickPinDelay': 'string'\n },\n MeetingSetting={\n 'RequirePin': 'YES'|'NO'|'OPTIONAL'\n }\n )\n \n \n :type ConferenceProviderArn: string\n :param ConferenceProviderArn: [REQUIRED]\n The ARN of the conference provider.\n \n\n :type 
ConferenceProviderType: string\n :param ConferenceProviderType: [REQUIRED]\n The type of the conference provider.\n \n\n :type IPDialIn: dict\n :param IPDialIn: The IP endpoint and protocol for calling.\n Endpoint (string) -- [REQUIRED]The IP address.\n CommsProtocol (string) -- [REQUIRED]The protocol, including SIP, SIPS, and H323.\n \n\n :type PSTNDialIn: dict\n :param PSTNDialIn: The information for PSTN conferencing.\n CountryCode (string) -- [REQUIRED]The zip code.\n PhoneNumber (string) -- [REQUIRED]The phone number to call to join the conference.\n OneClickIdDelay (string) -- [REQUIRED]The delay duration before Alexa enters the conference ID with dual-tone multi-frequency (DTMF). Each number on the dial pad corresponds to a DTMF tone, which is how we send data over the telephone network.\n OneClickPinDelay (string) -- [REQUIRED]The delay duration before Alexa enters the conference pin with dual-tone multi-frequency (DTMF). Each number on the dial pad corresponds to a DTMF tone, which is how we send data over the telephone network.\n \n\n :type MeetingSetting: dict\n :param MeetingSetting: [REQUIRED]\n The meeting settings for the conference provider.\n RequirePin (string) -- [REQUIRED]The values that indicate whether the pin is always required.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_contact(ContactArn=None, DisplayName=None, FirstName=None, LastName=None, PhoneNumber=None):\n \"\"\"\n Updates the contact details by the contact ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_contact(\n ContactArn='string',\n DisplayName='string',\n FirstName='string',\n LastName='string',\n PhoneNumber='string'\n )\n \n \n :type ContactArn: string\n :param ContactArn: [REQUIRED]\n The ARN of the contact to update.\n \n\n :type DisplayName: string\n :param DisplayName: The updated display name of the contact.\n\n :type FirstName: string\n :param FirstName: The updated first name of the contact.\n\n :type LastName: string\n :param LastName: The updated last name of the contact.\n\n :type PhoneNumber: string\n :param PhoneNumber: The updated phone number of the contact.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_device(DeviceArn=None, DeviceName=None):\n \"\"\"\n Updates the device name by device ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_device(\n DeviceArn='string',\n DeviceName='string'\n )\n \n \n :type DeviceArn: string\n :param DeviceArn: The ARN of the device to update. Required.\n\n :type DeviceName: string\n :param DeviceName: The updated device name. Required.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_profile(ProfileArn=None, ProfileName=None, IsDefault=None, Timezone=None, Address=None, DistanceUnit=None, TemperatureUnit=None, WakeWord=None, SetupModeDisabled=None, MaxVolumeLimit=None, PSTNEnabled=None):\n \"\"\"\n Updates an existing room profile by room profile ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_profile(\n ProfileArn='string',\n ProfileName='string',\n IsDefault=True|False,\n Timezone='string',\n Address='string',\n DistanceUnit='METRIC'|'IMPERIAL',\n TemperatureUnit='FAHRENHEIT'|'CELSIUS',\n WakeWord='ALEXA'|'AMAZON'|'ECHO'|'COMPUTER',\n SetupModeDisabled=True|False,\n MaxVolumeLimit=123,\n PSTNEnabled=True|False\n )\n \n \n :type ProfileArn: string\n :param ProfileArn: The ARN of the room profile to update. 
Required.\n\n :type ProfileName: string\n :param ProfileName: The updated name for the room profile.\n\n :type IsDefault: boolean\n :param IsDefault: Sets the profile as default if selected. If this is missing, no update is done to the default status.\n\n :type Timezone: string\n :param Timezone: The updated timezone for the room profile.\n\n :type Address: string\n :param Address: The updated address for the room profile.\n\n :type DistanceUnit: string\n :param DistanceUnit: The updated distance unit for the room profile.\n\n :type TemperatureUnit: string\n :param TemperatureUnit: The updated temperature unit for the room profile.\n\n :type WakeWord: string\n :param WakeWord: The updated wake word for the room profile.\n\n :type SetupModeDisabled: boolean\n :param SetupModeDisabled: Whether the setup mode of the profile is enabled.\n\n :type MaxVolumeLimit: integer\n :param MaxVolumeLimit: The updated maximum volume limit for the room profile.\n\n :type PSTNEnabled: boolean\n :param PSTNEnabled: Whether the PSTN setting of the room profile is enabled.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_room(RoomArn=None, RoomName=None, Description=None, ProviderCalendarId=None, ProfileArn=None):\n \"\"\"\n Updates room details by room ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_room(\n RoomArn='string',\n RoomName='string',\n Description='string',\n ProviderCalendarId='string',\n ProfileArn='string'\n )\n \n \n :type RoomArn: string\n :param RoomArn: The ARN of the room to update.\n\n :type RoomName: string\n :param RoomName: The updated name for the room.\n\n :type Description: string\n :param Description: The updated description for the room.\n\n :type ProviderCalendarId: string\n :param ProviderCalendarId: The updated provider calendar ARN for the room.\n\n :type ProfileArn: string\n :param ProfileArn: The updated profile ARN for the room.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_skill_group(SkillGroupArn=None, SkillGroupName=None, Description=None):\n \"\"\"\n Updates skill group details by skill group ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_skill_group(\n SkillGroupArn='string',\n SkillGroupName='string',\n Description='string'\n )\n \n \n :type SkillGroupArn: string\n :param SkillGroupArn: The ARN of the skill group to update.\n\n :type SkillGroupName: string\n :param SkillGroupName: The updated name for the skill group.\n\n :type Description: string\n :param Description: The updated description for the skill group.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6186137795448303, "alphanum_fraction": 0.6261388659477234, "avg_line_length": 36.53571319580078, "blob_id": "56af4b5b7637322470292e3c35eee011b37d3bc9", "content_id": "b9f4f605889d05f9dd838f9d129edc7b5301a06b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34684, "license_type": "permissive", "max_line_length": 556, "num_lines": 924, "path": "/pyboto3/globalaccelerator.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the 
rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_accelerator(Name=None, IpAddressType=None, Enabled=None, IdempotencyToken=None):\n \"\"\"\n Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. To see an AWS CLI example of creating an accelerator, scroll down to Example .\n See also: AWS API Documentation\n \n \n :example: response = client.create_accelerator(\n Name='string',\n IpAddressType='IPV4',\n Enabled=True|False,\n IdempotencyToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of an accelerator. The name can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens (-), and must not begin or end with a hyphen.\n \n\n :type IpAddressType: string\n :param IpAddressType: The value for the address type must be IPv4.\n\n :type Enabled: boolean\n :param Enabled: Indicates whether an accelerator is enabled. The value is true or false. The default value is true.\n If the value is set to true, an accelerator cannot be deleted. If set to false, the accelerator can be deleted.\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique, case-sensitive identifier that you provide to ensure the idempotency that is, the uniqueness of an accelerator.\n \n\n :rtype: dict\n :return: {\n 'Accelerator': {\n 'AcceleratorArn': 'string',\n 'Name': 'string',\n 'IpAddressType': 'IPV4',\n 'Enabled': True|False,\n 'IpSets': [\n {\n 'IpFamily': 'string',\n 'IpAddresses': [\n 'string',\n ]\n },\n ],\n 'Status': 'DEPLOYED'|'IN_PROGRESS',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_endpoint_group(ListenerArn=None, EndpointGroupRegion=None, EndpointConfigurations=None, TrafficDialPercentage=None, HealthCheckPort=None, HealthCheckProtocol=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, ThresholdCount=None, IdempotencyToken=None):\n \"\"\"\n Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one AWS Region. 
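A minimal boto3 sketch of the usual setup order, since an endpoint group hangs off a listener, which in turn hangs off an accelerator (the names, ports, Region, and uuid-based idempotency tokens below are illustrative, not part of the API):\n import uuid\n import boto3\n client = boto3.client('globalaccelerator')\n # reuse the same IdempotencyToken when retrying the same logical request\n acc = client.create_accelerator(Name='demo-accelerator', IdempotencyToken=str(uuid.uuid4()))\n lst = client.create_listener(AcceleratorArn=acc['Accelerator']['AcceleratorArn'], PortRanges=[{'FromPort': 80, 'ToPort': 80}], Protocol='TCP', IdempotencyToken=str(uuid.uuid4()))\n grp = client.create_endpoint_group(ListenerArn=lst['Listener']['ListenerArn'], EndpointGroupRegion='us-east-1', IdempotencyToken=str(uuid.uuid4()))\n 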
To see an AWS CLI example of creating an endpoint group, scroll down to Example .\n See also: AWS API Documentation\n \n \n :example: response = client.create_endpoint_group(\n ListenerArn='string',\n EndpointGroupRegion='string',\n EndpointConfigurations=[\n {\n 'EndpointId': 'string',\n 'Weight': 123\n },\n ],\n TrafficDialPercentage=...,\n HealthCheckPort=123,\n HealthCheckProtocol='TCP'|'HTTP'|'HTTPS',\n HealthCheckPath='string',\n HealthCheckIntervalSeconds=123,\n ThresholdCount=123,\n IdempotencyToken='string'\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :type EndpointGroupRegion: string\n :param EndpointGroupRegion: [REQUIRED]\n The name of the AWS Region where the endpoint group is located. A listener can have only one endpoint group in a specific Region.\n \n\n :type EndpointConfigurations: list\n :param EndpointConfigurations: The list of endpoint objects.\n (dict) --A complex type for endpoints.\n EndpointId (string) --An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID.\n Weight (integer) --The weight associated with the endpoint. When you add weights to endpoints, you configure AWS Global Accelerator to route traffic based on proportions that you specify. For example, you might specify endpoint weights of 4, 5, 5, and 6 (sum=20). The result is that 4/20 of your traffic, on average, is routed to the first endpoint, 5/20 is routed both to the second and third endpoints, and 6/20 is routed to the last endpoint. For more information, see Endpoint Weights in the AWS Global Accelerator Developer Guide .\n \n \n\n :type TrafficDialPercentage: float\n :param TrafficDialPercentage: The percentage of traffic to send to an AWS Region. Additional traffic is distributed to other endpoint groups for this listener.\n Use this action to increase (dial up) or decrease (dial down) traffic to a specific Region. The percentage is applied to the traffic that would otherwise have been routed to the Region based on optimal routing.\n The default value is 100.\n \n\n :type HealthCheckPort: integer\n :param HealthCheckPort: The port that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default port is the listener port that this endpoint group is associated with. If listener port is a list of ports, Global Accelerator uses the first port in the list.\n\n :type HealthCheckProtocol: string\n :param HealthCheckProtocol: The protocol that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default value is TCP.\n\n :type HealthCheckPath: string\n :param HealthCheckPath: If the protocol is HTTP/S, then this specifies the path that is the destination for health check targets. The default value is slash (/).\n\n :type HealthCheckIntervalSeconds: integer\n :param HealthCheckIntervalSeconds: The time 10 seconds or 30 seconds between each health check for an endpoint. The default value is 30.\n\n :type ThresholdCount: integer\n :param ThresholdCount: The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. 
The default value is 3.\n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique, case-sensitive identifier that you provide to ensure the idempotency (that is, the uniqueness) of the request.\n \n\n :rtype: dict\n :return: {\n 'EndpointGroup': {\n 'EndpointGroupArn': 'string',\n 'EndpointGroupRegion': 'string',\n 'EndpointDescriptions': [\n {\n 'EndpointId': 'string',\n 'Weight': 123,\n 'HealthState': 'INITIAL'|'HEALTHY'|'UNHEALTHY',\n 'HealthReason': 'string'\n },\n ],\n 'TrafficDialPercentage': ...,\n 'HealthCheckPort': 123,\n 'HealthCheckProtocol': 'TCP'|'HTTP'|'HTTPS',\n 'HealthCheckPath': 'string',\n 'HealthCheckIntervalSeconds': 123,\n 'ThresholdCount': 123\n }\n }\n \n \n :returns: \n Timeout : The health check requests to the endpoint are timing out before returning a status.\n Failed : The health check failed, for example because the endpoint response was invalid (malformed).\n \n \"\"\"\n pass\n\ndef create_listener(AcceleratorArn=None, PortRanges=None, Protocol=None, ClientAffinity=None, IdempotencyToken=None):\n \"\"\"\n Create a listener to process inbound connections from clients to an accelerator. Connections arrive to assigned static IP addresses on a port, port range, or list of port ranges that you specify. To see an AWS CLI example of creating a listener, scroll down to Example .\n See also: AWS API Documentation\n \n \n :example: response = client.create_listener(\n AcceleratorArn='string',\n PortRanges=[\n {\n 'FromPort': 123,\n 'ToPort': 123\n },\n ],\n Protocol='TCP'|'UDP',\n ClientAffinity='NONE'|'SOURCE_IP',\n IdempotencyToken='string'\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: [REQUIRED]\n The Amazon Resource Name (ARN) of your accelerator.\n \n\n :type PortRanges: list\n :param PortRanges: [REQUIRED]\n The list of port ranges to support for connections from clients to your accelerator.\n (dict) --A complex type for a range of ports for a listener.\n FromPort (integer) -- [REQUIRED]The first port in the range of ports, inclusive.\n ToPort (integer) -- [REQUIRED]The last port in the range of ports, inclusive.\n \n \n\n :type Protocol: string\n :param Protocol: [REQUIRED]\n The protocol for connections from clients to your accelerator.\n \n\n :type ClientAffinity: string\n :param ClientAffinity: Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.\n AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE , Global Accelerator uses the 'five-tuple' (5-tuple) properties (source IP address, source port, destination IP address, destination port, and protocol) to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not always be routed to the same endpoint because the hash value changes.\n If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. 
When you use the SOURCE_IP setting, Global Accelerator uses the 'two-tuple' (2-tuple) properties source (client) IP address and destination IP address to select the hash value.\n The default value is NONE .\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique, case-sensitive identifier that you provide to ensure the idempotency that is, the uniqueness of the request.\n \n\n :rtype: dict\n :return: {\n 'Listener': {\n 'ListenerArn': 'string',\n 'PortRanges': [\n {\n 'FromPort': 123,\n 'ToPort': 123\n },\n ],\n 'Protocol': 'TCP'|'UDP',\n 'ClientAffinity': 'NONE'|'SOURCE_IP'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_accelerator(AcceleratorArn=None):\n \"\"\"\n Delete an accelerator. Note: before you can delete an accelerator, you must disable it and remove all dependent resources (listeners and endpoint groups).\n See also: AWS API Documentation\n \n \n :example: response = client.delete_accelerator(\n AcceleratorArn='string'\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: [REQUIRED]\n The Amazon Resource Name (ARN) of an accelerator.\n \n\n \"\"\"\n pass\n\ndef delete_endpoint_group(EndpointGroupArn=None):\n \"\"\"\n Delete an endpoint group from a listener.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_endpoint_group(\n EndpointGroupArn='string'\n )\n \n \n :type EndpointGroupArn: string\n :param EndpointGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the endpoint group to delete.\n \n\n \"\"\"\n pass\n\ndef delete_listener(ListenerArn=None):\n \"\"\"\n Delete a listener from an accelerator.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_listener(\n ListenerArn='string'\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n \"\"\"\n pass\n\ndef describe_accelerator(AcceleratorArn=None):\n \"\"\"\n Describe an accelerator. To see an AWS CLI example of describing an accelerator, scroll down to Example .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_accelerator(\n AcceleratorArn='string'\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the accelerator to describe.\n \n\n :rtype: dict\n :return: {\n 'Accelerator': {\n 'AcceleratorArn': 'string',\n 'Name': 'string',\n 'IpAddressType': 'IPV4',\n 'Enabled': True|False,\n 'IpSets': [\n {\n 'IpFamily': 'string',\n 'IpAddresses': [\n 'string',\n ]\n },\n ],\n 'Status': 'DEPLOYED'|'IN_PROGRESS',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_accelerator_attributes(AcceleratorArn=None):\n \"\"\"\n Describe the attributes of an accelerator.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_accelerator_attributes(\n AcceleratorArn='string'\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: The Amazon Resource Name (ARN) of the accelerator with the attributes that you want to describe. 
Value is required.\n\n :rtype: dict\n :return: {\n 'AcceleratorAttributes': {\n 'FlowLogsEnabled': True|False,\n 'FlowLogsS3Bucket': 'string',\n 'FlowLogsS3Prefix': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_endpoint_group(EndpointGroupArn=None):\n \"\"\"\n Describe an endpoint group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_endpoint_group(\n EndpointGroupArn='string'\n )\n \n \n :type EndpointGroupArn: string\n :param EndpointGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the endpoint group to describe.\n \n\n :rtype: dict\n :return: {\n 'EndpointGroup': {\n 'EndpointGroupArn': 'string',\n 'EndpointGroupRegion': 'string',\n 'EndpointDescriptions': [\n {\n 'EndpointId': 'string',\n 'Weight': 123,\n 'HealthState': 'INITIAL'|'HEALTHY'|'UNHEALTHY',\n 'HealthReason': 'string'\n },\n ],\n 'TrafficDialPercentage': ...,\n 'HealthCheckPort': 123,\n 'HealthCheckProtocol': 'TCP'|'HTTP'|'HTTPS',\n 'HealthCheckPath': 'string',\n 'HealthCheckIntervalSeconds': 123,\n 'ThresholdCount': 123\n }\n }\n \n \n :returns: \n ProvisioningInProgress : The endpoint is in the process of being provisioned.\n InitialHealthChecking : Global Accelerator is still setting up the minimum number of health checks for the endpoint that are required to determine its health status.\n \n \"\"\"\n pass\n\ndef describe_listener(ListenerArn=None):\n \"\"\"\n Describe a listener.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_listener(\n ListenerArn='string'\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener to describe.\n \n\n :rtype: dict\n :return: {\n 'Listener': {\n 'ListenerArn': 'string',\n 'PortRanges': [\n {\n 'FromPort': 123,\n 'ToPort': 123\n },\n ],\n 'Protocol': 'TCP'|'UDP',\n 'ClientAffinity': 'NONE'|'SOURCE_IP'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_accelerators(MaxResults=None, NextToken=None):\n \"\"\"\n List the accelerators for an AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_accelerators(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The number of Global Accelerator objects that you want to return with this call. The default value is 10.\n\n :type NextToken: string\n :param NextToken: The token for the next set of results. You receive this token from a previous call.\n\n :rtype: dict\n :return: {\n 'Accelerators': [\n {\n 'AcceleratorArn': 'string',\n 'Name': 'string',\n 'IpAddressType': 'IPV4',\n 'Enabled': True|False,\n 'IpSets': [\n {\n 'IpFamily': 'string',\n 'IpAddresses': [\n 'string',\n ]\n },\n ],\n 'Status': 'DEPLOYED'|'IN_PROGRESS',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_endpoint_groups(ListenerArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n List the endpoint groups that are associated with a listener.\n See also: AWS API Documentation\n \n \n :example: response = client.list_endpoint_groups(\n ListenerArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :type MaxResults: integer\n :param MaxResults: The number of endpoint group objects that you want to return with this call. The default value is 10.\n\n :type NextToken: string\n :param NextToken: The token for the next set of results. You receive this token from a previous call.\n\n :rtype: dict\n :return: {\n 'EndpointGroups': [\n {\n 'EndpointGroupArn': 'string',\n 'EndpointGroupRegion': 'string',\n 'EndpointDescriptions': [\n {\n 'EndpointId': 'string',\n 'Weight': 123,\n 'HealthState': 'INITIAL'|'HEALTHY'|'UNHEALTHY',\n 'HealthReason': 'string'\n },\n ],\n 'TrafficDialPercentage': ...,\n 'HealthCheckPort': 123,\n 'HealthCheckProtocol': 'TCP'|'HTTP'|'HTTPS',\n 'HealthCheckPath': 'string',\n 'HealthCheckIntervalSeconds': 123,\n 'ThresholdCount': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Timeout : The health check requests to the endpoint are timing out before returning a status.\n Failed : The health check failed, for example because the endpoint response was invalid (malformed).\n \n \"\"\"\n pass\n\ndef list_listeners(AcceleratorArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n List the listeners for an accelerator.\n See also: AWS API Documentation\n \n \n :example: response = client.list_listeners(\n AcceleratorArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the accelerator for which you want to list listener objects.\n \n\n :type MaxResults: integer\n :param MaxResults: The number of listener objects that you want to return with this call. The default value is 10.\n\n :type NextToken: string\n :param NextToken: The token for the next set of results. 
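In practice you pass the token back until it stops appearing; a rough pagination sketch, assuming client = boto3.client('globalaccelerator') (the accumulator list is illustrative):\n accelerators = []\n token = None\n while True:\n kwargs = {'MaxResults': 10}\n if token:\n kwargs['NextToken'] = token # token returned by the previous page\n page = client.list_accelerators(**kwargs)\n accelerators.extend(page['Accelerators'])\n token = page.get('NextToken')\n if not token:\n break\n 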
You receive this token from a previous call.\n\n :rtype: dict\n :return: {\n 'Listeners': [\n {\n 'ListenerArn': 'string',\n 'PortRanges': [\n {\n 'FromPort': 123,\n 'ToPort': 123\n },\n ],\n 'Protocol': 'TCP'|'UDP',\n 'ClientAffinity': 'NONE'|'SOURCE_IP'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_accelerator(AcceleratorArn=None, Name=None, IpAddressType=None, Enabled=None):\n \"\"\"\n Update an accelerator.\n See also: AWS API Documentation\n \n \n :example: response = client.update_accelerator(\n AcceleratorArn='string',\n Name='string',\n IpAddressType='IPV4',\n Enabled=True|False\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the accelerator to update.\n \n\n :type Name: string\n :param Name: The name of the accelerator. The name can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens (-), and must not begin or end with a hyphen.\n\n :type IpAddressType: string\n :param IpAddressType: The value for the address type must be IPv4.\n\n :type Enabled: boolean\n :param Enabled: Indicates whether an accelerator is enabled. The value is true or false. The default value is true.\n If the value is set to true, the accelerator cannot be deleted. If set to false, the accelerator can be deleted.\n \n\n :rtype: dict\n :return: {\n 'Accelerator': {\n 'AcceleratorArn': 'string',\n 'Name': 'string',\n 'IpAddressType': 'IPV4',\n 'Enabled': True|False,\n 'IpSets': [\n {\n 'IpFamily': 'string',\n 'IpAddresses': [\n 'string',\n ]\n },\n ],\n 'Status': 'DEPLOYED'|'IN_PROGRESS',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_accelerator_attributes(AcceleratorArn=None, FlowLogsEnabled=None, FlowLogsS3Bucket=None, FlowLogsS3Prefix=None):\n \"\"\"\n Update the attributes for an accelerator. To see an AWS CLI example of updating an accelerator to enable flow logs, scroll down to Example .\n See also: AWS API Documentation\n \n \n :example: response = client.update_accelerator_attributes(\n AcceleratorArn='string',\n FlowLogsEnabled=True|False,\n FlowLogsS3Bucket='string',\n FlowLogsS3Prefix='string'\n )\n \n \n :type AcceleratorArn: string\n :param AcceleratorArn: The Amazon Resource Name (ARN) of the accelerator that you want to update. Attribute is required.\n\n :type FlowLogsEnabled: boolean\n :param FlowLogsEnabled: Update whether flow logs are enabled. The default value is false. If the value is true, FlowLogsS3Bucket and FlowLogsS3Prefix must be specified.\n For more information, see Flow Logs in the AWS Global Accelerator Developer Guide .\n \n\n :type FlowLogsS3Bucket: string\n :param FlowLogsS3Bucket: The name of the Amazon S3 bucket for the flow logs. Attribute is required if FlowLogsEnabled is true . The bucket must exist and have a bucket policy that grants AWS Global Accelerator permission to write to the bucket.\n\n :type FlowLogsS3Prefix: string\n :param FlowLogsS3Prefix: Update the prefix for the location in the Amazon S3 bucket for the flow logs. Attribute is required if FlowLogsEnabled is true . 
If you don't specify a prefix, the flow logs are stored in the root of the bucket.\n\n :rtype: dict\n :return: {\n 'AcceleratorAttributes': {\n 'FlowLogsEnabled': True|False,\n 'FlowLogsS3Bucket': 'string',\n 'FlowLogsS3Prefix': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_endpoint_group(EndpointGroupArn=None, EndpointConfigurations=None, TrafficDialPercentage=None, HealthCheckPort=None, HealthCheckProtocol=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, ThresholdCount=None):\n \"\"\"\n Update an endpoint group. To see an AWS CLI example of updating an endpoint group, scroll down to Example .\n See also: AWS API Documentation\n \n \n :example: response = client.update_endpoint_group(\n EndpointGroupArn='string',\n EndpointConfigurations=[\n {\n 'EndpointId': 'string',\n 'Weight': 123\n },\n ],\n TrafficDialPercentage=...,\n HealthCheckPort=123,\n HealthCheckProtocol='TCP'|'HTTP'|'HTTPS',\n HealthCheckPath='string',\n HealthCheckIntervalSeconds=123,\n ThresholdCount=123\n )\n \n \n :type EndpointGroupArn: string\n :param EndpointGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the endpoint group.\n \n\n :type EndpointConfigurations: list\n :param EndpointConfigurations: The list of endpoint objects.\n (dict) --A complex type for endpoints.\n EndpointId (string) --An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID.\n Weight (integer) --The weight associated with the endpoint. When you add weights to endpoints, you configure AWS Global Accelerator to route traffic based on proportions that you specify. For example, you might specify endpoint weights of 4, 5, 5, and 6 (sum=20). The result is that 4/20 of your traffic, on average, is routed to the first endpoint, 5/20 is routed both to the second and third endpoints, and 6/20 is routed to the last endpoint. For more information, see Endpoint Weights in the AWS Global Accelerator Developer Guide .\n \n \n\n :type TrafficDialPercentage: float\n :param TrafficDialPercentage: The percentage of traffic to send to an AWS Region. Additional traffic is distributed to other endpoint groups for this listener.\n Use this action to increase (dial up) or decrease (dial down) traffic to a specific Region. The percentage is applied to the traffic that would otherwise have been routed to the Region based on optimal routing.\n The default value is 100.\n \n\n :type HealthCheckPort: integer\n :param HealthCheckPort: The port that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default port is the listener port that this endpoint group is associated with. If the listener port is a list of ports, Global Accelerator uses the first port in the list.\n\n :type HealthCheckProtocol: string\n :param HealthCheckProtocol: The protocol that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default value is TCP.\n\n :type HealthCheckPath: string\n :param HealthCheckPath: If the protocol is HTTP/S, then this specifies the path that is the destination for health check targets. The default value is slash (/).\n\n :type HealthCheckIntervalSeconds: integer\n :param HealthCheckIntervalSeconds: The time (10 seconds or 30 seconds) between each health check for an endpoint. 
The default value is 30.\n\n :type ThresholdCount: integer\n :param ThresholdCount: The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. The default value is 3.\n\n :rtype: dict\n :return: {\n 'EndpointGroup': {\n 'EndpointGroupArn': 'string',\n 'EndpointGroupRegion': 'string',\n 'EndpointDescriptions': [\n {\n 'EndpointId': 'string',\n 'Weight': 123,\n 'HealthState': 'INITIAL'|'HEALTHY'|'UNHEALTHY',\n 'HealthReason': 'string'\n },\n ],\n 'TrafficDialPercentage': ...,\n 'HealthCheckPort': 123,\n 'HealthCheckProtocol': 'TCP'|'HTTP'|'HTTPS',\n 'HealthCheckPath': 'string',\n 'HealthCheckIntervalSeconds': 123,\n 'ThresholdCount': 123\n }\n }\n \n \n :returns: \n Timeout : The health check requests to the endpoint are timing out before returning a status.\n Failed : The health check failed, for example because the endpoint response was invalid (malformed).\n \n \"\"\"\n pass\n\ndef update_listener(ListenerArn=None, PortRanges=None, Protocol=None, ClientAffinity=None):\n \"\"\"\n Update a listener.\n See also: AWS API Documentation\n \n \n :example: response = client.update_listener(\n ListenerArn='string',\n PortRanges=[\n {\n 'FromPort': 123,\n 'ToPort': 123\n },\n ],\n Protocol='TCP'|'UDP',\n ClientAffinity='NONE'|'SOURCE_IP'\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener to update.\n \n\n :type PortRanges: list\n :param PortRanges: The updated list of port ranges for the connections from clients to the accelerator.\n (dict) --A complex type for a range of ports for a listener.\n FromPort (integer) -- [REQUIRED]The first port in the range of ports, inclusive.\n ToPort (integer) -- [REQUIRED]The last port in the range of ports, inclusive.\n \n \n\n :type Protocol: string\n :param Protocol: The updated protocol for the connections from clients to the accelerator.\n\n :type ClientAffinity: string\n :param ClientAffinity: Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.\n AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE , Global Accelerator uses the 'five-tuple' (5-tuple) properties (source IP address, source port, destination IP address, destination port, and protocol) to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not always be routed to the same endpoint because the hash value changes.\n If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. 
When you use the SOURCE_IP setting, Global Accelerator uses the 'two-tuple' (2-tuple) properties source (client) IP address and destination IP address to select the hash value.\n The default value is NONE .\n \n\n :rtype: dict\n :return: {\n 'Listener': {\n 'ListenerArn': 'string',\n 'PortRanges': [\n {\n 'FromPort': 123,\n 'ToPort': 123\n },\n ],\n 'Protocol': 'TCP'|'UDP',\n 'ClientAffinity': 'NONE'|'SOURCE_IP'\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.553268313407898, "alphanum_fraction": 0.560147225856781, "avg_line_length": 61.5776252746582, "blob_id": "523d95fe32a468a86e07e13cef509e241e27d6ae", "content_id": "fed8b0e298fd17417d84882af76903c8d3da8632", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386543, "license_type": "permissive", "max_line_length": 650, "num_lines": 6177, "path": "/pyboto3/cloudfront.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_cloud_front_origin_access_identity(CloudFrontOriginAccessIdentityConfig=None):\n \"\"\"\n Creates a new origin access identity. If you're using Amazon S3 for your origin, you can use an origin access identity to require users to access your content using a CloudFront URL instead of the Amazon S3 URL. 
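A short sketch of the call shape (the CallerReference and Comment values are illustrative; replaying the same CallerReference with an identical config returns the existing identity rather than creating a new one):\n client = boto3.client('cloudfront')\n response = client.create_cloud_front_origin_access_identity(\n CloudFrontOriginAccessIdentityConfig={\n 'CallerReference': 'example-oai-2019-05-01', # any stable unique string\n 'Comment': 'OAI for the example bucket'\n }\n )\n oai_id = response['CloudFrontOriginAccessIdentity']['Id']\n 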
For more information about how to use origin access identities, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_cloud_front_origin_access_identity(\n CloudFrontOriginAccessIdentityConfig={\n 'CallerReference': 'string',\n 'Comment': 'string'\n }\n )\n \n \n :type CloudFrontOriginAccessIdentityConfig: dict\n :param CloudFrontOriginAccessIdentityConfig: [REQUIRED]\n The current configuration information for the identity.\n CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n If the value of CallerReference is new (regardless of the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created.\n If the CallerReference is a value already sent in a previous identity request, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request.\n If the CallerReference is a value you already sent in a previous request to create an identity, but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.\n Comment (string) -- [REQUIRED]Any comments you want to include about the origin access identity.\n \n\n :rtype: dict\n :return: {\n 'CloudFrontOriginAccessIdentity': {\n 'Id': 'string',\n 'S3CanonicalUserId': 'string',\n 'CloudFrontOriginAccessIdentityConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string'\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_distribution(DistributionConfig=None):\n \"\"\"\n Creates a new web distribution. You create a CloudFront distribution to tell CloudFront where you want content to be delivered from, and the details about how to track and manage content delivery. Send a POST request to the /*CloudFront API version* /distribution /distribution ID resource.\n If you are using Adobe Flash Media Server's RTMP protocol, you set up a different kind of CloudFront distribution. 
For more information, see CreateStreamingDistribution .\n See also: AWS API Documentation\n \n \n :example: response = client.create_distribution(\n DistributionConfig={\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 
'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n }\n )\n \n \n :type DistributionConfig: dict\n :param DistributionConfig: [REQUIRED]\n The distribution's configuration information.\n CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n If the value of CallerReference is new (regardless of the content of the DistributionConfig object), CloudFront creates a new distribution.\n If CallerReference is a value that you already sent in a previous request to create a distribution, CloudFront returns a DistributionAlreadyExists error.\n Aliases (dict) --A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.\n Quantity (integer) -- [REQUIRED]The number of CNAME aliases, if any, that you want to associate with this distribution.\n Items (list) --A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.\n (string) --\n \n DefaultRootObject (string) --The object that you want CloudFront to request from your origin (for example, index.html ) when a viewer requests the root URL for your distribution (http://www.example.com ) instead of an object in your distribution (http://www.example.com/product-description.html ). Specifying a default root object avoids exposing the contents of your distribution.\n Specify only the object name, for example, index.html . 
Don't add a / before the object name.\n If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element.\n To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.\n To replace the default root object, update the distribution configuration and specify the new object.\n For more information about the default root object, see Creating a Default Root Object in the Amazon CloudFront Developer Guide .\n Origins (dict) -- [REQUIRED]A complex type that contains information about origins for this distribution.\n Quantity (integer) -- [REQUIRED]The number of origins or origin groups for this distribution.\n Items (list) -- [REQUIRED]A complex type that contains origins or origin groups for this distribution.\n (dict) --A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files. This can also be an origin group, if you've created an origin group. You must specify at least one origin or origin group.\n For the current limit on the number of origins or origin groups that you can specify for a distribution, see Amazon CloudFront Limits in the AWS General Reference .\n Id (string) -- [REQUIRED]A unique identifier for the origin or origin group. The value of Id must be unique within the distribution.\n When you specify the value of TargetOriginId for the default cache behavior or for another cache behavior, you indicate the origin to which you want the cache behavior to route requests by specifying the value of the Id element for that origin. When a request matches the path pattern for that cache behavior, CloudFront routes the request to the specified origin. For more information, see Cache Behavior Settings in the Amazon CloudFront Developer Guide .\n DomainName (string) -- [REQUIRED]\n Amazon S3 origins : The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com . If you set up your bucket to be configured as a website endpoint, enter the Amazon S3 static website hosting endpoint for the bucket.\n For more information about specifying this value for different types of origins, see Origin Domain Name in the Amazon CloudFront Developer Guide .\n Constraints for Amazon S3 origins:\n If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName .\n The bucket name must be between 3 and 63 characters long (inclusive).\n The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.\n The bucket name must not contain adjacent periods.\n Custom Origins : The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com .\n Constraints for custom origins:\n DomainName must be a valid DNS name that contains only a-z, A-Z, 0-9, dot (.), hyphen (-), or underscore (_) characters.\n The name cannot exceed 128 characters.\n OriginPath (string) --An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a / . CloudFront appends the directory name to the value of DomainName , for example, example.com/production . 
Do not include a / at the end of the directory name.\n For example, suppose you've specified the following values for your distribution:\n DomainName : An Amazon S3 bucket named myawsbucket .\n OriginPath : /production\n CNAME : example.com\n When a user enters example.com/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/index.html .\n When a user enters example.com/acme/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/acme/index.html .\n CustomHeaders (dict) --A complex type that contains names and values for the custom headers that you want.\n Quantity (integer) -- [REQUIRED]The number of custom headers, if any, for this distribution.\n Items (list) --\n Optional : A list that contains one OriginCustomHeader element for each custom header that you want CloudFront to forward to the origin. If Quantity is 0 , omit Items .\n (dict) --A complex type that contains HeaderName and HeaderValue elements, if any, for this distribution.\n HeaderName (string) -- [REQUIRED]The name of a header that you want CloudFront to forward to your origin. For more information, see Forwarding Custom Headers to Your Origin (Web Distributions Only) in the Amazon Amazon CloudFront Developer Guide .\n HeaderValue (string) -- [REQUIRED]The value for the header that you specified in the HeaderName field.\n \n S3OriginConfig (dict) --A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.\n OriginAccessIdentity (string) -- [REQUIRED]The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. The format of the value is:\n origin-access-identity/cloudfront/ID-of-origin-access-identity\n where `` ID-of-origin-access-identity `` is the value that CloudFront returned in the ID element when you created the origin access identity.\n If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.\n To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.\n To replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n For more information about the origin access identity, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n CustomOriginConfig (dict) --A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead.\n HTTPPort (integer) -- [REQUIRED]The HTTP port the custom origin listens on.\n HTTPSPort (integer) -- [REQUIRED]The HTTPS port the custom origin listens on.\n OriginProtocolPolicy (string) -- [REQUIRED]The origin protocol policy to apply to your origin.\n OriginSslProtocols (dict) --The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.\n Quantity (integer) -- [REQUIRED]The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.\n Items (list) -- [REQUIRED]A list that contains allowed SSL/TLS protocols for this distribution.\n (string) --\n \n OriginReadTimeout (integer) --You can create a custom origin read timeout. 
All timeout units are in seconds. The default origin read timeout is 30 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 4 seconds; the maximum is 60 seconds.\n If you need to increase the maximum time limit, contact the AWS Support Center .\n OriginKeepaliveTimeout (integer) --You can create a custom keep-alive timeout. All timeout units are in seconds. The default keep-alive timeout is 5 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 1 second; the maximum is 60 seconds.\n If you need to increase the maximum time limit, contact the AWS Support Center .\n \n \n OriginGroups (dict) --A complex type that contains information about origin groups for this distribution.\n Quantity (integer) -- [REQUIRED]The number of origin groups.\n Items (list) --The items (origin groups) in a distribution.\n (dict) --An origin group includes two origins (a primary origin and a second origin to failover to) and a failover criteria that you specify. You create an origin group to support origin failover in CloudFront. When you create or update a distribution, you can specifiy the origin group instead of a single origin, and CloudFront will failover from the primary origin to the second origin under the failover conditions that you've chosen.\n Id (string) -- [REQUIRED]The origin group's ID.\n FailoverCriteria (dict) -- [REQUIRED]A complex type that contains information about the failover criteria for an origin group.\n StatusCodes (dict) -- [REQUIRED]The status codes that, when returned from the primary origin, will trigger CloudFront to failover to the second origin.\n Quantity (integer) -- [REQUIRED]The number of status codes.\n Items (list) -- [REQUIRED]The items (status codes) for an origin group.\n (integer) --\n \n Members (dict) -- [REQUIRED]A complex type that contains information about the origins in an origin group.\n Quantity (integer) -- [REQUIRED]The number of origins in an origin group.\n Items (list) -- [REQUIRED]Items (origins) in an origin group.\n (dict) --An origin in an origin group.\n OriginId (string) -- [REQUIRED]The ID for an origin in an origin group.\n \n \n \n DefaultCacheBehavior (dict) -- [REQUIRED]A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.\n TargetOriginId (string) -- [REQUIRED]The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.\n ForwardedValues (dict) -- [REQUIRED]A complex type that specifies how CloudFront handles query strings and cookies.\n QueryString (boolean) -- [REQUIRED]Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys , if any:\n If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. 
Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.\n If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.\n If you specify false for QueryString , CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.\n For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide .\n Cookies (dict) -- [REQUIRED]A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide .\n Forward (string) -- [REQUIRED]Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.\n Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.\n WhitelistedNames (dict) --Required if you specify whitelist for the value of Forward: . A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.\n If you specify all or none for the value of Forward , omit WhitelistedNames . If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.\n For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference .\n Quantity (integer) -- [REQUIRED]The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.\n Items (list) --A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.\n (string) --\n \n Headers (dict) --A complex type that specifies the Headers , if any, that you want CloudFront to base caching on for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:\n Forward all headers to your origin : Specify 1 for Quantity and * for Name .\n Warning\n CloudFront doesn't cache the objects that are associated with this cache behavior. Instead, CloudFront sends every request to the origin.\n Forward a whitelist of headers you specify : Specify the number of headers that you want CloudFront to base caching on. Then specify the header names in Name elements. CloudFront caches your objects based on the values in the specified headers.\n Forward only the default headers : Specify 0 for Quantity and omit Items . In this configuration, CloudFront doesn't cache based on the values in the request headers.\n Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. 
See the following documentation:\n S3 bucket : See HTTP Request Headers That CloudFront Removes or Updates\n Custom origin : See HTTP Request Headers and CloudFront Behavior\n Items (list) --A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0 , omit Items .\n (string) --\n \n QueryStringCacheKeys (dict) --A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of whitelisted query string parameters for this cache behavior.\n Items (list) --(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. If Quantity is 0, you can omit Items .\n (string) --\n \n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.\n If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled , and specify the applicable values for Quantity and Items . For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n If you don't want to require signed URLs in requests for objects that match PathPattern , specify false for Enabled and 0 for Quantity . Omit Items .\n To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false ), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n ViewerProtocolPolicy (string) -- [REQUIRED]The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern . You can specify the following options:\n allow-all : Viewers can use HTTP or HTTPS.\n redirect-to-https : If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.\n https-only : If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).\n For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide .\n Note\n The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously.
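As a minimal sketch of the fields above (the origin ID my-web-origin is a hypothetical placeholder), a DefaultCacheBehavior fragment that redirects viewers to HTTPS and leaves signed URLs disabled might look like this:\n default_cache_behavior = {\n 'TargetOriginId': 'my-web-origin', # hypothetical; must match the Id of an origin\n 'ForwardedValues': {\n 'QueryString': False,\n 'Cookies': {'Forward': 'none'},\n },\n 'TrustedSigners': {'Enabled': False, 'Quantity': 0}, # no signed URLs required\n 'ViewerProtocolPolicy': 'redirect-to-https', # HTTP viewers get a 301 to the HTTPS URL\n 'MinTTL': 0,\n }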
For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MinTTL (integer) -- [REQUIRED]The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers , if you specify 1 for Quantity and * for Name ).\n AllowedMethods (dict) --A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices:\n CloudFront forwards only GET and HEAD requests.\n CloudFront forwards only GET , HEAD , and OPTIONS requests.\n CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests.\n If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET , HEAD , and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.\n (string) --\n CachedMethods (dict) --A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:\n CloudFront caches responses to GET and HEAD requests.\n CloudFront caches responses to GET , HEAD , and OPTIONS requests.\n If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET , HEAD , and OPTIONS requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to cache responses to.\n (string) --\n \n SmoothStreaming (boolean) --Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true ; if not, specify false . If you specify true for SmoothStreaming , you can still distribute other content using this cache behavior if the content matches the value of PathPattern .\n DefaultTTL (integer) --The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects.
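To illustrate the AllowedMethods and CachedMethods fields above, a minimal sketch that forwards all seven HTTP methods but caches only responses to GET , HEAD , and OPTIONS (each Quantity must match the length of its Items list):\n allowed_methods = {\n 'Quantity': 7,\n 'Items': ['GET', 'HEAD', 'OPTIONS', 'PUT', 'PATCH', 'POST', 'DELETE'],\n 'CachedMethods': {\n 'Quantity': 3,\n 'Items': ['GET', 'HEAD', 'OPTIONS'], # only responses to these methods are cached\n },\n }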
For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MaxTTL (integer) --The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n Compress (boolean) --Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true ; if not, specify false . For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide .\n LambdaFunctionAssociations (dict) --A complex type that contains zero or more Lambda function associations for a cache behavior.\n Quantity (integer) -- [REQUIRED]The number of Lambda function associations for this cache behavior.\n Items (list) --\n Optional : A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that contains a Lambda function association.\n LambdaFunctionARN (string) -- [REQUIRED]The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.\n EventType (string) -- [REQUIRED]Specifies the event type that triggers a Lambda function invocation. You can specify the following values:\n viewer-request : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n origin-request : The function executes only when CloudFront forwards a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n origin-response : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n viewer-response : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.\n IncludeBody (boolean) --A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.\n \n FieldLevelEncryptionId (string) --The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.\n CacheBehaviors (dict) --A complex type that contains zero or more CacheBehavior elements.\n Quantity (integer) -- [REQUIRED]The number of cache behaviors for this distribution.\n Items (list) --Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that describes how CloudFront processes requests.\n You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects.
If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.\n For the current limit on the number of cache behaviors that you can add to a distribution, see Amazon CloudFront Limits in the AWS General Reference .\n If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error.\n To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.\n To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.\n For more information about cache behaviors, see Cache Behaviors in the Amazon CloudFront Developer Guide .\n PathPattern (string) -- [REQUIRED]The pattern (for example, images/*.jpg ) that specifies which requests to apply the behavior to. When CloudFront receives a viewer request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution.\n Note\n You can optionally include a slash (/ ) at the beginning of the path pattern. For example, /images/*.jpg . CloudFront behavior is the same with or without the leading / .\n The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.\n For more information, see Path Pattern in the Amazon CloudFront Developer Guide .\n TargetOriginId (string) -- [REQUIRED]The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.\n ForwardedValues (dict) -- [REQUIRED]A complex type that specifies how CloudFront handles query strings and cookies.\n QueryString (boolean) -- [REQUIRED]Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys , if any:\n If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.\n If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.\n If you specify false for QueryString , CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.\n For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide .\n Cookies (dict) -- [REQUIRED]A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. 
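As a minimal sketch of the ForwardedValues shape described above (the cookie name session-id and the query string parameter lang are hypothetical), a fragment that whitelists one cookie and caches on one query string parameter:\n forwarded_values = {\n 'QueryString': True,\n 'QueryStringCacheKeys': {'Quantity': 1, 'Items': ['lang']}, # the cache key varies on ?lang=\n 'Cookies': {\n 'Forward': 'whitelist',\n 'WhitelistedNames': {'Quantity': 1, 'Items': ['session-id']},\n },\n 'Headers': {'Quantity': 0}, # cache on the default headers only\n }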
For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide .\n Forward (string) -- [REQUIRED]Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.\n Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.\n WhitelistedNames (dict) --Required if you specify whitelist for the value of Forward: . A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.\n If you specify all or none for the value of Forward , omit WhitelistedNames . If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.\n For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference .\n Quantity (integer) -- [REQUIRED]The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.\n Items (list) --A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.\n (string) --\n \n Headers (dict) --A complex type that specifies the Headers , if any, that you want CloudFront to base caching on for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:\n Forward all headers to your origin : Specify 1 for Quantity and * for Name .\n Warning\n CloudFront doesn't cache the objects that are associated with this cache behavior. Instead, CloudFront sends every request to the origin.\n Forward a whitelist of headers you specify : Specify the number of headers that you want CloudFront to base caching on. Then specify the header names in Name elements. CloudFront caches your objects based on the values in the specified headers.\n Forward only the default headers : Specify 0 for Quantity and omit Items . In this configuration, CloudFront doesn't cache based on the values in the request headers.\n Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. See the following documentation:\n S3 bucket : See HTTP Request Headers That CloudFront Removes or Updates\n Custom origin : See HTTP Request Headers and CloudFront Behavior\n Items (list) --A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0 , omit Items .\n (string) --\n \n QueryStringCacheKeys (dict) --A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of whitelisted query string parameters for this cache behavior.\n Items (list) --(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. 
If Quantity is 0, you can omit Items .\n (string) --\n \n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.\n If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled , and specify the applicable values for Quantity and Items . For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n If you don't want to require signed URLs in requests for objects that match PathPattern , specify false for Enabled and 0 for Quantity . Omit Items .\n To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false ), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n ViewerProtocolPolicy (string) -- [REQUIRED]The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern . You can specify the following options:\n allow-all : Viewers can use HTTP or HTTPS.\n redirect-to-https : If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.\n https-only : If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).\n For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide .\n Note\n The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MinTTL (integer) -- [REQUIRED]The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers , if you specify 1 for Quantity and * for Name ).\n AllowedMethods (dict) --A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin.
There are three choices:\n CloudFront forwards only GET and HEAD requests.\n CloudFront forwards only GET , HEAD , and OPTIONS requests.\n CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests.\n If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET , HEAD , and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.\n (string) --\n CachedMethods (dict) --A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:\n CloudFront caches responses to GET and HEAD requests.\n CloudFront caches responses to GET , HEAD , and OPTIONS requests.\n If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET , HEAD , and OPTIONS requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to cache responses to.\n (string) --\n \n SmoothStreaming (boolean) --Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true ; if not, specify false . If you specify true for SmoothStreaming , you can still distribute other content using this cache behavior if the content matches the value of PathPattern .\n DefaultTTL (integer) --The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MaxTTL (integer) --The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n Compress (boolean) --Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true; if not, specify false. 
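Tying these fields together, a minimal sketch of one CacheBehavior item (the path pattern and origin ID are hypothetical) that serves images over HTTPS with compression and a one-day default TTL:\n image_behavior = {\n 'PathPattern': 'images/*.jpg',\n 'TargetOriginId': 'my-image-origin', # hypothetical; must match the Id of an origin\n 'ForwardedValues': {\n 'QueryString': False,\n 'Cookies': {'Forward': 'none'},\n },\n 'TrustedSigners': {'Enabled': False, 'Quantity': 0},\n 'ViewerProtocolPolicy': 'https-only',\n 'MinTTL': 0,\n 'DefaultTTL': 86400, # one day, used when the origin sends no caching headers\n 'MaxTTL': 31536000, # one-year ceiling when the origin does send caching headers\n 'Compress': True, # let CloudFront compress eligible objects\n }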
For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide .\n LambdaFunctionAssociations (dict) --A complex type that contains zero or more Lambda function associations for a cache behavior.\n Quantity (integer) -- [REQUIRED]The number of Lambda function associations for this cache behavior.\n Items (list) --\n Optional : A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that contains a Lambda function association.\n LambdaFunctionARN (string) -- [REQUIRED]The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.\n EventType (string) -- [REQUIRED]Specifies the event type that triggers a Lambda function invocation. You can specify the following values:\n viewer-request : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n origin-request : The function executes only when CloudFront forwards a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n origin-response : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n viewer-response : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.\n IncludeBody (boolean) --A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.\n \n FieldLevelEncryptionId (string) --The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.\n \n CustomErrorResponses (dict) --A complex type that controls the following:\n Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n Quantity (integer) -- [REQUIRED]The number of HTTP status codes for which you want to specify a custom error page and/or a caching duration. 
If Quantity is 0 , you can omit Items .\n Items (list) --A complex type that contains a CustomErrorResponse element for each HTTP status code for which you want to specify a custom error page and/or a caching duration.\n (dict) --A complex type that controls:\n Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n ErrorCode (integer) -- [REQUIRED]The HTTP status code for which you want to specify a custom error page and/or a caching duration.\n ResponsePagePath (string) --The path to the custom error page that you want CloudFront to return to a viewer when your origin returns the HTTP status code specified by ErrorCode , for example, /4xx-errors/403-forbidden.html . If you want to store your objects and your custom error pages in different locations, your distribution must include a cache behavior for which the following is true:\n The value of PathPattern matches the path to your custom error messages. For example, suppose you saved custom error pages for 4xx errors in an Amazon S3 bucket in a directory named /4xx-errors . Your distribution must include a cache behavior for which the path pattern routes requests for your custom error pages to that location, for example, /4xx-errors/* .\n The value of TargetOriginId specifies the value of the ID element for the origin that contains your custom error pages.\n If you specify a value for ResponsePagePath , you must also specify a value for ResponseCode . If you don't want to specify a value, include an empty element, <ResponsePagePath> , in the XML document.\n We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom error pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get the files that you want to return to viewers because the origin server is unavailable.\n ResponseCode (string) --The HTTP status code that you want CloudFront to return to the viewer along with the custom error page. There are a variety of reasons that you might want CloudFront to return a status code different from the status code that your origin returned to CloudFront, for example:\n Some Internet devices (some firewalls and corporate proxies, for example) intercept HTTP 4xx and 5xx and prevent the response from being returned to the viewer. If you substitute 200 , the response typically won't be intercepted.\n If you don't care about distinguishing among different client errors or server errors, you can specify 400 or 500 as the ResponseCode for all 4xx or 5xx errors.\n You might want to return a 200 status code (OK) and static website content so your customers don't know that your website is down.\n If you specify a value for ResponseCode , you must also specify a value for ResponsePagePath . If you don't want to specify a value, include an empty element, <ResponseCode> , in the XML document.\n ErrorCachingMinTTL (integer) --The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status code specified in ErrorCode .
When this time period has elapsed, CloudFront queries your origin to see whether the problem that caused the error has been resolved and the requested object is now available.\n If you don't want to specify a value, include an empty element, <ErrorCachingMinTTL> , in the XML document.\n For more information, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n \n Comment (string) -- [REQUIRED]Any comments you want to include about the distribution.\n If you don't want to specify a comment, include an empty Comment element.\n To delete an existing comment, update the distribution configuration and include an empty Comment element.\n To add or change a comment, update the distribution configuration and specify the new comment.\n Logging (dict) --A complex type that controls whether access logs are written for the distribution.\n For more information about logging, see Access Logs in the Amazon CloudFront Developer Guide .\n Enabled (boolean) -- [REQUIRED]Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled , and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket , Prefix , and IncludeCookies , the values are automatically deleted.\n IncludeCookies (boolean) -- [REQUIRED]Specifies whether you want CloudFront to include cookies in access logs. If you do, specify true for IncludeCookies . If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you don't want to include cookies when you create a distribution or if you want to disable cookie logging for an existing distribution, specify false for IncludeCookies .\n Bucket (string) -- [REQUIRED]The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com .\n Prefix (string) -- [REQUIRED]An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/ . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.\n PriceClass (string) --The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify PriceClass_All , CloudFront responds to requests for your objects from all CloudFront edge locations.\n If you specify a price class other than PriceClass_All , CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.\n For more information about price classes, see Choosing the Price Class for a CloudFront Distribution in the Amazon CloudFront Developer Guide . For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see Amazon CloudFront Pricing .
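For example, a minimal sketch of a Logging value that writes access logs, including cookies, to a hypothetical log bucket under a prefix:\n logging_config = {\n 'Enabled': True,\n 'IncludeCookies': True,\n 'Bucket': 'myawslogbucket.s3.amazonaws.com', # hypothetical log bucket\n 'Prefix': 'cf-access-logs/', # hypothetical prefix for the log filenames\n }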
For price class information, scroll down to see the table at the bottom of the page.\n Enabled (boolean) -- [REQUIRED]From this field, you can enable or disable the selected distribution.\n ViewerCertificate (dict) --\n CloudFrontDefaultCertificate (boolean) --For information about how and when to use CloudFrontDefaultCertificate , see ViewerCertificate .\n IAMCertificateId (string) --For information about how and when to use IAMCertificateId , see ViewerCertificate .\n ACMCertificateArn (string) --For information about how and when to use ACMCertificateArn , see ViewerCertificate .\n SSLSupportMethod (string) --If you specify a value for ViewerCertificate$ACMCertificateArn or for ViewerCertificate$IAMCertificateId , you must also specify how you want CloudFront to serve HTTPS requests: using a method that works for all clients or one that works for most clients:\n vip : CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you will incur additional monthly charges.\n sni-only : CloudFront can respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. If some of your users' browsers don't support SNI, we recommend that you do one of the following:\n Use the vip option (dedicated IP addresses) instead of sni-only .\n Use the CloudFront SSL/TLS certificate instead of a custom certificate. This requires that you use the CloudFront domain name of your distribution in the URLs for your objects, for example, https://d111111abcdef8.cloudfront.net/logo.png .\n If you can control which browser your users use, upgrade the browser to one that supports SNI.\n Use HTTP instead of HTTPS.\n Don't specify a value for SSLSupportMethod if you specified <CloudFrontDefaultCertificate>true</CloudFrontDefaultCertificate> .\n For more information, see Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide .\n MinimumProtocolVersion (string) --Specify the security policy that you want CloudFront to use for HTTPS connections. A security policy determines two settings:\n The minimum SSL/TLS protocol that CloudFront uses to communicate with viewers\n The cipher that CloudFront uses to encrypt the content that it returns to viewers\n Note\n On the CloudFront console, this setting is called Security policy .\n We recommend that you specify TLSv1.1_2016 unless your users are using browsers or devices that do not support TLSv1.1 or later.\n When both of the following are true, you must specify TLSv1 or later for the security policy:\n You're using a custom certificate: you specified a value for ACMCertificateArn or for IAMCertificateId\n You're using SNI: you specified sni-only for SSLSupportMethod\n If you specify true for CloudFrontDefaultCertificate , CloudFront automatically sets the security policy to TLSv1 regardless of the value that you specify for MinimumProtocolVersion .\n For information about the relationship between the security policy that you choose and the protocols and ciphers that CloudFront uses to communicate with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication Between Viewers and CloudFront in the Amazon CloudFront Developer Guide .\n Certificate (string) --This field has been deprecated.
Use one of the following fields instead:\n ViewerCertificate$ACMCertificateArn\n ViewerCertificate$IAMCertificateId\n ViewerCertificate$CloudFrontDefaultCertificate\n CertificateSource (string) --This field has been deprecated. Use one of the following fields instead:\n ViewerCertificate$ACMCertificateArn\n ViewerCertificate$IAMCertificateId\n ViewerCertificate$CloudFrontDefaultCertificate\n \n Restrictions (dict) --\n GeoRestriction (dict) -- [REQUIRED]A complex type that controls the countries in which your content is distributed. CloudFront determines the location of your users using MaxMind GeoIP databases.\n RestrictionType (string) -- [REQUIRED]The method that you want to use to restrict distribution of your content by country:\n none : No geo restriction is enabled, meaning access to content is not restricted by client geo location.\n blacklist : The Location elements specify the countries in which you don't want CloudFront to distribute your content.\n whitelist : The Location elements specify the countries in which you want CloudFront to distribute your content.\n Quantity (integer) -- [REQUIRED]When geo restriction is enabled , this is the number of countries in your whitelist or blacklist . Otherwise, when it is not enabled, Quantity is 0 , and you can omit Items .\n Items (list) --A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist ) or not distribute your content (blacklist ).\n The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist . Include one Location element for each country.\n CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list on the CloudFront console, which includes both country names and codes.\n (string) --\n \n WebACLId (string) --A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.\n AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF, see the AWS WAF Developer Guide .\n HttpVersion (string) --(Optional) Specify the maximum HTTP version that you want viewers to use to communicate with CloudFront. The default value for new web distributions is http2. Viewers that don't support HTTP/2 automatically use an earlier HTTP version.\n For viewers and CloudFront to use HTTP/2, viewers must support TLS 1.2 or later, and must support Server Name Indication (SNI).\n In general, configuring CloudFront to communicate with viewers using HTTP/2 reduces latency. You can improve performance by optimizing for HTTP/2. For more information, do an Internet search for 'http/2 optimization.'\n IsIPV6Enabled (boolean) --If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify true .
If you specify false , CloudFront responds to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses. This allows viewers to submit a second request, for an IPv4 address for your distribution.\n In general, you should enable IPv6 if you have users on IPv6 networks who want to access your content. However, if you're using signed URLs or signed cookies to restrict access to your content, and if you're using a custom policy that includes the IpAddress parameter to restrict the IP addresses that can access your content, don't enable IPv6. If you want to restrict access to some content by IP address and not restrict access to other content (or restrict access but not by IP address), you can create two distributions. For more information, see Creating a Signed URL Using a Custom Policy in the Amazon CloudFront Developer Guide .\n If you're using an Amazon Route 53 alias resource record set to route traffic to your CloudFront distribution, you need to create a second alias resource record set when both of the following are true:\n You enable IPv6 for the distribution\n You're using alternate domain names in the URLs for your objects\n For more information, see Routing Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name in the Amazon Route 53 Developer Guide .\n If you created a CNAME resource record set, either with Amazon Route 53 or with another DNS service, you don't need to make any changes. A CNAME record will route traffic to your distribution regardless of the IP address format of the viewer request.\n \n\n :rtype: dict\n :return: {\n 'Distribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'InProgressInvalidationBatches': 123,\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'DistributionConfig': {\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 
'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n :returns: \n If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName .\n The bucket name must be between 3 and 63 characters long (inclusive).\n The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.\n The bucket name must not contain adjacent periods.\n \n \"\"\"\n pass\n\ndef create_distribution_with_tags(DistributionConfigWithTags=None):\n \"\"\"\n Create a new distribution with tags.\n See also: AWS API 
Documentation\n \n \n :example: response = client.create_distribution_with_tags(\n DistributionConfigWithTags={\n 'DistributionConfig': {\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n 
},\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n },\n 'Tags': {\n 'Items': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n )\n \n \n :type DistributionConfigWithTags: dict\n :param DistributionConfigWithTags: [REQUIRED]\n The distribution's configuration information.\n DistributionConfig (dict) -- [REQUIRED]A distribution configuration.\n CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n If the value of CallerReference is new (regardless of the content of the DistributionConfig object), CloudFront creates a new distribution.\n If CallerReference is a value that you already sent in a previous request to create a distribution, CloudFront returns a DistributionAlreadyExists error.\n Aliases (dict) --A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.\n Quantity (integer) -- [REQUIRED]The number of CNAME aliases, if any, that you want to associate with this distribution.\n Items (list) --A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.\n (string) --\n \n DefaultRootObject (string) --The object that you want CloudFront to request from your origin (for example, index.html ) when a viewer requests the root URL for your distribution (http://www.example.com ) instead of an object in your distribution (http://www.example.com/product-description.html ). Specifying a default root object avoids exposing the contents of your distribution.\n Specify only the object name, for example, index.html . 
Don't add a / before the object name.\n If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element.\n To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.\n To replace the default root object, update the distribution configuration and specify the new object.\n For more information about the default root object, see Creating a Default Root Object in the Amazon CloudFront Developer Guide .\n Origins (dict) -- [REQUIRED]A complex type that contains information about origins for this distribution.\n Quantity (integer) -- [REQUIRED]The number of origins or origin groups for this distribution.\n Items (list) -- [REQUIRED]A complex type that contains origins or origin groups for this distribution.\n (dict) --A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files. This can also be an origin group, if you've created an origin group. You must specify at least one origin or origin group.\n For the current limit on the number of origins or origin groups that you can specify for a distribution, see Amazon CloudFront Limits in the AWS General Reference .\n Id (string) -- [REQUIRED]A unique identifier for the origin or origin group. The value of Id must be unique within the distribution.\n When you specify the value of TargetOriginId for the default cache behavior or for another cache behavior, you indicate the origin to which you want the cache behavior to route requests by specifying the value of the Id element for that origin. When a request matches the path pattern for that cache behavior, CloudFront routes the request to the specified origin. For more information, see Cache Behavior Settings in the Amazon CloudFront Developer Guide .\n DomainName (string) -- [REQUIRED]\n Amazon S3 origins : The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com . If you set up your bucket to be configured as a website endpoint, enter the Amazon S3 static website hosting endpoint for the bucket.\n For more information about specifying this value for different types of origins, see Origin Domain Name in the Amazon CloudFront Developer Guide .\n Constraints for Amazon S3 origins:\n If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName .\n The bucket name must be between 3 and 63 characters long (inclusive).\n The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.\n The bucket name must not contain adjacent periods.\n Custom Origins : The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com .\n Constraints for custom origins:\n DomainName must be a valid DNS name that contains only a-z, A-Z, 0-9, dot (.), hyphen (-), or underscore (_) characters.\n The name cannot exceed 128 characters.\n OriginPath (string) --An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a / . CloudFront appends the directory name to the value of DomainName , for example, example.com/production . 
Do not include a / at the end of the directory name.\n For example, suppose you've specified the following values for your distribution:\n DomainName : An Amazon S3 bucket named myawsbucket .\n OriginPath : /production\n CNAME : example.com\n When a user enters example.com/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/index.html .\n When a user enters example.com/acme/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/acme/index.html .\n CustomHeaders (dict) --A complex type that contains names and values for the custom headers that you want.\n Quantity (integer) -- [REQUIRED]The number of custom headers, if any, for this distribution.\n Items (list) --\n Optional : A list that contains one OriginCustomHeader element for each custom header that you want CloudFront to forward to the origin. If Quantity is 0 , omit Items .\n (dict) --A complex type that contains HeaderName and HeaderValue elements, if any, for this distribution.\n HeaderName (string) -- [REQUIRED]The name of a header that you want CloudFront to forward to your origin. For more information, see Forwarding Custom Headers to Your Origin (Web Distributions Only) in the Amazon Amazon CloudFront Developer Guide .\n HeaderValue (string) -- [REQUIRED]The value for the header that you specified in the HeaderName field.\n \n S3OriginConfig (dict) --A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.\n OriginAccessIdentity (string) -- [REQUIRED]The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. The format of the value is:\n origin-access-identity/cloudfront/ID-of-origin-access-identity\n where `` ID-of-origin-access-identity `` is the value that CloudFront returned in the ID element when you created the origin access identity.\n If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.\n To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.\n To replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n For more information about the origin access identity, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n CustomOriginConfig (dict) --A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead.\n HTTPPort (integer) -- [REQUIRED]The HTTP port the custom origin listens on.\n HTTPSPort (integer) -- [REQUIRED]The HTTPS port the custom origin listens on.\n OriginProtocolPolicy (string) -- [REQUIRED]The origin protocol policy to apply to your origin.\n OriginSslProtocols (dict) --The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.\n Quantity (integer) -- [REQUIRED]The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.\n Items (list) -- [REQUIRED]A list that contains allowed SSL/TLS protocols for this distribution.\n (string) --\n \n OriginReadTimeout (integer) --You can create a custom origin read timeout. 
All timeout units are in seconds. The default origin read timeout is 30 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 4 seconds; the maximum is 60 seconds.\n If you need to increase the maximum time limit, contact the AWS Support Center .\n OriginKeepaliveTimeout (integer) --You can create a custom keep-alive timeout. All timeout units are in seconds. The default keep-alive timeout is 5 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 1 second; the maximum is 60 seconds.\n If you need to increase the maximum time limit, contact the AWS Support Center .\n \n \n OriginGroups (dict) --A complex type that contains information about origin groups for this distribution.\n Quantity (integer) -- [REQUIRED]The number of origin groups.\n Items (list) --The items (origin groups) in a distribution.\n (dict) --An origin group includes two origins (a primary origin and a second origin to failover to) and a failover criteria that you specify. You create an origin group to support origin failover in CloudFront. When you create or update a distribution, you can specifiy the origin group instead of a single origin, and CloudFront will failover from the primary origin to the second origin under the failover conditions that you've chosen.\n Id (string) -- [REQUIRED]The origin group's ID.\n FailoverCriteria (dict) -- [REQUIRED]A complex type that contains information about the failover criteria for an origin group.\n StatusCodes (dict) -- [REQUIRED]The status codes that, when returned from the primary origin, will trigger CloudFront to failover to the second origin.\n Quantity (integer) -- [REQUIRED]The number of status codes.\n Items (list) -- [REQUIRED]The items (status codes) for an origin group.\n (integer) --\n \n Members (dict) -- [REQUIRED]A complex type that contains information about the origins in an origin group.\n Quantity (integer) -- [REQUIRED]The number of origins in an origin group.\n Items (list) -- [REQUIRED]Items (origins) in an origin group.\n (dict) --An origin in an origin group.\n OriginId (string) -- [REQUIRED]The ID for an origin in an origin group.\n \n \n \n DefaultCacheBehavior (dict) -- [REQUIRED]A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.\n TargetOriginId (string) -- [REQUIRED]The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.\n ForwardedValues (dict) -- [REQUIRED]A complex type that specifies how CloudFront handles query strings and cookies.\n QueryString (boolean) -- [REQUIRED]Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys , if any:\n If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. 
Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.\n If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.\n If you specify false for QueryString , CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.\n For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide .\n Cookies (dict) -- [REQUIRED]A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide .\n Forward (string) -- [REQUIRED]Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.\n Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.\n WhitelistedNames (dict) --Required if you specify whitelist for the value of Forward: . A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.\n If you specify all or none for the value of Forward , omit WhitelistedNames . If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.\n For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference .\n Quantity (integer) -- [REQUIRED]The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.\n Items (list) --A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.\n (string) --\n \n Headers (dict) --A complex type that specifies the Headers , if any, that you want CloudFront to base caching on for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:\n Forward all headers to your origin : Specify 1 for Quantity and * for Name .\n Warning\n CloudFront doesn't cache the objects that are associated with this cache behavior. Instead, CloudFront sends every request to the origin.\n Forward a whitelist of headers you specify : Specify the number of headers that you want CloudFront to base caching on. Then specify the header names in Name elements. CloudFront caches your objects based on the values in the specified headers.\n Forward only the default headers : Specify 0 for Quantity and omit Items . In this configuration, CloudFront doesn't cache based on the values in the request headers.\n Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. 
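(An illustrative whitelist: 'Headers': {'Quantity': 1, 'Items': ['Origin']} caches objects separately per Origin header, a common CORS setup.)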
See the following documentation:\n S3 bucket : See HTTP Request Headers That CloudFront Removes or Updates\n Custom origin : See HTTP Request Headers and CloudFront Behavior\n Items (list) --A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0 , omit Items .\n (string) --\n \n QueryStringCacheKeys (dict) --A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of whitelisted query string parameters for this cache behavior.\n Items (list) --(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. If Quantity is 0, you can omit Items .\n (string) --\n \n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.\n If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled , and specify the applicable values for Quantity and Items . For more information, see Serving Private Content through CloudFront in the Amazon Amazon CloudFront Developer Guide .\n If you don't want to require signed URLs in requests for objects that match PathPattern , specify false for Enabled and 0 for Quantity . Omit Items .\n To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false ), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n ViewerProtocolPolicy (string) -- [REQUIRED]The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern . You can specify the following options:\n allow-all : Viewers can use HTTP or HTTPS.\n redirect-to-https : If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.\n https-only : If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).\n For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide .\n Note\n The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. 
For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MinTTL (integer) -- [REQUIRED]The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon Amazon CloudFront Developer Guide .\n You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers , if you specify 1 for Quantity and * for Name ).\n AllowedMethods (dict) --A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices:\n CloudFront forwards only GET and HEAD requests.\n CloudFront forwards only GET , HEAD , and OPTIONS requests.\n CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests.\n If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET , HEAD , and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.\n (string) --\n CachedMethods (dict) --A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:\n CloudFront caches responses to GET and HEAD requests.\n CloudFront caches responses to GET , HEAD , and OPTIONS requests.\n If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET , HEAD , and OPTIONS requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to cache responses to.\n (string) --\n \n SmoothStreaming (boolean) --Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true ; if not, specify false . If you specify true for SmoothStreaming , you can still distribute other content using this cache behavior if the content matches the value of PathPattern .\n DefaultTTL (integer) --The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. 
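(For example, DefaultTTL=86400 keeps such objects cached for one day; the number is illustrative.)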
For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MaxTTL (integer) --The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n Compress (boolean) --Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true ; if not, specify false . For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide .\n LambdaFunctionAssociations (dict) --A complex type that contains zero or more Lambda function associations for a cache behavior.\n Quantity (integer) -- [REQUIRED]The number of Lambda function associations for this cache behavior.\n Items (list) --\n Optional : A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that contains a Lambda function association.\n LambdaFunctionARN (string) -- [REQUIRED]The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.\n EventType (string) -- [REQUIRED]Specifies the event type that triggers a Lambda function invocation. You can specify the following values:\n viewer-request : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n origin-request : The function executes only when CloudFront forwards a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n origin-response : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n viewer-response : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.\n IncludeBody (boolean) --A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.\n \n FieldLevelEncryptionId (string) --The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.\n CacheBehaviors (dict) --A complex type that contains zero or more CacheBehavior elements.\n Quantity (integer) -- [REQUIRED]The number of cache behaviors for this distribution.\n Items (list) --Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that describes how CloudFront processes requests.\n You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. 
If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.\n For the current limit on the number of cache behaviors that you can add to a distribution, see Amazon CloudFront Limits in the AWS General Reference .\n If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error.\n To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.\n To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.\n For more information about cache behaviors, see Cache Behaviors in the Amazon CloudFront Developer Guide .\n PathPattern (string) -- [REQUIRED]The pattern (for example, images/*.jpg ) that specifies which requests to apply the behavior to. When CloudFront receives a viewer request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution.\n Note\n You can optionally include a slash (/ ) at the beginning of the path pattern. For example, /images/*.jpg . CloudFront behavior is the same with or without the leading / .\n The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.\n For more information, see Path Pattern in the Amazon CloudFront Developer Guide .\n TargetOriginId (string) -- [REQUIRED]The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.\n ForwardedValues (dict) -- [REQUIRED]A complex type that specifies how CloudFront handles query strings and cookies.\n QueryString (boolean) -- [REQUIRED]Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys , if any:\n If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.\n If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.\n If you specify false for QueryString , CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.\n For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide .\n Cookies (dict) -- [REQUIRED]A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. 
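(An illustrative whitelist: 'Cookies': {'Forward': 'whitelist', 'WhitelistedNames': {'Quantity': 1, 'Items': ['session-id']}} forwards only a single, hypothetical session cookie.)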
For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide .\n Forward (string) -- [REQUIRED]Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.\n Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.\n WhitelistedNames (dict) --Required if you specify whitelist for the value of Forward: . A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.\n If you specify all or none for the value of Forward , omit WhitelistedNames . If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.\n For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference .\n Quantity (integer) -- [REQUIRED]The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.\n Items (list) --A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.\n (string) --\n \n Headers (dict) --A complex type that specifies the Headers , if any, that you want CloudFront to base caching on for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:\n Forward all headers to your origin : Specify 1 for Quantity and * for Name .\n Warning\n CloudFront doesn't cache the objects that are associated with this cache behavior. Instead, CloudFront sends every request to the origin.\n Forward a whitelist of headers you specify : Specify the number of headers that you want CloudFront to base caching on. Then specify the header names in Name elements. CloudFront caches your objects based on the values in the specified headers.\n Forward only the default headers : Specify 0 for Quantity and omit Items . In this configuration, CloudFront doesn't cache based on the values in the request headers.\n Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. See the following documentation:\n S3 bucket : See HTTP Request Headers That CloudFront Removes or Updates\n Custom origin : See HTTP Request Headers and CloudFront Behavior\n Items (list) --A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0 , omit Items .\n (string) --\n \n QueryStringCacheKeys (dict) --A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of whitelisted query string parameters for this cache behavior.\n Items (list) --(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. 
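(For example, 'Items': ['lang', 'currency'] caches separately per language and currency while ignoring any other query parameters; the parameter names are illustrative.)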
If Quantity is 0, you can omit Items .\n (string) --\n \n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.\n If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled , and specify the applicable values for Quantity and Items . For more information, see Serving Private Content through CloudFront in the Amazon Amazon CloudFront Developer Guide .\n If you don't want to require signed URLs in requests for objects that match PathPattern , specify false for Enabled and 0 for Quantity . Omit Items .\n To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false ), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n ViewerProtocolPolicy (string) -- [REQUIRED]The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern . You can specify the following options:\n allow-all : Viewers can use HTTP or HTTPS.\n redirect-to-https : If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.\n https-only : If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).\n For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide .\n Note\n The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MinTTL (integer) -- [REQUIRED]The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon Amazon CloudFront Developer Guide .\n You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers , if you specify 1 for Quantity and * for Name ).\n AllowedMethods (dict) --A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. 
There are three choices:\n CloudFront forwards only GET and HEAD requests.\n CloudFront forwards only GET , HEAD , and OPTIONS requests.\n CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests.\n If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET , HEAD , and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.\n (string) --\n CachedMethods (dict) --A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:\n CloudFront caches responses to GET and HEAD requests.\n CloudFront caches responses to GET , HEAD , and OPTIONS requests.\n If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET , HEAD , and OPTIONS requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to cache responses to.\n (string) --\n \n SmoothStreaming (boolean) --Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true ; if not, specify false . If you specify true for SmoothStreaming , you can still distribute other content using this cache behavior if the content matches the value of PathPattern .\n DefaultTTL (integer) --The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MaxTTL (integer) --The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n Compress (boolean) --Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true; if not, specify false. 
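(For example, Compress=True lets CloudFront serve gzip-compressed text assets to viewers that send an Accept-Encoding: gzip request header.)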
For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide .\n LambdaFunctionAssociations (dict) --A complex type that contains zero or more Lambda function associations for a cache behavior.\n Quantity (integer) -- [REQUIRED]The number of Lambda function associations for this cache behavior.\n Items (list) --\n Optional : A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that contains a Lambda function association.\n LambdaFunctionARN (string) -- [REQUIRED]The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.\n EventType (string) -- [REQUIRED]Specifies the event type that triggers a Lambda function invocation. You can specify the following values:\n viewer-request : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n origin-request : The function executes only when CloudFront forwards a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n origin-response : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n viewer-response : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.\n IncludeBody (boolean) --A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.\n \n FieldLevelEncryptionId (string) --The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.\n \n CustomErrorResponses (dict) --A complex type that controls the following:\n Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n Quantity (integer) -- [REQUIRED]The number of HTTP status codes for which you want to specify a custom error page and/or a caching duration. 
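(A typical entry pairs ErrorCode 404 with a hypothetical ResponsePagePath such as '/4xx-errors/404.html' and ResponseCode '404' .)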
If Quantity is 0 , you can omit Items .\n Items (list) --A complex type that contains a CustomErrorResponse element for each HTTP status code for which you want to specify a custom error page and/or a caching duration.\n (dict) --A complex type that controls:\n Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n ErrorCode (integer) -- [REQUIRED]The HTTP status code for which you want to specify a custom error page and/or a caching duration.\n ResponsePagePath (string) --The path to the custom error page that you want CloudFront to return to a viewer when your origin returns the HTTP status code specified by ErrorCode , for example, /4xx-errors/403-forbidden.html . If you want to store your objects and your custom error pages in different locations, your distribution must include a cache behavior for which the following is true:\n The value of PathPattern matches the path to your custom error messages. For example, suppose you saved custom error pages for 4xx errors in an Amazon S3 bucket in a directory named /4xx-errors . Your distribution must include a cache behavior for which the path pattern routes requests for your custom error pages to that location, for example, /4xx-errors/* .\n The value of TargetOriginId specifies the value of the ID element for the origin that contains your custom error pages.\n If you specify a value for ResponsePagePath , you must also specify a value for ResponseCode . If you don't want to specify a value, include an empty element, <ResponsePagePath> , in the XML document.\n We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom error pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get the files that you want to return to viewers because the origin server is unavailable.\n ResponseCode (string) --The HTTP status code that you want CloudFront to return to the viewer along with the custom error page. There are a variety of reasons that you might want CloudFront to return a status code different from the status code that your origin returned to CloudFront, for example:\n Some Internet devices (some firewalls and corporate proxies, for example) intercept HTTP 4xx and 5xx and prevent the response from being returned to the viewer. If you substitute 200 , the response typically won't be intercepted.\n If you don't care about distinguishing among different client errors or server errors, you can specify 400 or 500 as the ResponseCode for all 4xx or 5xx errors.\n You might want to return a 200 status code (OK) and static website so your customers don't know that your website is down.\n If you specify a value for ResponseCode , you must also specify a value for ResponsePagePath . If you don't want to specify a value, include an empty element, <ResponseCode> , in the XML document.\n ErrorCachingMinTTL (integer) --The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status code specified in ErrorCode . 
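(For example, ErrorCachingMinTTL=300 serves the cached error response for five minutes before CloudFront retries the origin; the value is illustrative.)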
When this time period has elapsed, CloudFront queries your origin to see whether the problem that caused the error has been resolved and the requested object is now available.\n If you don't want to specify a value, include an empty element, <ErrorCachingMinTTL> , in the XML document.\n For more information, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n \n Comment (string) -- [REQUIRED]Any comments you want to include about the distribution.\n If you don't want to specify a comment, include an empty Comment element.\n To delete an existing comment, update the distribution configuration and include an empty Comment element.\n To add or change a comment, update the distribution configuration and specify the new comment.\n Logging (dict) --A complex type that controls whether access logs are written for the distribution.\n For more information about logging, see Access Logs in the Amazon CloudFront Developer Guide .\n Enabled (boolean) -- [REQUIRED]Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled , and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket , Prefix , and IncludeCookies , the values are automatically deleted.\n IncludeCookies (boolean) -- [REQUIRED]Specifies whether you want CloudFront to include cookies in access logs. To include cookies, specify true for IncludeCookies . If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you don't want to include cookies when you create a distribution or if you want to disable cookie logging for an existing distribution, specify false for IncludeCookies .\n Bucket (string) -- [REQUIRED]The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com .\n Prefix (string) -- [REQUIRED]An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/ . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.\n PriceClass (string) --The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify PriceClass_All , CloudFront responds to requests for your objects from all CloudFront edge locations.\n If you specify a price class other than PriceClass_All , CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.\n For more information about price classes, see Choosing the Price Class for a CloudFront Distribution in the Amazon CloudFront Developer Guide . For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see Amazon CloudFront Pricing . 
For price class information, scroll down to see the table at the bottom of the page.\n Enabled (boolean) -- [REQUIRED]From this field, you can enable or disable the selected distribution.\n ViewerCertificate (dict) --\n CloudFrontDefaultCertificate (boolean) --For information about how and when to use CloudFrontDefaultCertificate , see ViewerCertificate .\n IAMCertificateId (string) --For information about how and when to use IAMCertificateId , see ViewerCertificate .\n ACMCertificateArn (string) --For information about how and when to use ACMCertificateArn , see ViewerCertificate .\n SSLSupportMethod (string) --If you specify a value for ViewerCertificate$ACMCertificateArn or for ViewerCertificate$IAMCertificateId , you must also specify how you want CloudFront to serve HTTPS requests: using a method that works for all clients or one that works for most clients:\n vip : CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you will incur additional monthly charges.\n sni-only : CloudFront can respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. If some of your users' browsers don't support SNI, we recommend that you do one of the following:\n Use the vip option (dedicated IP addresses) instead of sni-only .\n Use the CloudFront SSL/TLS certificate instead of a custom certificate. This requires that you use the CloudFront domain name of your distribution in the URLs for your objects, for example, https://d111111abcdef8.cloudfront.net/logo.png .\n If you can control which browser your users use, upgrade the browser to one that supports SNI.\n Use HTTP instead of HTTPS.\n Don't specify a value for SSLSupportMethod if you specified <CloudFrontDefaultCertificate>true<CloudFrontDefaultCertificate> .\n For more information, see Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide .\n MinimumProtocolVersion (string) --Specify the security policy that you want CloudFront to use for HTTPS connections. A security policy determines two settings:\n The minimum SSL/TLS protocol that CloudFront uses to communicate with viewers\n The cipher that CloudFront uses to encrypt the content that it returns to viewers\n Note\n On the CloudFront console, this setting is called Security policy .\n We recommend that you specify TLSv1.1_2016 unless your users are using browsers or devices that do not support TLSv1.1 or later.\n When both of the following are true, you must specify TLSv1 or later for the security policy:\n You're using a custom certificate: you specified a value for ACMCertificateArn or for IAMCertificateId\n You're using SNI: you specified sni-only for SSLSupportMethod\n If you specify true for CloudFrontDefaultCertificate , CloudFront automatically sets the security policy to TLSv1 regardless of the value that you specify for MinimumProtocolVersion .\n For information about the relationship between the security policy that you choose and the protocols and ciphers that CloudFront uses to communicate with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication Between Viewers and CloudFront in the Amazon CloudFront Developer Guide .\n Certificate (string) --This field has been deprecated. 
Use one of the following fields instead:\n ViewerCertificate$ACMCertificateArn\n ViewerCertificate$IAMCertificateId\n ViewerCertificate$CloudFrontDefaultCertificate\n CertificateSource (string) --This field has been deprecated. Use one of the following fields instead:\n ViewerCertificate$ACMCertificateArn\n ViewerCertificate$IAMCertificateId\n ViewerCertificate$CloudFrontDefaultCertificate\n \n Restrictions (dict) --\n GeoRestriction (dict) -- [REQUIRED]A complex type that controls the countries in which your content is distributed. CloudFront determines the location of your users using MaxMind GeoIP databases.\n RestrictionType (string) -- [REQUIRED]The method that you want to use to restrict distribution of your content by country:\n none : No geo restriction is enabled, meaning access to content is not restricted by client geo location.\n blacklist : The Location elements specify the countries in which you don't want CloudFront to distribute your content.\n whitelist : The Location elements specify the countries in which you want CloudFront to distribute your content.\n Quantity (integer) -- [REQUIRED]When geo restriction is enabled , this is the number of countries in your whitelist or blacklist . Otherwise, when it is not enabled, Quantity is 0 , and you can omit Items .\n Items (list) --A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist ) or not distribute your content (blacklist ).\n The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist . Include one Location element for each country.\n CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list on the CloudFront console, which includes both country names and codes.\n (string) --\n \n WebACLId (string) --A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.\n AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF, see the AWS WAF Developer Guide .\n HttpVersion (string) --(Optional) Specify the maximum HTTP version that you want viewers to use to communicate with CloudFront. The default value for new web distributions is http2. Viewers that don't support HTTP/2 automatically use an earlier HTTP version.\n For viewers and CloudFront to use HTTP/2, viewers must support TLS 1.2 or later, and must support Server Name Identification (SNI).\n In general, configuring CloudFront to communicate with viewers using HTTP/2 reduces latency. You can improve performance by optimizing for HTTP/2. For more information, do an Internet search for 'http/2 optimization.'\n IsIPV6Enabled (boolean) --If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify true . 
If you specify false , CloudFront responds to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses. This allows viewers to submit a second request, for an IPv4 address for your distribution.\n In general, you should enable IPv6 if you have users on IPv6 networks who want to access your content. However, if you're using signed URLs or signed cookies to restrict access to your content, and if you're using a custom policy that includes the IpAddress parameter to restrict the IP addresses that can access your content, don't enable IPv6. If you want to restrict access to some content by IP address and not restrict access to other content (or restrict access but not by IP address), you can create two distributions. For more information, see Creating a Signed URL Using a Custom Policy in the Amazon CloudFront Developer Guide .\n If you're using an Amazon Route 53 alias resource record set to route traffic to your CloudFront distribution, you need to create a second alias resource record set when both of the following are true:\n You enable IPv6 for the distribution\n You're using alternate domain names in the URLs for your objects\n For more information, see Routing Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name in the Amazon Route 53 Developer Guide .\n If you created a CNAME resource record set, either with Amazon Route 53 or with another DNS service, you don't need to make any changes. A CNAME record will route traffic to your distribution regardless of the IP address format of the viewer request.\n Tags (dict) -- [REQUIRED]A complex type that contains zero or more Tag elements.\n Items (list) --A complex type that contains Tag elements.\n (dict) --A complex type that contains Tag key and Tag value.\n Key (string) -- [REQUIRED]A string that contains Tag key.\n The string length should be between 1 and 128 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . : / = + @ .\n Value (string) --A string that contains an optional Tag value.\n The string length should be between 0 and 256 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . 
: / = + @ .\n \n \n \n\n :rtype: dict\n :return: {\n 'Distribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'InProgressInvalidationBatches': 123,\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'DistributionConfig': {\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 
'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n :returns: \n If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName .\n The bucket name must be between 3 and 63 characters long (inclusive).\n The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.\n The bucket name must not contain adjacent periods.\n \n \"\"\"\n pass\n\ndef create_field_level_encryption_config(FieldLevelEncryptionConfig=None):\n \"\"\"\n Create a new field-level encryption configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.create_field_level_encryption_config(\n FieldLevelEncryptionConfig={\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n }\n )\n \n \n :type FieldLevelEncryptionConfig: dict\n :param FieldLevelEncryptionConfig: [REQUIRED]\n The request to create a new field-level encryption configuration.\n CallerReference (string) -- [REQUIRED]A unique number that ensures the request can't be replayed.\n Comment (string) --An optional comment about the configuration.\n QueryArgProfileConfig (dict) --A complex data type that specifies when to forward content if a profile isn't found and the profile that can be provided as a query argument in a request.\n ForwardWhenQueryArgProfileIsUnknown (boolean) -- [REQUIRED]Flag to set if you want a request to be forwarded to the origin even if the profile specified by the field-level encryption query argument, fle-profile, is unknown.\n QueryArgProfiles (dict) --Profiles specified for query argument-profile mapping for field-level encryption.\n Quantity 
(integer) -- [REQUIRED]Number of profiles for query argument-profile mapping for field-level encryption.\n Items (list) --Number of items for query argument-profile mapping for field-level encryption.\n (dict) --Query argument-profile mapping for field-level encryption.\n QueryArg (string) -- [REQUIRED]Query argument for field-level encryption query argument-profile mapping.\n ProfileId (string) -- [REQUIRED]ID of profile to use for field-level encryption query argument-profile mapping\n \n \n ContentTypeProfileConfig (dict) --A complex data type that specifies when to forward content if a content type isn't recognized and profiles to use as by default in a request if a query argument doesn't specify a profile to use.\n ForwardWhenContentTypeIsUnknown (boolean) -- [REQUIRED]The setting in a field-level encryption content type-profile mapping that specifies what to do when an unknown content type is provided for the profile. If true, content is forwarded without being encrypted when the content type is unknown. If false (the default), an error is returned when the content type is unknown.\n ContentTypeProfiles (dict) --The configuration for a field-level encryption content type-profile.\n Quantity (integer) -- [REQUIRED]The number of field-level encryption content type-profile mappings.\n Items (list) --Items in a field-level encryption content type-profile mapping.\n (dict) --A field-level encryption content type profile.\n Format (string) -- [REQUIRED]The format for a field-level encryption content type-profile mapping.\n ProfileId (string) --The profile ID for a field-level encryption content type-profile mapping.\n ContentType (string) -- [REQUIRED]The content type for a field-level encryption content type-profile mapping.\n \n \n \n\n :rtype: dict\n :return: {\n 'FieldLevelEncryption': {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FieldLevelEncryptionConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_field_level_encryption_profile(FieldLevelEncryptionProfileConfig=None):\n \"\"\"\n Create a field-level encryption profile.\n See also: AWS API Documentation\n \n \n :example: response = client.create_field_level_encryption_profile(\n FieldLevelEncryptionProfileConfig={\n 'Name': 'string',\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n }\n }\n )\n \n \n :type FieldLevelEncryptionProfileConfig: dict\n :param FieldLevelEncryptionProfileConfig: [REQUIRED]\n The request to create a field-level encryption profile.\n Name (string) -- [REQUIRED]Profile name for the field-level encryption profile.\n CallerReference (string) -- [REQUIRED]A unique number that ensures that the request can't be replayed.\n Comment (string) --An optional comment for the field-level encryption profile.\n EncryptionEntities (dict) -- [REQUIRED]A complex 
data type of encryption entities for the field-level encryption profile that include the public key ID, provider, and field patterns for specifying which fields to encrypt with this key.\n Quantity (integer) -- [REQUIRED]Number of field pattern items in a field-level encryption content type-profile mapping.\n Items (list) --An array of field patterns in a field-level encryption content type-profile mapping.\n (dict) --Complex data type for field-level encryption profiles that includes the encryption key and field pattern specifications.\n PublicKeyId (string) -- [REQUIRED]The public key associated with a set of field-level encryption patterns, to be used when encrypting the fields that match the patterns.\n ProviderId (string) -- [REQUIRED]The provider associated with the public key being used for encryption. This value must also be provided with the private key for applications to be able to decrypt data.\n FieldPatterns (dict) -- [REQUIRED]Field patterns in a field-level encryption content type profile specify the fields that you want to be encrypted. You can provide the full field name, or any beginning characters followed by a wildcard (*). You can't overlap field patterns. For example, you can't have both ABC* and AB*. Note that field patterns are case-sensitive.\n Quantity (integer) -- [REQUIRED]The number of field-level encryption field patterns.\n Items (list) --An array of the field-level encryption field patterns.\n (string) --\n \n \n \n\n :rtype: dict\n :return: {\n 'FieldLevelEncryptionProfile': {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FieldLevelEncryptionProfileConfig': {\n 'Name': 'string',\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n }\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_invalidation(DistributionId=None, InvalidationBatch=None):\n \"\"\"\n Create a new invalidation.\n See also: AWS API Documentation\n \n \n :example: response = client.create_invalidation(\n DistributionId='string',\n InvalidationBatch={\n 'Paths': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'CallerReference': 'string'\n }\n )\n \n \n :type DistributionId: string\n :param DistributionId: [REQUIRED]\n The distribution's id.\n \n\n :type InvalidationBatch: dict\n :param InvalidationBatch: [REQUIRED]\n The batch information for the invalidation.\n Paths (dict) -- [REQUIRED]A complex type that contains information about the objects that you want to invalidate. For more information, see Specifying the Objects to Invalidate in the Amazon CloudFront Developer Guide .\n Quantity (integer) -- [REQUIRED]The number of objects that you want to invalidate.\n Items (list) --A complex type that contains a list of the paths that you want to invalidate.\n (string) --\n \n CallerReference (string) -- [REQUIRED]A value that you specify to uniquely identify an invalidation request. CloudFront uses the value to prevent you from accidentally resubmitting an identical request. Whenever you create a new invalidation request, you must specify a new value for CallerReference and change other values in the request as applicable. 
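For instance, a minimal usage sketch (the distribution ID and paths are hypothetical placeholders, and a configured boto3 session with credentials is assumed):\n        import time\n        import boto3\n        client = boto3.client('cloudfront')\n        response = client.create_invalidation(\n            DistributionId='EDFDVBD6EXAMPLE',  # hypothetical ID\n            InvalidationBatch={\n                'Paths': {'Quantity': 2, 'Items': ['/index.html', '/images/*']},\n                'CallerReference': str(int(time.time())),  # unique per request\n            },\n        )\n        print(response['Invalidation']['Status'])  # typically 'InProgress' at first\n    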
One way to ensure that the value of CallerReference is unique is to use a timestamp , for example, 20120301090000 .\n If you make a second invalidation request with the same value for CallerReference , and if the rest of the request is the same, CloudFront doesn't create a new invalidation request. Instead, CloudFront returns information about the invalidation request that you previously created with the same CallerReference .\n If CallerReference is a value you already sent in a previous invalidation batch request but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.\n \n\n    :rtype: dict\n    :return: {\n        'Location': 'string',\n        'Invalidation': {\n            'Id': 'string',\n            'Status': 'string',\n            'CreateTime': datetime(2015, 1, 1),\n            'InvalidationBatch': {\n                'Paths': {\n                    'Quantity': 123,\n                    'Items': [\n                        'string',\n                    ]\n                },\n                'CallerReference': 'string'\n            }\n        }\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef create_public_key(PublicKeyConfig=None):\n    \"\"\"\n    Add a new public key to CloudFront to use, for example, for field-level encryption. You can add a maximum of 10 public keys with one AWS account.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_public_key(\n        PublicKeyConfig={\n            'CallerReference': 'string',\n            'Name': 'string',\n            'EncodedKey': 'string',\n            'Comment': 'string'\n        }\n    )\n    \n    \n    :type PublicKeyConfig: dict\n    :param PublicKeyConfig: [REQUIRED]\n    The request to add a public key to CloudFront.\n    CallerReference (string) -- [REQUIRED]A unique number that ensures that the request can't be replayed.\n    Name (string) -- [REQUIRED]The name for a public key you add to CloudFront to use with features like field-level encryption.\n    EncodedKey (string) -- [REQUIRED]The encoded public key that you want to add to CloudFront to use with features like field-level encryption.\n    Comment (string) --An optional comment about a public key.\n    \n\n    :rtype: dict\n    :return: {\n        'PublicKey': {\n            'Id': 'string',\n            'CreatedTime': datetime(2015, 1, 1),\n            'PublicKeyConfig': {\n                'CallerReference': 'string',\n                'Name': 'string',\n                'EncodedKey': 'string',\n                'Comment': 'string'\n            }\n        },\n        'Location': 'string',\n        'ETag': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef create_streaming_distribution(StreamingDistributionConfig=None):\n    \"\"\"\n    Creates a new RTMP distribution. An RTMP distribution is similar to a web distribution, but an RTMP distribution streams media files using the Adobe Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP.\n    To create a new streaming distribution, submit a POST request to the CloudFront API version /streaming-distribution resource. The request body must include a document with a StreamingDistributionConfig element. The response echoes the StreamingDistributionConfig element and returns other information about the RTMP distribution.\n    To get the status of your request, use the GET StreamingDistribution API action. When the value of Enabled is true and the value of Status is Deployed , your distribution is ready. 
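A hedged status-polling sketch (the ID is a made-up placeholder; boto3 also ships a streaming_distribution_deployed waiter that can replace this loop):\n        import time\n        import boto3\n        client = boto3.client('cloudfront')\n        dist_id = 'EGTXBD79EXAMPLE'  # hypothetical streaming distribution ID\n        while True:\n            result = client.get_streaming_distribution(Id=dist_id)\n            if result['StreamingDistribution']['Status'] == 'Deployed':\n                break\n            time.sleep(30)  # re-check every 30 seconds\n    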
A distribution usually deploys in less than 15 minutes.\n    For more information about RTMP distributions, see Working with RTMP Distributions in the Amazon CloudFront Developer Guide .\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_streaming_distribution(\n        StreamingDistributionConfig={\n            'CallerReference': 'string',\n            'S3Origin': {\n                'DomainName': 'string',\n                'OriginAccessIdentity': 'string'\n            },\n            'Aliases': {\n                'Quantity': 123,\n                'Items': [\n                    'string',\n                ]\n            },\n            'Comment': 'string',\n            'Logging': {\n                'Enabled': True|False,\n                'Bucket': 'string',\n                'Prefix': 'string'\n            },\n            'TrustedSigners': {\n                'Enabled': True|False,\n                'Quantity': 123,\n                'Items': [\n                    'string',\n                ]\n            },\n            'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n            'Enabled': True|False\n        }\n    )\n    \n    \n    :type StreamingDistributionConfig: dict\n    :param StreamingDistributionConfig: [REQUIRED]\n    The streaming distribution's configuration information.\n    CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n    If the value of CallerReference is new (regardless of the content of the StreamingDistributionConfig object), CloudFront creates a new distribution.\n    If CallerReference is a value that you already sent in a previous request to create a distribution, CloudFront returns a DistributionAlreadyExists error.\n    S3Origin (dict) -- [REQUIRED]A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.\n    DomainName (string) -- [REQUIRED]The DNS name of the Amazon S3 origin.\n    OriginAccessIdentity (string) -- [REQUIRED]The CloudFront origin access identity to associate with the RTMP distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront.\n    If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.\n    To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.\n    To replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n    For more information, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content in the Amazon CloudFront Developer Guide .\n    Aliases (dict) --A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.\n    Quantity (integer) -- [REQUIRED]The number of CNAME aliases, if any, that you want to associate with this distribution.\n    Items (list) --A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.\n    (string) --\n    \n    Comment (string) -- [REQUIRED]Any comments you want to include about the streaming distribution.\n    Logging (dict) --A complex type that controls whether access logs are written for the streaming distribution.\n    Enabled (boolean) -- [REQUIRED]Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled , and specify empty Bucket and Prefix elements. 
If you specify false for Enabled but you specify values for Bucket and Prefix , the values are automatically deleted.\n Bucket (string) -- [REQUIRED]The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com .\n Prefix (string) -- [REQUIRED]An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/ . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.\n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies any AWS accounts that you want to permit to create signed URLs for private content. If you want the distribution to use signed URLs, include this element; if you want the distribution to use public URLs, remove this element. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n PriceClass (string) --A complex type that contains information about price class for this streaming distribution.\n Enabled (boolean) -- [REQUIRED]Whether the streaming distribution is enabled to accept user requests for content.\n \n\n :rtype: dict\n :return: {\n 'StreamingDistribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'StreamingDistributionConfig': {\n 'CallerReference': 'string',\n 'S3Origin': {\n 'DomainName': 'string',\n 'OriginAccessIdentity': 'string'\n },\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_streaming_distribution_with_tags(StreamingDistributionConfigWithTags=None):\n \"\"\"\n Create a new streaming distribution with tags.\n See also: AWS API Documentation\n \n \n :example: response = client.create_streaming_distribution_with_tags(\n StreamingDistributionConfigWithTags={\n 'StreamingDistributionConfig': {\n 'CallerReference': 'string',\n 'S3Origin': {\n 'DomainName': 'string',\n 'OriginAccessIdentity': 'string'\n },\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False\n },\n 'Tags': {\n 'Items': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n )\n \n \n :type 
StreamingDistributionConfigWithTags: dict\n    :param StreamingDistributionConfigWithTags: [REQUIRED]\n    The streaming distribution's configuration information.\n    StreamingDistributionConfig (dict) -- [REQUIRED]A streaming distribution configuration.\n    CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n    If the value of CallerReference is new (regardless of the content of the StreamingDistributionConfig object), CloudFront creates a new distribution.\n    If CallerReference is a value that you already sent in a previous request to create a distribution, CloudFront returns a DistributionAlreadyExists error.\n    S3Origin (dict) -- [REQUIRED]A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.\n    DomainName (string) -- [REQUIRED]The DNS name of the Amazon S3 origin.\n    OriginAccessIdentity (string) -- [REQUIRED]The CloudFront origin access identity to associate with the RTMP distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront.\n    If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.\n    To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.\n    To replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n    For more information, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content in the Amazon CloudFront Developer Guide .\n    Aliases (dict) --A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.\n    Quantity (integer) -- [REQUIRED]The number of CNAME aliases, if any, that you want to associate with this distribution.\n    Items (list) --A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.\n    (string) --\n    \n    Comment (string) -- [REQUIRED]Any comments you want to include about the streaming distribution.\n    Logging (dict) --A complex type that controls whether access logs are written for the streaming distribution.\n    Enabled (boolean) -- [REQUIRED]Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled , and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix , the values are automatically deleted.\n    Bucket (string) -- [REQUIRED]The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com .\n    Prefix (string) -- [REQUIRED]An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/ . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.\n    TrustedSigners (dict) -- [REQUIRED]A complex type that specifies any AWS accounts that you want to permit to create signed URLs for private content. 
If you want the distribution to use signed URLs, include this element; if you want the distribution to use public URLs, remove this element. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n PriceClass (string) --A complex type that contains information about price class for this streaming distribution.\n Enabled (boolean) -- [REQUIRED]Whether the streaming distribution is enabled to accept user requests for content.\n Tags (dict) -- [REQUIRED]A complex type that contains zero or more Tag elements.\n Items (list) --A complex type that contains Tag elements.\n (dict) --A complex type that contains Tag key and Tag value.\n Key (string) -- [REQUIRED]A string that contains Tag key.\n The string length should be between 1 and 128 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . : / = + @ .\n Value (string) --A string that contains an optional Tag value.\n The string length should be between 0 and 256 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . : / = + @ .\n \n \n \n\n :rtype: dict\n :return: {\n 'StreamingDistribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'StreamingDistributionConfig': {\n 'CallerReference': 'string',\n 'S3Origin': {\n 'DomainName': 'string',\n 'OriginAccessIdentity': 'string'\n },\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False\n }\n },\n 'Location': 'string',\n 'ETag': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_cloud_front_origin_access_identity(Id=None, IfMatch=None):\n \"\"\"\n Delete an origin access identity.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_cloud_front_origin_access_identity(\n Id='string',\n IfMatch='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The origin access identity's ID.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL .\n\n \"\"\"\n pass\n\ndef delete_distribution(Id=None, IfMatch=None):\n \"\"\"\n Delete a distribution.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_distribution(\n Id='string',\n IfMatch='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The distribution ID.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when you disabled the distribution. 
For example: E2QWRUHAPOMQZL .\n\n \"\"\"\n pass\n\ndef delete_field_level_encryption_config(Id=None, IfMatch=None):\n \"\"\"\n Remove a field-level encryption configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_field_level_encryption_config(\n Id='string',\n IfMatch='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the configuration you want to delete from CloudFront.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the configuration identity to delete. For example: E2QWRUHAPOMQZL .\n\n \"\"\"\n pass\n\ndef delete_field_level_encryption_profile(Id=None, IfMatch=None):\n \"\"\"\n Remove a field-level encryption profile.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_field_level_encryption_profile(\n Id='string',\n IfMatch='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n Request the ID of the profile you want to delete from CloudFront.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the profile to delete. For example: E2QWRUHAPOMQZL .\n\n \"\"\"\n pass\n\ndef delete_public_key(Id=None, IfMatch=None):\n \"\"\"\n Remove a public key you previously added to CloudFront.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_public_key(\n Id='string',\n IfMatch='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the public key you want to remove from CloudFront.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the public key identity to delete. For example: E2QWRUHAPOMQZL .\n\n \"\"\"\n pass\n\ndef delete_streaming_distribution(Id=None, IfMatch=None):\n \"\"\"\n Delete a streaming distribution. To delete an RTMP distribution using the CloudFront API, perform the following steps.\n For information about deleting a distribution using the CloudFront console, see Deleting a Distribution in the Amazon CloudFront Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_streaming_distribution(\n Id='string',\n IfMatch='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The distribution ID.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL .\n\n :returns: \n Id (string) -- [REQUIRED]\n The distribution ID.\n \n IfMatch (string) -- The value of the ETag header that you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL .\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_cloud_front_origin_access_identity(Id=None):\n \"\"\"\n Get the information about an origin access identity.\n See also: AWS API Documentation\n \n \n :example: response = client.get_cloud_front_origin_access_identity(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The identity's ID.\n \n\n :rtype: dict\n :return: {\n 'CloudFrontOriginAccessIdentity': {\n 'Id': 'string',\n 'S3CanonicalUserId': 'string',\n 'CloudFrontOriginAccessIdentityConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string'\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_cloud_front_origin_access_identity_config(Id=None):\n \"\"\"\n Get the configuration information about an origin access identity.\n See also: AWS API Documentation\n \n \n :example: response = client.get_cloud_front_origin_access_identity_config(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The identity's ID.\n \n\n :rtype: dict\n :return: {\n 'CloudFrontOriginAccessIdentityConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string'\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_distribution(Id=None):\n \"\"\"\n Get the information about a distribution.\n See also: AWS API Documentation\n \n \n :example: response = client.get_distribution(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The distribution's ID.\n \n\n :rtype: dict\n :return: {\n 'Distribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'InProgressInvalidationBatches': 123,\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'DistributionConfig': {\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 
'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n }\n },\n 'ETag': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_distribution_config(Id=None):\n \"\"\"\n Get the configuration information about a distribution.\n See also: AWS API Documentation\n \n \n :example: response = client.get_distribution_config(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The distribution's ID.\n \n\n :rtype: dict\n :return: {\n 'DistributionConfig': {\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 
'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 
'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n },\n 'ETag': 'string'\n }\n \n \n :returns: \n If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName .\n The bucket name must be between 3 and 63 characters long (inclusive).\n The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.\n The bucket name must not contain adjacent periods.\n \n \"\"\"\n pass\n\ndef get_field_level_encryption(Id=None):\n \"\"\"\n Get the field-level encryption configuration information.\n See also: AWS API Documentation\n \n \n :example: response = client.get_field_level_encryption(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n Request the ID for the field-level encryption configuration information.\n \n\n :rtype: dict\n :return: {\n 'FieldLevelEncryption': {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FieldLevelEncryptionConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_field_level_encryption_config(Id=None):\n \"\"\"\n Get the field-level encryption configuration information.\n See also: AWS API Documentation\n \n \n :example: response = client.get_field_level_encryption_config(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n Request the ID for the field-level encryption configuration information.\n \n\n :rtype: dict\n :return: {\n 'FieldLevelEncryptionConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_field_level_encryption_profile(Id=None):\n \"\"\"\n Get the field-level encryption profile information.\n See also: AWS API Documentation\n \n \n :example: response = client.get_field_level_encryption_profile(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n Get the ID for the field-level encryption profile information.\n \n\n :rtype: dict\n :return: {\n 
'FieldLevelEncryptionProfile': {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FieldLevelEncryptionProfileConfig': {\n 'Name': 'string',\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n }\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_field_level_encryption_profile_config(Id=None):\n \"\"\"\n Get the field-level encryption profile configuration information.\n See also: AWS API Documentation\n \n \n :example: response = client.get_field_level_encryption_profile_config(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n Get the ID for the field-level encryption profile configuration information.\n \n\n :rtype: dict\n :return: {\n 'FieldLevelEncryptionProfileConfig': {\n 'Name': 'string',\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_invalidation(DistributionId=None, Id=None):\n \"\"\"\n Get the information about an invalidation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_invalidation(\n DistributionId='string',\n Id='string'\n )\n \n \n :type DistributionId: string\n :param DistributionId: [REQUIRED]\n The distribution's ID.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier for the invalidation request, for example, IDFDVBD632BHDS5 .\n \n\n :rtype: dict\n :return: {\n 'Invalidation': {\n 'Id': 'string',\n 'Status': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'InvalidationBatch': {\n 'Paths': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'CallerReference': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
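A short sketch for this client (list_distributions is paginatable here; client.can_paginate('list_distributions') verifies this at runtime):\n        import boto3\n        client = boto3.client('cloudfront')\n        paginator = client.get_paginator('list_distributions')\n        for page in paginator.paginate():\n            for dist in page['DistributionList'].get('Items', []):\n                print(dist['Id'], dist['DomainName'])\n    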
More generally, if the\n    method name is create_foo and you'd normally invoke the\n    operation as client.create_foo(**kwargs), then, provided the\n    create_foo operation can be paginated, you can use the\n    call client.get_paginator('create_foo').\n\n    :rtype: L{botocore.paginate.Paginator}\n    \"\"\"\n    pass\n\ndef get_public_key(Id=None):\n    \"\"\"\n    Get the public key information.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_public_key(\n        Id='string'\n    )\n    \n    \n    :type Id: string\n    :param Id: [REQUIRED]\n    Request the ID for the public key.\n    \n\n    :rtype: dict\n    :return: {\n        'PublicKey': {\n            'Id': 'string',\n            'CreatedTime': datetime(2015, 1, 1),\n            'PublicKeyConfig': {\n                'CallerReference': 'string',\n                'Name': 'string',\n                'EncodedKey': 'string',\n                'Comment': 'string'\n            }\n        },\n        'ETag': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef get_public_key_config(Id=None):\n    \"\"\"\n    Return public key configuration information\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_public_key_config(\n        Id='string'\n    )\n    \n    \n    :type Id: string\n    :param Id: [REQUIRED]\n    Request the ID for the public key configuration.\n    \n\n    :rtype: dict\n    :return: {\n        'PublicKeyConfig': {\n            'CallerReference': 'string',\n            'Name': 'string',\n            'EncodedKey': 'string',\n            'Comment': 'string'\n        },\n        'ETag': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef get_streaming_distribution(Id=None):\n    \"\"\"\n    Gets information about a specified RTMP distribution, including the distribution configuration.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_streaming_distribution(\n        Id='string'\n    )\n    \n    \n    :type Id: string\n    :param Id: [REQUIRED]\n    The streaming distribution's ID.\n    \n\n    :rtype: dict\n    :return: {\n        'StreamingDistribution': {\n            'Id': 'string',\n            'ARN': 'string',\n            'Status': 'string',\n            'LastModifiedTime': datetime(2015, 1, 1),\n            'DomainName': 'string',\n            'ActiveTrustedSigners': {\n                'Enabled': True|False,\n                'Quantity': 123,\n                'Items': [\n                    {\n                        'AwsAccountNumber': 'string',\n                        'KeyPairIds': {\n                            'Quantity': 123,\n                            'Items': [\n                                'string',\n                            ]\n                        }\n                    },\n                ]\n            },\n            'StreamingDistributionConfig': {\n                'CallerReference': 'string',\n                'S3Origin': {\n                    'DomainName': 'string',\n                    'OriginAccessIdentity': 'string'\n                },\n                'Aliases': {\n                    'Quantity': 123,\n                    'Items': [\n                        'string',\n                    ]\n                },\n                'Comment': 'string',\n                'Logging': {\n                    'Enabled': True|False,\n                    'Bucket': 'string',\n                    'Prefix': 'string'\n                },\n                'TrustedSigners': {\n                    'Enabled': True|False,\n                    'Quantity': 123,\n                    'Items': [\n                        'string',\n                    ]\n                },\n                'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n                'Enabled': True|False\n            }\n        },\n        'ETag': 'string'\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef get_streaming_distribution_config(Id=None):\n    \"\"\"\n    Get the configuration information about a streaming distribution.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_streaming_distribution_config(\n        Id='string'\n    )\n    \n    \n    :type Id: string\n    :param Id: [REQUIRED]\n    The streaming distribution's ID.\n    \n\n    :rtype: dict\n    :return: {\n        'StreamingDistributionConfig': {\n            'CallerReference': 'string',\n            'S3Origin': {\n                'DomainName': 'string',\n                'OriginAccessIdentity': 'string'\n            },\n            'Aliases': {\n                'Quantity': 123,\n                'Items': [\n                    'string',\n                ]\n            },\n            'Comment': 'string',\n            'Logging': {\n                'Enabled': True|False,\n                'Bucket': 'string',\n                'Prefix': 'string'\n            },\n            'TrustedSigners': {\n                'Enabled': True|False,\n                'Quantity': 123,\n                'Items': [\n                    'string',\n                ]\n            },\n            'PriceClass': 
'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False\n },\n 'ETag': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_cloud_front_origin_access_identities(Marker=None, MaxItems=None):\n \"\"\"\n Lists origin access identities.\n See also: AWS API Documentation\n \n \n :example: response = client.list_cloud_front_origin_access_identities(\n Marker='string',\n MaxItems='string'\n )\n \n \n :type Marker: string\n :param Marker: Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).\n\n :type MaxItems: string\n :param MaxItems: The maximum number of origin access identities you want in the response body.\n\n :rtype: dict\n :return: {\n 'CloudFrontOriginAccessIdentityList': {\n 'Marker': 'string',\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'IsTruncated': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'S3CanonicalUserId': 'string',\n 'Comment': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef list_distributions(Marker=None, MaxItems=None):\n \"\"\"\n List distributions.\n See also: AWS API Documentation\n \n \n :example: response = client.list_distributions(\n Marker='string',\n MaxItems='string'\n )\n \n \n :type Marker: string\n :param Marker: Use this when paginating results to indicate where to begin in your list of distributions. The results include distributions in the list that occur after the marker. 
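A manual paging sketch built on these parameters (field names follow the response shape below; a configured client is assumed):\n        import boto3\n        client = boto3.client('cloudfront')\n        marker = ''\n        while True:\n            kwargs = {'MaxItems': '50'}  # MaxItems is a string in this API\n            if marker:\n                kwargs['Marker'] = marker\n            dist_list = client.list_distributions(**kwargs)['DistributionList']\n            for dist in dist_list.get('Items', []):\n                print(dist['Id'])\n            if not dist_list['IsTruncated']:\n                break\n            marker = dist_list['NextMarker']\n    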
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).\n\n :type MaxItems: string\n :param MaxItems: The maximum number of distributions you want in the response body.\n\n :rtype: dict\n :return: {\n 'DistributionList': {\n 'Marker': 'string',\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'IsTruncated': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'DomainName': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 
123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_distributions_by_web_acl_id(Marker=None, MaxItems=None, WebACLId=None):\n \"\"\"\n List the distributions that are associated with a specified AWS WAF web ACL.\n See also: AWS API Documentation\n \n \n :example: response = client.list_distributions_by_web_acl_id(\n Marker='string',\n MaxItems='string',\n WebACLId='string'\n )\n \n \n :type Marker: string\n :param Marker: Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker , specify the value of NextMarker from the last response. (For the first request, omit Marker .)\n\n :type MaxItems: string\n :param MaxItems: The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.\n\n :type WebACLId: string\n :param WebACLId: [REQUIRED]\n The ID of the AWS WAF web ACL that you want to list the associated distributions. 
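A minimal call sketch (the web ACL ID is a made-up placeholder; a configured client is assumed):\n        import boto3\n        client = boto3.client('cloudfront')\n        response = client.list_distributions_by_web_acl_id(\n            WebACLId='473e64fd-f30b-4765-81a0-62ad96dd167a',  # hypothetical ID\n            MaxItems='25',\n        )\n        for dist in response['DistributionList'].get('Items', []):\n            print(dist['Id'])\n    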
If you specify 'null' for the ID, the request returns a list of the distributions that aren't associated with a web ACL.\n \n\n :rtype: dict\n :return: {\n 'DistributionList': {\n 'Marker': 'string',\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'IsTruncated': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'DomainName': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 
'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_field_level_encryption_configs(Marker=None, MaxItems=None):\n \"\"\"\n List all field-level encryption configurations that have been created in CloudFront for this account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_field_level_encryption_configs(\n Marker='string',\n MaxItems='string'\n )\n \n \n :type Marker: string\n :param Marker: Use this when paginating results to indicate where to begin in your list of configurations. The results include configurations in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last configuration on that page).\n\n :type MaxItems: string\n :param MaxItems: The maximum number of field-level encryption configurations you want in the response body.\n\n :rtype: dict\n :return: {\n 'FieldLevelEncryptionList': {\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef list_field_level_encryption_profiles(Marker=None, MaxItems=None):\n \"\"\"\n Request a list of field-level encryption profiles that have been created in CloudFront for this account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_field_level_encryption_profiles(\n Marker='string',\n MaxItems='string'\n )\n \n \n :type Marker: string\n :param Marker: Use this when paginating results to indicate where to begin in your list of profiles. The results include profiles in the list that occur after the marker. 
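As a hedged sketch of the pagination pattern that this Marker/NextMarker pair supports -- assuming a configured boto3 CloudFront client named client, with a placeholder page size:\n marker = None\n while True:\n     kwargs = {'MaxItems': '20'}\n     if marker:\n         kwargs['Marker'] = marker\n     page = client.list_field_level_encryption_profiles(**kwargs)\n     profile_list = page['FieldLevelEncryptionProfileList']\n     for profile in profile_list.get('Items', []):\n         print(profile['Id'], profile['Name'])\n     marker = profile_list.get('NextMarker')\n     if not marker:\n         break\n 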
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last profile on that page).\n\n :type MaxItems: string\n :param MaxItems: The maximum number of field-level encryption profiles you want in the response body.\n\n :rtype: dict\n :return: {\n 'FieldLevelEncryptionProfileList': {\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'Name': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'Comment': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_invalidations(DistributionId=None, Marker=None, MaxItems=None):\n \"\"\"\n Lists invalidation batches.\n See also: AWS API Documentation\n \n \n :example: response = client.list_invalidations(\n DistributionId='string',\n Marker='string',\n MaxItems='string'\n )\n \n \n :type DistributionId: string\n :param DistributionId: [REQUIRED]\n The distribution's ID.\n \n\n :type Marker: string\n :param Marker: Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.\n\n :type MaxItems: string\n :param MaxItems: The maximum number of invalidation batches that you want in the response body.\n\n :rtype: dict\n :return: {\n 'InvalidationList': {\n 'Marker': 'string',\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'IsTruncated': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'Status': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef list_public_keys(Marker=None, MaxItems=None):\n \"\"\"\n List all public keys that have been added to CloudFront for this account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_public_keys(\n Marker='string',\n MaxItems='string'\n )\n \n \n :type Marker: string\n :param Marker: Use this when paginating results to indicate where to begin in your list of public keys. The results include public keys in the list that occur after the marker. 
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last public key on that page).\n\n :type MaxItems: string\n :param MaxItems: The maximum number of public keys you want in the response body.\n\n :rtype: dict\n :return: {\n 'PublicKeyList': {\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'EncodedKey': 'string',\n 'Comment': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef list_streaming_distributions(Marker=None, MaxItems=None):\n \"\"\"\n List streaming distributions.\n See also: AWS API Documentation\n \n \n :example: response = client.list_streaming_distributions(\n Marker='string',\n MaxItems='string'\n )\n \n \n :type Marker: string\n :param Marker: The value that you provided for the Marker request parameter.\n\n :type MaxItems: string\n :param MaxItems: The value that you provided for the MaxItems request parameter.\n\n :rtype: dict\n :return: {\n 'StreamingDistributionList': {\n 'Marker': 'string',\n 'NextMarker': 'string',\n 'MaxItems': 123,\n 'IsTruncated': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'DomainName': 'string',\n 'S3Origin': {\n 'DomainName': 'string',\n 'OriginAccessIdentity': 'string'\n },\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Comment': 'string',\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_tags_for_resource(Resource=None):\n \"\"\"\n List tags for a CloudFront resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n Resource='string'\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n An ARN of a CloudFront resource.\n \n\n :rtype: dict\n :return: {\n 'Tags': {\n 'Items': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(Resource=None, Tags=None):\n \"\"\"\n Add tags to a CloudFront resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n Resource='string',\n Tags={\n 'Items': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n An ARN of a CloudFront resource.\n \n\n :type Tags: dict\n :param Tags: [REQUIRED]\n A complex type that contains zero or more Tag elements.\n Items (list) --A complex type that contains Tag elements.\n (dict) --A complex type that contains Tag key and Tag value.\n Key (string) -- [REQUIRED]A string that contains Tag key.\n The string length should be between 1 and 128 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . : / = + @ .\n Value (string) --A string that contains an optional Tag value.\n The string length should be between 0 and 256 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . 
: / = + @ .\n \n \n\n \"\"\"\n pass\n\ndef untag_resource(Resource=None, TagKeys=None):\n \"\"\"\n Remove tags from a CloudFront resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n Resource='string',\n TagKeys={\n 'Items': [\n 'string',\n ]\n }\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n An ARN of a CloudFront resource.\n \n\n :type TagKeys: dict\n :param TagKeys: [REQUIRED]\n A complex type that contains zero or more Tag key elements.\n Items (list) --A complex type that contains Tag key elements.\n (string) --A string that contains Tag key.\n The string length should be between 1 and 128 characters. Valid characters include a-z , A-Z , 0-9 , space, and the special characters _ - . : / = + @ .\n \n \n\n \"\"\"\n pass\n\ndef update_cloud_front_origin_access_identity(CloudFrontOriginAccessIdentityConfig=None, Id=None, IfMatch=None):\n \"\"\"\n Update an origin access identity.\n See also: AWS API Documentation\n \n \n :example: response = client.update_cloud_front_origin_access_identity(\n CloudFrontOriginAccessIdentityConfig={\n 'CallerReference': 'string',\n 'Comment': 'string'\n },\n Id='string',\n IfMatch='string'\n )\n \n \n :type CloudFrontOriginAccessIdentityConfig: dict\n :param CloudFrontOriginAccessIdentityConfig: [REQUIRED]\n The identity's configuration information.\n CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n If the value of CallerReference is new (regardless of the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created.\n If the CallerReference is a value already sent in a previous identity request, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request.\n If the CallerReference is a value you already sent in a previous request to create an identity, but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.\n Comment (string) -- [REQUIRED]Any comments you want to include about the origin access identity.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identity's id.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the identity's configuration. 
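A hedged sketch of the read-modify-write flow behind IfMatch -- assuming a configured boto3 CloudFront client named client; oai_id is a placeholder identity ID, and get_cloud_front_origin_access_identity_config returns the current configuration together with its ETag:\n current = client.get_cloud_front_origin_access_identity_config(Id=oai_id)\n client.update_cloud_front_origin_access_identity(\n     Id=oai_id,\n     IfMatch=current['ETag'],\n     CloudFrontOriginAccessIdentityConfig={\n         'CallerReference': current['CloudFrontOriginAccessIdentityConfig']['CallerReference'],\n         'Comment': 'updated comment'\n     }\n )\n 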
For example: E2QWRUHAPOMQZL .\n\n :rtype: dict\n :return: {\n 'CloudFrontOriginAccessIdentity': {\n 'Id': 'string',\n 'S3CanonicalUserId': 'string',\n 'CloudFrontOriginAccessIdentityConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string'\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_distribution(DistributionConfig=None, Id=None, IfMatch=None):\n \"\"\"\n Updates the configuration for a web distribution.\n The update process includes getting the current distribution configuration, updating the XML document that is returned to make your changes, and then submitting an UpdateDistribution request to make the updates.\n For information about updating a distribution using the CloudFront console instead, see Creating a Distribution in the Amazon CloudFront Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.update_distribution(\n DistributionConfig={\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 
123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n },\n Id='string',\n IfMatch='string'\n )\n \n \n :type DistributionConfig: dict\n :param DistributionConfig: [REQUIRED]\n The distribution's configuration information.\n CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n If the value of CallerReference is new (regardless of the content of the DistributionConfig object), CloudFront creates a new distribution.\n If CallerReference is a value that you already sent in a previous request to create a distribution, CloudFront returns a DistributionAlreadyExists error.\n Aliases (dict) --A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.\n Quantity (integer) -- [REQUIRED]The number of CNAME aliases, if any, that you want to associate with this distribution.\n Items (list) --A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.\n (string) --\n \n DefaultRootObject (string) --The object that you want CloudFront to request from your origin (for example, index.html ) when a viewer requests the root URL for your distribution (http://www.example.com ) instead of an object in your distribution (http://www.example.com/product-description.html ). Specifying a default root object avoids exposing the contents of your distribution.\n Specify only the object name, for example, index.html . 
Don't add a / before the object name.\n If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element.\n To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.\n To replace the default root object, update the distribution configuration and specify the new object.\n For more information about the default root object, see Creating a Default Root Object in the Amazon CloudFront Developer Guide .\n Origins (dict) -- [REQUIRED]A complex type that contains information about origins for this distribution.\n Quantity (integer) -- [REQUIRED]The number of origins or origin groups for this distribution.\n Items (list) -- [REQUIRED]A complex type that contains origins or origin groups for this distribution.\n (dict) --A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files. This can also be an origin group, if you've created an origin group. You must specify at least one origin or origin group.\n For the current limit on the number of origins or origin groups that you can specify for a distribution, see Amazon CloudFront Limits in the AWS General Reference .\n Id (string) -- [REQUIRED]A unique identifier for the origin or origin group. The value of Id must be unique within the distribution.\n When you specify the value of TargetOriginId for the default cache behavior or for another cache behavior, you indicate the origin to which you want the cache behavior to route requests by specifying the value of the Id element for that origin. When a request matches the path pattern for that cache behavior, CloudFront routes the request to the specified origin. For more information, see Cache Behavior Settings in the Amazon CloudFront Developer Guide .\n DomainName (string) -- [REQUIRED]\n Amazon S3 origins : The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com . If you set up your bucket to be configured as a website endpoint, enter the Amazon S3 static website hosting endpoint for the bucket.\n For more information about specifying this value for different types of origins, see Origin Domain Name in the Amazon CloudFront Developer Guide .\n Constraints for Amazon S3 origins:\n If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName .\n The bucket name must be between 3 and 63 characters long (inclusive).\n The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.\n The bucket name must not contain adjacent periods.\n Custom Origins : The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com .\n Constraints for custom origins:\n DomainName must be a valid DNS name that contains only a-z, A-Z, 0-9, dot (.), hyphen (-), or underscore (_) characters.\n The name cannot exceed 128 characters.\n OriginPath (string) --An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a / . CloudFront appends the directory name to the value of DomainName , for example, example.com/production . 
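A hedged origin fragment, with placeholder values, showing how OriginPath pairs with DomainName inside Origins.Items:\n {\n     'Id': 'primary-s3-origin',\n     'DomainName': 'myawsbucket.s3.amazonaws.com',\n     'OriginPath': '/production',\n     'S3OriginConfig': {'OriginAccessIdentity': ''}\n }\n 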
Do not include a / at the end of the directory name.\n For example, suppose you've specified the following values for your distribution:\n DomainName : An Amazon S3 bucket named myawsbucket .\n OriginPath : /production\n CNAME : example.com\n When a user enters example.com/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/index.html .\n When a user enters example.com/acme/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/acme/index.html .\n CustomHeaders (dict) --A complex type that contains names and values for the custom headers that you want.\n Quantity (integer) -- [REQUIRED]The number of custom headers, if any, for this distribution.\n Items (list) --\n Optional : A list that contains one OriginCustomHeader element for each custom header that you want CloudFront to forward to the origin. If Quantity is 0 , omit Items .\n (dict) --A complex type that contains HeaderName and HeaderValue elements, if any, for this distribution.\n HeaderName (string) -- [REQUIRED]The name of a header that you want CloudFront to forward to your origin. For more information, see Forwarding Custom Headers to Your Origin (Web Distributions Only) in the Amazon CloudFront Developer Guide .\n HeaderValue (string) -- [REQUIRED]The value for the header that you specified in the HeaderName field.\n \n S3OriginConfig (dict) --A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.\n OriginAccessIdentity (string) -- [REQUIRED]The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. The format of the value is:\n origin-access-identity/cloudfront/ID-of-origin-access-identity\n where `` ID-of-origin-access-identity `` is the value that CloudFront returned in the ID element when you created the origin access identity.\n If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.\n To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.\n To replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n For more information about the origin access identity, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n CustomOriginConfig (dict) --A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead.\n HTTPPort (integer) -- [REQUIRED]The HTTP port the custom origin listens on.\n HTTPSPort (integer) -- [REQUIRED]The HTTPS port the custom origin listens on.\n OriginProtocolPolicy (string) -- [REQUIRED]The origin protocol policy to apply to your origin.\n OriginSslProtocols (dict) --The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.\n Quantity (integer) -- [REQUIRED]The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.\n Items (list) -- [REQUIRED]A list that contains allowed SSL/TLS protocols for this distribution.\n (string) --\n \n OriginReadTimeout (integer) --You can create a custom origin read timeout. 
All timeout units are in seconds. The default origin read timeout is 30 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 4 seconds; the maximum is 60 seconds.\n If you need to increase the maximum time limit, contact the AWS Support Center .\n OriginKeepaliveTimeout (integer) --You can create a custom keep-alive timeout. All timeout units are in seconds. The default keep-alive timeout is 5 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 1 second; the maximum is 60 seconds.\n If you need to increase the maximum time limit, contact the AWS Support Center .\n \n \n OriginGroups (dict) --A complex type that contains information about origin groups for this distribution.\n Quantity (integer) -- [REQUIRED]The number of origin groups.\n Items (list) --The items (origin groups) in a distribution.\n (dict) --An origin group includes two origins (a primary origin and a second origin to failover to) and failover criteria that you specify. You create an origin group to support origin failover in CloudFront. When you create or update a distribution, you can specify the origin group instead of a single origin, and CloudFront will failover from the primary origin to the second origin under the failover conditions that you've chosen.\n Id (string) -- [REQUIRED]The origin group's ID.\n FailoverCriteria (dict) -- [REQUIRED]A complex type that contains information about the failover criteria for an origin group.\n StatusCodes (dict) -- [REQUIRED]The status codes that, when returned from the primary origin, will trigger CloudFront to failover to the second origin.\n Quantity (integer) -- [REQUIRED]The number of status codes.\n Items (list) -- [REQUIRED]The items (status codes) for an origin group.\n (integer) --\n \n Members (dict) -- [REQUIRED]A complex type that contains information about the origins in an origin group.\n Quantity (integer) -- [REQUIRED]The number of origins in an origin group.\n Items (list) -- [REQUIRED]Items (origins) in an origin group.\n (dict) --An origin in an origin group.\n OriginId (string) -- [REQUIRED]The ID for an origin in an origin group.\n \n \n \n DefaultCacheBehavior (dict) -- [REQUIRED]A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.\n TargetOriginId (string) -- [REQUIRED]The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.\n ForwardedValues (dict) -- [REQUIRED]A complex type that specifies how CloudFront handles query strings and cookies.\n QueryString (boolean) -- [REQUIRED]Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys , if any:\n If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. 
Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.\n If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.\n If you specify false for QueryString , CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.\n For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide .\n Cookies (dict) -- [REQUIRED]A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide .\n Forward (string) -- [REQUIRED]Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.\n Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.\n WhitelistedNames (dict) --Required if you specify whitelist for the value of Forward: . A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.\n If you specify all or none for the value of Forward , omit WhitelistedNames . If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.\n For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference .\n Quantity (integer) -- [REQUIRED]The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.\n Items (list) --A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.\n (string) --\n \n Headers (dict) --A complex type that specifies the Headers , if any, that you want CloudFront to base caching on for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:\n Forward all headers to your origin : Specify 1 for Quantity and * for Name .\n Warning\n CloudFront doesn't cache the objects that are associated with this cache behavior. Instead, CloudFront sends every request to the origin.\n Forward a whitelist of headers you specify : Specify the number of headers that you want CloudFront to base caching on. Then specify the header names in Name elements. CloudFront caches your objects based on the values in the specified headers.\n Forward only the default headers : Specify 0 for Quantity and omit Items . In this configuration, CloudFront doesn't cache based on the values in the request headers.\n Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. 
See the following documentation:\n S3 bucket : See HTTP Request Headers That CloudFront Removes or Updates\n Custom origin : See HTTP Request Headers and CloudFront Behavior\n Items (list) --A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0 , omit Items .\n (string) --\n \n QueryStringCacheKeys (dict) --A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of whitelisted query string parameters for this cache behavior.\n Items (list) --(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. If Quantity is 0, you can omit Items .\n (string) --\n \n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.\n If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled , and specify the applicable values for Quantity and Items . For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n If you don't want to require signed URLs in requests for objects that match PathPattern , specify false for Enabled and 0 for Quantity . Omit Items .\n To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false ), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n ViewerProtocolPolicy (string) -- [REQUIRED]The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern . You can specify the following options:\n allow-all : Viewers can use HTTP or HTTPS.\n redirect-to-https : If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.\n https-only : If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).\n For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide .\n Note\n The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. 
For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MinTTL (integer) -- [REQUIRED]The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers , if you specify 1 for Quantity and * for Name ).\n AllowedMethods (dict) --A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices:\n CloudFront forwards only GET and HEAD requests.\n CloudFront forwards only GET , HEAD , and OPTIONS requests.\n CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests.\n If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET , HEAD , and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.\n (string) --\n CachedMethods (dict) --A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:\n CloudFront caches responses to GET and HEAD requests.\n CloudFront caches responses to GET , HEAD , and OPTIONS requests.\n If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET , HEAD , and OPTIONS requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to cache responses to.\n (string) --\n \n SmoothStreaming (boolean) --Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true ; if not, specify false . If you specify true for SmoothStreaming , you can still distribute other content using this cache behavior if the content matches the value of PathPattern .\n DefaultTTL (integer) --The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. 
For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MaxTTL (integer) --The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n Compress (boolean) --Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true ; if not, specify false . For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide .\n LambdaFunctionAssociations (dict) --A complex type that contains zero or more Lambda function associations for a cache behavior.\n Quantity (integer) -- [REQUIRED]The number of Lambda function associations for this cache behavior.\n Items (list) --\n Optional : A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that contains a Lambda function association.\n LambdaFunctionARN (string) -- [REQUIRED]The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.\n EventType (string) -- [REQUIRED]Specifies the event type that triggers a Lambda function invocation. You can specify the following values:\n viewer-request : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n origin-request : The function executes only when CloudFront forwards a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n origin-response : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n viewer-response : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.\n IncludeBody (boolean) --A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.\n \n FieldLevelEncryptionId (string) --The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.\n CacheBehaviors (dict) --A complex type that contains zero or more CacheBehavior elements.\n Quantity (integer) -- [REQUIRED]The number of cache behaviors for this distribution.\n Items (list) --Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that describes how CloudFront processes requests.\n You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. 
If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.\n For the current limit on the number of cache behaviors that you can add to a distribution, see Amazon CloudFront Limits in the AWS General Reference .\n If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error.\n To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.\n To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.\n For more information about cache behaviors, see Cache Behaviors in the Amazon CloudFront Developer Guide .\n PathPattern (string) -- [REQUIRED]The pattern (for example, images/*.jpg ) that specifies which requests to apply the behavior to. When CloudFront receives a viewer request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution.\n Note\n You can optionally include a slash (/ ) at the beginning of the path pattern. For example, /images/*.jpg . CloudFront behavior is the same with or without the leading / .\n The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.\n For more information, see Path Pattern in the Amazon CloudFront Developer Guide .\n TargetOriginId (string) -- [REQUIRED]The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.\n ForwardedValues (dict) -- [REQUIRED]A complex type that specifies how CloudFront handles query strings and cookies.\n QueryString (boolean) -- [REQUIRED]Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys , if any:\n If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.\n If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys , CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.\n If you specify false for QueryString , CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.\n For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide .\n Cookies (dict) -- [REQUIRED]A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. 
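A hedged fragment, with placeholder cookie names, showing the whitelist form of this Cookies type:\n 'Cookies': {\n     'Forward': 'whitelist',\n     'WhitelistedNames': {\n         'Quantity': 2,\n         'Items': ['session-id', 'locale']\n     }\n }\n 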
For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide .\n Forward (string) -- [REQUIRED]Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.\n Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.\n WhitelistedNames (dict) --Required if you specify whitelist for the value of Forward: . A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.\n If you specify all or none for the value of Forward , omit WhitelistedNames . If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.\n For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference .\n Quantity (integer) -- [REQUIRED]The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.\n Items (list) --A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.\n (string) --\n \n Headers (dict) --A complex type that specifies the Headers , if any, that you want CloudFront to base caching on for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:\n Forward all headers to your origin : Specify 1 for Quantity and * for Name .\n Warning\n CloudFront doesn't cache the objects that are associated with this cache behavior. Instead, CloudFront sends every request to the origin.\n Forward a whitelist of headers you specify : Specify the number of headers that you want CloudFront to base caching on. Then specify the header names in Name elements. CloudFront caches your objects based on the values in the specified headers.\n Forward only the default headers : Specify 0 for Quantity and omit Items . In this configuration, CloudFront doesn't cache based on the values in the request headers.\n Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. See the following documentation:\n S3 bucket : See HTTP Request Headers That CloudFront Removes or Updates\n Custom origin : See HTTP Request Headers and CloudFront Behavior\n Items (list) --A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0 , omit Items .\n (string) --\n \n QueryStringCacheKeys (dict) --A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.\n Quantity (integer) -- [REQUIRED]The number of whitelisted query string parameters for this cache behavior.\n Items (list) --(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. 
If Quantity is 0, you can omit Items .\n (string) --\n \n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.\n If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled , and specify the applicable values for Quantity and Items . For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n If you don't want to require signed URLs in requests for objects that match PathPattern , specify false for Enabled and 0 for Quantity . Omit Items .\n To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false ), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n ViewerProtocolPolicy (string) -- [REQUIRED]The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern . You can specify the following options:\n allow-all : Viewers can use HTTP or HTTPS.\n redirect-to-https : If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.\n https-only : If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).\n For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide .\n Note\n The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MinTTL (integer) -- [REQUIRED]The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers , if you specify 1 for Quantity and * for Name ).\n AllowedMethods (dict) --A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. 
There are three choices:\n CloudFront forwards only GET and HEAD requests.\n CloudFront forwards only GET , HEAD , and OPTIONS requests.\n CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests.\n If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET , HEAD , and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST , and DELETE requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.\n (string) --\n CachedMethods (dict) --A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:\n CloudFront caches responses to GET and HEAD requests.\n CloudFront caches responses to GET , HEAD , and OPTIONS requests.\n If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.\n Quantity (integer) -- [REQUIRED]The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET , HEAD , and OPTIONS requests).\n Items (list) -- [REQUIRED]A complex type that contains the HTTP methods that you want CloudFront to cache responses to.\n (string) --\n \n SmoothStreaming (boolean) --Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true ; if not, specify false . If you specify true for SmoothStreaming , you can still distribute other content using this cache behavior if the content matches the value of PathPattern .\n DefaultTTL (integer) --The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n MaxTTL (integer) --The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age , Cache-Control s-maxage , and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide .\n Compress (boolean) --Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true; if not, specify false. 
For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide .\n LambdaFunctionAssociations (dict) --A complex type that contains zero or more Lambda function associations for a cache behavior.\n Quantity (integer) -- [REQUIRED]The number of Lambda function associations for this cache behavior.\n Items (list) --\n Optional : A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0 , you can omit Items .\n (dict) --A complex type that contains a Lambda function association.\n LambdaFunctionARN (string) -- [REQUIRED]The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.\n EventType (string) -- [REQUIRED]Specifies the event type that triggers a Lambda function invocation. You can specify the following values:\n viewer-request : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n origin-request : The function executes only when CloudFront forwards a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n origin-response : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n viewer-response : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.\n IncludeBody (boolean) --A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.\n \n FieldLevelEncryptionId (string) --The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.\n \n CustomErrorResponses (dict) --A complex type that controls the following:\n Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n Quantity (integer) -- [REQUIRED]The number of HTTP status codes for which you want to specify a custom error page and/or a caching duration. 
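A hedged fragment, with placeholder values, showing how Quantity and Items pair up in a CustomErrorResponses block carrying a single entry:\n 'CustomErrorResponses': {\n     'Quantity': 1,\n     'Items': [\n         {\n             'ErrorCode': 404,\n             'ResponsePagePath': '/4xx-errors/404-not-found.html',\n             'ResponseCode': '404',\n             'ErrorCachingMinTTL': 300\n         },\n     ]\n }\n 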
If Quantity is 0 , you can omit Items .\n Items (list) --A complex type that contains a CustomErrorResponse element for each HTTP status code for which you want to specify a custom error page and/or a caching duration.\n (dict) --A complex type that controls:\n Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n ErrorCode (integer) -- [REQUIRED]The HTTP status code for which you want to specify a custom error page and/or a caching duration.\n ResponsePagePath (string) --The path to the custom error page that you want CloudFront to return to a viewer when your origin returns the HTTP status code specified by ErrorCode , for example, /4xx-errors/403-forbidden.html . If you want to store your objects and your custom error pages in different locations, your distribution must include a cache behavior for which the following is true:\n The value of PathPattern matches the path to your custom error messages. For example, suppose you saved custom error pages for 4xx errors in an Amazon S3 bucket in a directory named /4xx-errors . Your distribution must include a cache behavior for which the path pattern routes requests for your custom error pages to that location, for example, /4xx-errors/* .\n The value of TargetOriginId specifies the value of the ID element for the origin that contains your custom error pages.\n If you specify a value for ResponsePagePath , you must also specify a value for ResponseCode . If you don't want to specify a value, include an empty element, <ResponsePagePath> , in the XML document.\n We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom error pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get the files that you want to return to viewers because the origin server is unavailable.\n ResponseCode (string) --The HTTP status code that you want CloudFront to return to the viewer along with the custom error page. There are a variety of reasons that you might want CloudFront to return a status code different from the status code that your origin returned to CloudFront, for example:\n Some Internet devices (some firewalls and corporate proxies, for example) intercept HTTP 4xx and 5xx and prevent the response from being returned to the viewer. If you substitute 200 , the response typically won't be intercepted.\n If you don't care about distinguishing among different client errors or server errors, you can specify 400 or 500 as the ResponseCode for all 4xx or 5xx errors.\n You might want to return a 200 status code (OK) and a static website so your customers don't know that your website is down.\n If you specify a value for ResponseCode , you must also specify a value for ResponsePagePath . If you don't want to specify a value, include an empty element, <ResponseCode> , in the XML document.\n ErrorCachingMinTTL (integer) --The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status code specified in ErrorCode .
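As a sketch, an ErrorCachingMinTTL of 300 would have CloudFront serve the cached error response for five minutes before rechecking the origin.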
When this time period has elapsed, CloudFront queries your origin to see whether the problem that caused the error has been resolved and the requested object is now available.\n If you don't want to specify a value, include an empty element, <ErrorCachingMinTTL> , in the XML document.\n For more information, see Customizing Error Responses in the Amazon CloudFront Developer Guide .\n \n Comment (string) -- [REQUIRED]Any comments you want to include about the distribution.\n If you don't want to specify a comment, include an empty Comment element.\n To delete an existing comment, update the distribution configuration and include an empty Comment element.\n To add or change a comment, update the distribution configuration and specify the new comment.\n Logging (dict) --A complex type that controls whether access logs are written for the distribution.\n For more information about logging, see Access Logs in the Amazon CloudFront Developer Guide .\n Enabled (boolean) -- [REQUIRED]Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled , and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket , Prefix , and IncludeCookies , the values are automatically deleted.\n IncludeCookies (boolean) -- [REQUIRED]Specifies whether you want CloudFront to include cookies in access logs. If so, specify true for IncludeCookies . If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you don't want to include cookies when you create a distribution or if you want to disable including cookies for an existing distribution, specify false for IncludeCookies .\n Bucket (string) -- [REQUIRED]The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com .\n Prefix (string) -- [REQUIRED]An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/ . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.\n PriceClass (string) --The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify PriceClass_All , CloudFront responds to requests for your objects from all CloudFront edge locations.\n If you specify a price class other than PriceClass_All , CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.\n For more information about price classes, see Choosing the Price Class for a CloudFront Distribution in the Amazon CloudFront Developer Guide . For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see Amazon CloudFront Pricing .
For price class information, scroll down to see the table at the bottom of the page.\n Enabled (boolean) -- [REQUIRED]From this field, you can enable or disable the selected distribution.\n ViewerCertificate (dict) --\n CloudFrontDefaultCertificate (boolean) --For information about how and when to use CloudFrontDefaultCertificate , see ViewerCertificate .\n IAMCertificateId (string) --For information about how and when to use IAMCertificateId , see ViewerCertificate .\n ACMCertificateArn (string) --For information about how and when to use ACMCertificateArn , see ViewerCertificate .\n SSLSupportMethod (string) --If you specify a value for ViewerCertificate$ACMCertificateArn or for ViewerCertificate$IAMCertificateId , you must also specify how you want CloudFront to serve HTTPS requests: using a method that works for all clients or one that works for most clients:\n vip : CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you will incur additional monthly charges.\n sni-only : CloudFront can respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. If some of your users' browsers don't support SNI, we recommend that you do one of the following:\n Use the vip option (dedicated IP addresses) instead of sni-only .\n Use the CloudFront SSL/TLS certificate instead of a custom certificate. This requires that you use the CloudFront domain name of your distribution in the URLs for your objects, for example, https://d111111abcdef8.cloudfront.net/logo.png .\n If you can control which browser your users use, upgrade the browser to one that supports SNI.\n Use HTTP instead of HTTPS.\n Don't specify a value for SSLSupportMethod if you specified <CloudFrontDefaultCertificate>true</CloudFrontDefaultCertificate> .\n For more information, see Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide .\n MinimumProtocolVersion (string) --Specify the security policy that you want CloudFront to use for HTTPS connections. A security policy determines two settings:\n The minimum SSL/TLS protocol that CloudFront uses to communicate with viewers\n The cipher that CloudFront uses to encrypt the content that it returns to viewers\n Note\n On the CloudFront console, this setting is called Security policy .\n We recommend that you specify TLSv1.1_2016 unless your users are using browsers or devices that do not support TLSv1.1 or later.\n When both of the following are true, you must specify TLSv1 or later for the security policy:\n You're using a custom certificate: you specified a value for ACMCertificateArn or for IAMCertificateId\n You're using SNI: you specified sni-only for SSLSupportMethod\n If you specify true for CloudFrontDefaultCertificate , CloudFront automatically sets the security policy to TLSv1 regardless of the value that you specify for MinimumProtocolVersion .\n For information about the relationship between the security policy that you choose and the protocols and ciphers that CloudFront uses to communicate with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication Between Viewers and CloudFront in the Amazon CloudFront Developer Guide .\n Certificate (string) --This field has been deprecated.
Use one of the following fields instead:\n ViewerCertificate$ACMCertificateArn\n ViewerCertificate$IAMCertificateId\n ViewerCertificate$CloudFrontDefaultCertificate\n CertificateSource (string) --This field has been deprecated. Use one of the following fields instead:\n ViewerCertificate$ACMCertificateArn\n ViewerCertificate$IAMCertificateId\n ViewerCertificate$CloudFrontDefaultCertificate\n \n Restrictions (dict) --\n GeoRestriction (dict) -- [REQUIRED]A complex type that controls the countries in which your content is distributed. CloudFront determines the location of your users using MaxMind GeoIP databases.\n RestrictionType (string) -- [REQUIRED]The method that you want to use to restrict distribution of your content by country:\n none : No geo restriction is enabled, meaning access to content is not restricted by client geo location.\n blacklist : The Location elements specify the countries in which you don't want CloudFront to distribute your content.\n whitelist : The Location elements specify the countries in which you want CloudFront to distribute your content.\n Quantity (integer) -- [REQUIRED]When geo restriction is enabled , this is the number of countries in your whitelist or blacklist . Otherwise, when it is not enabled, Quantity is 0 , and you can omit Items .\n Items (list) --A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist ) or not distribute your content (blacklist ).\n The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist . Include one Location element for each country.\n CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list on the CloudFront console, which includes both country names and codes.\n (string) --\n \n WebACLId (string) --A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.\n AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF, see the AWS WAF Developer Guide .\n HttpVersion (string) --(Optional) Specify the maximum HTTP version that you want viewers to use to communicate with CloudFront. The default value for new web distributions is http2. Viewers that don't support HTTP/2 automatically use an earlier HTTP version.\n For viewers and CloudFront to use HTTP/2, viewers must support TLS 1.2 or later, and must support Server Name Indication (SNI).\n In general, configuring CloudFront to communicate with viewers using HTTP/2 reduces latency. You can improve performance by optimizing for HTTP/2. For more information, do an Internet search for 'http/2 optimization.'\n IsIPV6Enabled (boolean) --If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify true .
If you specify false , CloudFront responds to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses. This allows viewers to submit a second request, for an IPv4 address for your distribution.\n In general, you should enable IPv6 if you have users on IPv6 networks who want to access your content. However, if you're using signed URLs or signed cookies to restrict access to your content, and if you're using a custom policy that includes the IpAddress parameter to restrict the IP addresses that can access your content, don't enable IPv6. If you want to restrict access to some content by IP address and not restrict access to other content (or restrict access but not by IP address), you can create two distributions. For more information, see Creating a Signed URL Using a Custom Policy in the Amazon CloudFront Developer Guide .\n If you're using an Amazon Route 53 alias resource record set to route traffic to your CloudFront distribution, you need to create a second alias resource record set when both of the following are true:\n You enable IPv6 for the distribution\n You're using alternate domain names in the URLs for your objects\n For more information, see Routing Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name in the Amazon Route 53 Developer Guide .\n If you created a CNAME resource record set, either with Amazon Route 53 or with another DNS service, you don't need to make any changes. A CNAME record will route traffic to your distribution regardless of the IP address format of the viewer request.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The distribution's id.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the distribution's configuration. 
For example: E2QWRUHAPOMQZL .\n\n :rtype: dict\n :return: {\n 'Distribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'InProgressInvalidationBatches': 123,\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'DistributionConfig': {\n 'CallerReference': 'string',\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'DefaultRootObject': 'string',\n 'Origins': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'DomainName': 'string',\n 'OriginPath': 'string',\n 'CustomHeaders': {\n 'Quantity': 123,\n 'Items': [\n {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n ]\n },\n 'S3OriginConfig': {\n 'OriginAccessIdentity': 'string'\n },\n 'CustomOriginConfig': {\n 'HTTPPort': 123,\n 'HTTPSPort': 123,\n 'OriginProtocolPolicy': 'http-only'|'match-viewer'|'https-only',\n 'OriginSslProtocols': {\n 'Quantity': 123,\n 'Items': [\n 'SSLv3'|'TLSv1'|'TLSv1.1'|'TLSv1.2',\n ]\n },\n 'OriginReadTimeout': 123,\n 'OriginKeepaliveTimeout': 123\n }\n },\n ]\n },\n 'OriginGroups': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Id': 'string',\n 'FailoverCriteria': {\n 'StatusCodes': {\n 'Quantity': 123,\n 'Items': [\n 123,\n ]\n }\n },\n 'Members': {\n 'Quantity': 123,\n 'Items': [\n {\n 'OriginId': 'string'\n },\n ]\n }\n },\n ]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n 'CacheBehaviors': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PathPattern': 'string',\n 'TargetOriginId': 'string',\n 'ForwardedValues': {\n 'QueryString': True|False,\n 'Cookies': {\n 'Forward': 'none'|'whitelist'|'all',\n 'WhitelistedNames': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'Headers': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'QueryStringCacheKeys': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'ViewerProtocolPolicy': 'allow-all'|'https-only'|'redirect-to-https',\n 'MinTTL': 123,\n 'AllowedMethods': {\n 'Quantity': 123,\n 'Items': [\n 'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ],\n 'CachedMethods': {\n 'Quantity': 123,\n 'Items': [\n 
'GET'|'HEAD'|'POST'|'PUT'|'PATCH'|'OPTIONS'|'DELETE',\n ]\n }\n },\n 'SmoothStreaming': True|False,\n 'DefaultTTL': 123,\n 'MaxTTL': 123,\n 'Compress': True|False,\n 'LambdaFunctionAssociations': {\n 'Quantity': 123,\n 'Items': [\n {\n 'LambdaFunctionARN': 'string',\n 'EventType': 'viewer-request'|'viewer-response'|'origin-request'|'origin-response',\n 'IncludeBody': True|False\n },\n ]\n },\n 'FieldLevelEncryptionId': 'string'\n },\n ]\n },\n 'CustomErrorResponses': {\n 'Quantity': 123,\n 'Items': [\n {\n 'ErrorCode': 123,\n 'ResponsePagePath': 'string',\n 'ResponseCode': 'string',\n 'ErrorCachingMinTTL': 123\n },\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'IncludeCookies': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False,\n 'ViewerCertificate': {\n 'CloudFrontDefaultCertificate': True|False,\n 'IAMCertificateId': 'string',\n 'ACMCertificateArn': 'string',\n 'SSLSupportMethod': 'sni-only'|'vip',\n 'MinimumProtocolVersion': 'SSLv3'|'TLSv1'|'TLSv1_2016'|'TLSv1.1_2016'|'TLSv1.2_2018',\n 'Certificate': 'string',\n 'CertificateSource': 'cloudfront'|'iam'|'acm'\n },\n 'Restrictions': {\n 'GeoRestriction': {\n 'RestrictionType': 'blacklist'|'whitelist'|'none',\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n 'WebACLId': 'string',\n 'HttpVersion': 'http1.1'|'http2',\n 'IsIPV6Enabled': True|False\n }\n },\n 'ETag': 'string'\n }\n \n \n :returns: \n Update the XML document that was returned in the response to your GetDistributionConfig request to include your changes.\n \n \"\"\"\n pass\n\ndef update_field_level_encryption_config(FieldLevelEncryptionConfig=None, Id=None, IfMatch=None):\n \"\"\"\n Update a field-level encryption configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.update_field_level_encryption_config(\n FieldLevelEncryptionConfig={\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n },\n Id='string',\n IfMatch='string'\n )\n \n \n :type FieldLevelEncryptionConfig: dict\n :param FieldLevelEncryptionConfig: [REQUIRED]\n Request to update a field-level encryption configuration.\n CallerReference (string) -- [REQUIRED]A unique number that ensures the request can't be replayed.\n Comment (string) --An optional comment about the configuration.\n QueryArgProfileConfig (dict) --A complex data type that specifies when to forward content if a profile isn't found and the profile that can be provided as a query argument in a request.\n ForwardWhenQueryArgProfileIsUnknown (boolean) -- [REQUIRED]Flag to set if you want a request to be forwarded to the origin even if the profile specified by the field-level encryption query argument, fle-profile, is unknown.\n QueryArgProfiles (dict) --Profiles specified for query argument-profile mapping for field-level encryption.\n Quantity (integer) -- [REQUIRED]Number of profiles for query argument-profile mapping for field-level encryption.\n Items (list) --Number of items for query argument-profile mapping for field-level encryption.\n (dict) 
--Query argument-profile mapping for field-level encryption.\n QueryArg (string) -- [REQUIRED]Query argument for field-level encryption query argument-profile mapping.\n ProfileId (string) -- [REQUIRED]ID of profile to use for field-level encryption query argument-profile mapping.\n \n \n ContentTypeProfileConfig (dict) --A complex data type that specifies when to forward content if a content type isn't recognized and the profile to use by default in a request if a query argument doesn't specify a profile to use.\n ForwardWhenContentTypeIsUnknown (boolean) -- [REQUIRED]The setting in a field-level encryption content type-profile mapping that specifies what to do when an unknown content type is provided for the profile. If true, content is forwarded without being encrypted when the content type is unknown. If false (the default), an error is returned when the content type is unknown.\n ContentTypeProfiles (dict) --The configuration for a field-level encryption content type-profile.\n Quantity (integer) -- [REQUIRED]The number of field-level encryption content type-profile mappings.\n Items (list) --Items in a field-level encryption content type-profile mapping.\n (dict) --A field-level encryption content type profile.\n Format (string) -- [REQUIRED]The format for a field-level encryption content type-profile mapping.\n ProfileId (string) --The profile ID for a field-level encryption content type-profile mapping.\n ContentType (string) -- [REQUIRED]The content type for a field-level encryption content type-profile mapping.\n \n \n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID of the configuration you want to update.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the configuration identity to update.
For example: E2QWRUHAPOMQZL .\n\n :rtype: dict\n :return: {\n 'FieldLevelEncryption': {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FieldLevelEncryptionConfig': {\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'QueryArgProfileConfig': {\n 'ForwardWhenQueryArgProfileIsUnknown': True|False,\n 'QueryArgProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'QueryArg': 'string',\n 'ProfileId': 'string'\n },\n ]\n }\n },\n 'ContentTypeProfileConfig': {\n 'ForwardWhenContentTypeIsUnknown': True|False,\n 'ContentTypeProfiles': {\n 'Quantity': 123,\n 'Items': [\n {\n 'Format': 'URLEncoded',\n 'ProfileId': 'string',\n 'ContentType': 'string'\n },\n ]\n }\n }\n }\n },\n 'ETag': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_field_level_encryption_profile(FieldLevelEncryptionProfileConfig=None, Id=None, IfMatch=None):\n \"\"\"\n Update a field-level encryption profile.\n See also: AWS API Documentation\n \n \n :example: response = client.update_field_level_encryption_profile(\n FieldLevelEncryptionProfileConfig={\n 'Name': 'string',\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n }\n },\n Id='string',\n IfMatch='string'\n )\n \n \n :type FieldLevelEncryptionProfileConfig: dict\n :param FieldLevelEncryptionProfileConfig: [REQUIRED]\n Request to update a field-level encryption profile.\n Name (string) -- [REQUIRED]Profile name for the field-level encryption profile.\n CallerReference (string) -- [REQUIRED]A unique number that ensures that the request can't be replayed.\n Comment (string) --An optional comment for the field-level encryption profile.\n EncryptionEntities (dict) -- [REQUIRED]A complex data type of encryption entities for the field-level encryption profile that include the public key ID, provider, and field patterns for specifying which fields to encrypt with this key.\n Quantity (integer) -- [REQUIRED]Number of field pattern items in a field-level encryption content type-profile mapping.\n Items (list) --An array of field patterns in a field-level encryption content type-profile mapping.\n (dict) --Complex data type for field-level encryption profiles that includes the encryption key and field pattern specifications.\n PublicKeyId (string) -- [REQUIRED]The public key associated with a set of field-level encryption patterns, to be used when encrypting the fields that match the patterns.\n ProviderId (string) -- [REQUIRED]The provider associated with the public key being used for encryption. This value must also be provided with the private key for applications to be able to decrypt data.\n FieldPatterns (dict) -- [REQUIRED]Field patterns in a field-level encryption content type profile specify the fields that you want to be encrypted. You can provide the full field name, or any beginning characters followed by a wildcard (*). You can't overlap field patterns. For example, you can't have both ABC* and AB*. 
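For illustration (the field names are hypothetical), a valid non-overlapping set could look like FieldPatterns={'Quantity': 2, 'Items': ['CreditCardNumber', 'Phone*']} .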
Note that field patterns are case-sensitive.\n Quantity (integer) -- [REQUIRED]The number of field-level encryption field patterns.\n Items (list) --An array of the field-level encryption field patterns.\n (string) --\n \n \n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID of the field-level encryption profile request.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the profile identity to update. For example: E2QWRUHAPOMQZL .\n\n :rtype: dict\n :return: {\n 'FieldLevelEncryptionProfile': {\n 'Id': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FieldLevelEncryptionProfileConfig': {\n 'Name': 'string',\n 'CallerReference': 'string',\n 'Comment': 'string',\n 'EncryptionEntities': {\n 'Quantity': 123,\n 'Items': [\n {\n 'PublicKeyId': 'string',\n 'ProviderId': 'string',\n 'FieldPatterns': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n }\n }\n },\n 'ETag': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_public_key(PublicKeyConfig=None, Id=None, IfMatch=None):\n \"\"\"\n Update public key information. Note that the only value you can change is the comment.\n See also: AWS API Documentation\n \n \n :example: response = client.update_public_key(\n PublicKeyConfig={\n 'CallerReference': 'string',\n 'Name': 'string',\n 'EncodedKey': 'string',\n 'Comment': 'string'\n },\n Id='string',\n IfMatch='string'\n )\n \n \n :type PublicKeyConfig: dict\n :param PublicKeyConfig: [REQUIRED]\n Request to update public key information.\n CallerReference (string) -- [REQUIRED]A unique number that ensures that the request can't be replayed.\n Name (string) -- [REQUIRED]The name for a public key you add to CloudFront to use with features like field-level encryption.\n EncodedKey (string) -- [REQUIRED]The encoded public key that you want to add to CloudFront to use with features like field-level encryption.\n Comment (string) --An optional comment about a public key.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n ID of the public key to be updated.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the public key to update. 
For example: E2QWRUHAPOMQZL .\n\n :rtype: dict\n :return: {\n        'PublicKey': {\n            'Id': 'string',\n            'CreatedTime': datetime(2015, 1, 1),\n            'PublicKeyConfig': {\n                'CallerReference': 'string',\n                'Name': 'string',\n                'EncodedKey': 'string',\n                'Comment': 'string'\n            }\n        },\n        'ETag': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef update_streaming_distribution(StreamingDistributionConfig=None, Id=None, IfMatch=None):\n    \"\"\"\n    Update a streaming distribution.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_streaming_distribution(\n        StreamingDistributionConfig={\n            'CallerReference': 'string',\n            'S3Origin': {\n                'DomainName': 'string',\n                'OriginAccessIdentity': 'string'\n            },\n            'Aliases': {\n                'Quantity': 123,\n                'Items': [\n                    'string',\n                ]\n            },\n            'Comment': 'string',\n            'Logging': {\n                'Enabled': True|False,\n                'Bucket': 'string',\n                'Prefix': 'string'\n            },\n            'TrustedSigners': {\n                'Enabled': True|False,\n                'Quantity': 123,\n                'Items': [\n                    'string',\n                ]\n            },\n            'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n            'Enabled': True|False\n        },\n        Id='string',\n        IfMatch='string'\n    )\n    \n    \n    :type StreamingDistributionConfig: dict\n    :param StreamingDistributionConfig: [REQUIRED]\n    The streaming distribution's configuration information.\n    CallerReference (string) -- [REQUIRED]A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.\n    If the value of CallerReference is new (regardless of the content of the StreamingDistributionConfig object), CloudFront creates a new distribution.\n    If CallerReference is a value that you already sent in a previous request to create a distribution, CloudFront returns a DistributionAlreadyExists error.\n    S3Origin (dict) -- [REQUIRED]A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.\n    DomainName (string) -- [REQUIRED]The DNS name of the Amazon S3 origin.\n    OriginAccessIdentity (string) -- [REQUIRED]The CloudFront origin access identity to associate with the RTMP distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront.\n    If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.\n    To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.\n    To replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n    For more information, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content in the Amazon CloudFront Developer Guide .\n    Aliases (dict) --A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.\n    Quantity (integer) -- [REQUIRED]The number of CNAME aliases, if any, that you want to associate with this distribution.\n    Items (list) --A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.\n    (string) --\n    \n    Comment (string) -- [REQUIRED]Any comments you want to include about the streaming distribution.\n    Logging (dict) --A complex type that controls whether access logs are written for the streaming distribution.\n    Enabled (boolean) -- [REQUIRED]Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket.
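A minimal enabled sketch, reusing the example bucket and prefix shown elsewhere in this documentation, would be Logging={'Enabled': True, 'Bucket': 'myawslogbucket.s3.amazonaws.com', 'Prefix': 'myprefix/'} .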
If you don't want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled , and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix , the values are automatically deleted.\n Bucket (string) -- [REQUIRED]The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com .\n Prefix (string) -- [REQUIRED]An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/ . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.\n TrustedSigners (dict) -- [REQUIRED]A complex type that specifies any AWS accounts that you want to permit to create signed URLs for private content. If you want the distribution to use signed URLs, include this element; if you want the distribution to use public URLs, remove this element. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide .\n Enabled (boolean) -- [REQUIRED]Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId .\n Quantity (integer) -- [REQUIRED]The number of trusted signers for this cache behavior.\n Items (list) --\n Optional : A complex type that contains trusted signers for this cache behavior. If Quantity is 0 , you can omit Items .\n (string) --\n \n PriceClass (string) --A complex type that contains information about price class for this streaming distribution.\n Enabled (boolean) -- [REQUIRED]Whether the streaming distribution is enabled to accept user requests for content.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The streaming distribution's id.\n \n\n :type IfMatch: string\n :param IfMatch: The value of the ETag header that you received when retrieving the streaming distribution's configuration. 
For example: E2QWRUHAPOMQZL .\n\n :rtype: dict\n :return: {\n 'StreamingDistribution': {\n 'Id': 'string',\n 'ARN': 'string',\n 'Status': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'DomainName': 'string',\n 'ActiveTrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n {\n 'AwsAccountNumber': 'string',\n 'KeyPairIds': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n }\n },\n ]\n },\n 'StreamingDistributionConfig': {\n 'CallerReference': 'string',\n 'S3Origin': {\n 'DomainName': 'string',\n 'OriginAccessIdentity': 'string'\n },\n 'Aliases': {\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'Comment': 'string',\n 'Logging': {\n 'Enabled': True|False,\n 'Bucket': 'string',\n 'Prefix': 'string'\n },\n 'TrustedSigners': {\n 'Enabled': True|False,\n 'Quantity': 123,\n 'Items': [\n 'string',\n ]\n },\n 'PriceClass': 'PriceClass_100'|'PriceClass_200'|'PriceClass_All',\n 'Enabled': True|False\n }\n },\n 'ETag': 'string'\n }\n \n \n :returns: \n self , which is the AWS account used to create the distribution.\n An AWS account number.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5574210286140442, "alphanum_fraction": 0.5648723840713501, "avg_line_length": 31.81227684020996, "blob_id": "e1747b94fdb3f8d0370d0ca2d55d3c16bae1af09", "content_id": "348c4fafab0328ff3013cdb2b9f2fe9e30c43591", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64149, "license_type": "permissive", "max_line_length": 566, "num_lines": 1955, "path": "/pyboto3/robomaker.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_describe_simulation_job(jobs=None):\n \"\"\"\n Describes one or more simulation jobs.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_describe_simulation_job(\n jobs=[\n 'string',\n ]\n )\n \n \n :type jobs: list\n :param jobs: [REQUIRED]\n A list of Amazon Resource Names (ARNs) of simulation jobs to describe.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'jobs': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'status': 'Pending'|'Preparing'|'Running'|'Restarting'|'Completed'|'Failed'|'RunningFailed'|'Terminating'|'Terminated'|'Canceled',\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'failureBehavior': 'Fail'|'Continue',\n 'failureCode': 'InternalServiceError'|'RobotApplicationCrash'|'SimulationApplicationCrash'|'BadPermissionsRobotApplication'|'BadPermissionsSimulationApplication'|'BadPermissionsS3Output'|'BadPermissionsCloudwatchLogs'|'SubnetIpLimitExceeded'|'ENILimitExceeded'|'BadPermissionsUserCredentials'|'InvalidBundleRobotApplication'|'InvalidBundleSimulationApplication'|'RobotApplicationVersionMismatchedEtag'|'SimulationApplicationVersionMismatchedEtag',\n 'clientRequestToken': 'string',\n 'outputLocation': {\n 's3Bucket': 'string',\n 's3Prefix': 'string'\n },\n 'maxJobDurationInSeconds': 123,\n 'simulationTimeMillis': 123,\n 'iamRole': 'string',\n 'robotApplications': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'launchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'simulationApplications': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'launchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'vpcConfig': {\n 'subnets': [\n 'string',\n ],\n 'securityGroups': [\n 'string',\n ],\n 'vpcId': 'string',\n 'assignPublicIp': True|False\n }\n },\n ],\n 'unprocessedJobs': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_simulation_job(job=None):\n \"\"\"\n Cancels the specified simulation job.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_simulation_job(\n job='string'\n )\n \n \n :type job: string\n :param job: [REQUIRED]\n The simulation job ARN to cancel.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_deployment_job(deploymentConfig=None, clientRequestToken=None, fleet=None, deploymentApplicationConfigs=None):\n \"\"\"\n Creates a deployment job.\n See also: AWS API Documentation\n \n \n :example: response = client.create_deployment_job(\n deploymentConfig={\n 'concurrentDeploymentPercentage': 123,\n 'failureThresholdPercentage': 123\n },\n clientRequestToken='string',\n fleet='string',\n deploymentApplicationConfigs=[\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'preLaunchFile': 'string',\n 'launchFile': 'string',\n 'postLaunchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ]\n )\n \n \n :type deploymentConfig: dict\n :param deploymentConfig: The requested deployment configuration.\n concurrentDeploymentPercentage (integer) --The percentage of robots receiving the deployment at the same time.\n failureThresholdPercentage (integer) --The percentage of deployments that need to fail before stopping deployment.\n \n\n :type clientRequestToken: string\n :param clientRequestToken: [REQUIRED]\n Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n This field is autopopulated if not provided.\n \n\n :type fleet: string\n :param fleet: [REQUIRED]\n The Amazon Resource Name (ARN) of the fleet to deploy.\n \n\n :type deploymentApplicationConfigs: list\n :param deploymentApplicationConfigs: [REQUIRED]\n The deployment application configuration.\n (dict) --Information about a deployment application configuration.\n application (string) -- [REQUIRED]The application.\n applicationVersion (string) -- [REQUIRED]The version of the application.\n launchConfig (dict) -- [REQUIRED]The launch configuration, usually roslaunch .\n packageName (string) -- [REQUIRED]The package name.\n preLaunchFile (string) --The deployment pre-launch file. This file will be executed prior to the deployment launch file.\n launchFile (string) -- [REQUIRED]The deployment launch file.\n postLaunchFile (string) --The deployment post-launch file. 
This file will be executed after the deployment launch file.\n environmentVariables (dict) --An array of key/value pairs specifying environment variables for the deployment application.\n (string) --\n (string) --\n \n \n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'fleet': 'string',\n 'status': 'Pending'|'Preparing'|'InProgress'|'Failed'|'Succeeded',\n 'deploymentApplicationConfigs': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'preLaunchFile': 'string',\n 'launchFile': 'string',\n 'postLaunchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'failureReason': 'string',\n 'failureCode': 'ResourceNotFound'|'FailureThresholdBreached'|'RobotDeploymentNoResponse'|'GreengrassDeploymentFailed'|'MissingRobotArchitecture'|'MissingRobotApplicationArchitecture'|'MissingRobotDeploymentResource'|'GreengrassGroupVersionDoesNotExist'|'ExtractingBundleFailure'|'PreLaunchFileFailure'|'PostLaunchFileFailure'|'BadPermissionError'|'InternalServerError',\n 'createdAt': datetime(2015, 1, 1),\n 'deploymentConfig': {\n 'concurrentDeploymentPercentage': 123,\n 'failureThresholdPercentage': 123\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_fleet(name=None):\n \"\"\"\n Creates a fleet, a logical group of robots running the same robot application.\n See also: AWS API Documentation\n \n \n :example: response = client.create_fleet(\n name='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the fleet.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'createdAt': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef create_robot(name=None, architecture=None, greengrassGroupId=None):\n \"\"\"\n Creates a robot.\n See also: AWS API Documentation\n \n \n :example: response = client.create_robot(\n name='string',\n architecture='X86_64'|'ARM64'|'ARMHF',\n greengrassGroupId='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name for the robot.\n \n\n :type architecture: string\n :param architecture: [REQUIRED]\n The target architecture of the robot.\n \n\n :type greengrassGroupId: string\n :param greengrassGroupId: [REQUIRED]\n The Greengrass group id.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'greengrassGroupId': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n }\n \n \n \"\"\"\n pass\n\ndef create_robot_application(name=None, sources=None, robotSoftwareSuite=None):\n \"\"\"\n Creates a robot application.\n See also: AWS API Documentation\n \n \n :example: response = client.create_robot_application(\n name='string',\n sources=[\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n robotSoftwareSuite={\n 'name': 'ROS',\n 'version': 'Kinetic'\n }\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the robot application.\n \n\n :type sources: list\n :param sources: [REQUIRED]\n The sources of the robot application.\n (dict) --Information about a source configuration.\n s3Bucket (string) --The Amazon S3 bucket name.\n s3Key (string) --The s3 object key.\n architecture (string) --The target processor architecture for the application.\n \n \n\n :type robotSoftwareSuite: dict\n :param robotSoftwareSuite: [REQUIRED]\n The robot software suite used by the robot application.\n name (string) --The name of the robot software suite.\n version (string) --The 
version of the robot software suite.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'robotSoftwareSuite': {\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'revisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_robot_application_version(application=None, currentRevisionId=None):\n \"\"\"\n Creates a version of a robot application.\n See also: AWS API Documentation\n \n \n :example: response = client.create_robot_application_version(\n application='string',\n currentRevisionId='string'\n )\n \n \n :type application: string\n :param application: [REQUIRED]\n The application information for the robot application.\n \n\n :type currentRevisionId: string\n :param currentRevisionId: The current revision id for the robot application. If you provide a value and it matches the latest revision ID, a new version will be created.\n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'robotSoftwareSuite': {\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'revisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_simulation_application(name=None, sources=None, simulationSoftwareSuite=None, robotSoftwareSuite=None, renderingEngine=None):\n \"\"\"\n Creates a simulation application.\n See also: AWS API Documentation\n \n \n :example: response = client.create_simulation_application(\n name='string',\n sources=[\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n simulationSoftwareSuite={\n 'name': 'Gazebo',\n 'version': 'string'\n },\n robotSoftwareSuite={\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n renderingEngine={\n 'name': 'OGRE',\n 'version': 'string'\n }\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the simulation application.\n \n\n :type sources: list\n :param sources: [REQUIRED]\n The sources of the simulation application.\n (dict) --Information about a source configuration.\n s3Bucket (string) --The Amazon S3 bucket name.\n s3Key (string) --The s3 object key.\n architecture (string) --The target processor architecture for the application.\n \n \n\n :type simulationSoftwareSuite: dict\n :param simulationSoftwareSuite: [REQUIRED]\n The simulation software suite used by the simulation application.\n name (string) --The name of the simulation software suite.\n version (string) --The version of the simulation software suite.\n \n\n :type robotSoftwareSuite: dict\n :param robotSoftwareSuite: [REQUIRED]\n The robot software suite of the simulation application.\n name (string) --The name of the robot software suite.\n version (string) --The version of the robot software suite.\n \n\n :type renderingEngine: dict\n :param renderingEngine: [REQUIRED]\n The rendering engine for the simulation application.\n name (string) --The name of the rendering engine.\n version (string) --The version of the rendering engine.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'simulationSoftwareSuite': {\n 'name': 
'Gazebo',\n                'version': 'string'\n            },\n            'robotSoftwareSuite': {\n                'name': 'ROS',\n                'version': 'Kinetic'\n            },\n            'renderingEngine': {\n                'name': 'OGRE',\n                'version': 'string'\n            },\n            'lastUpdatedAt': datetime(2015, 1, 1),\n            'revisionId': 'string'\n        }\n    \n    \n    \"\"\"\n    pass\n\ndef create_simulation_application_version(application=None, currentRevisionId=None):\n    \"\"\"\n    Creates a version of a simulation application.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_simulation_application_version(\n        application='string',\n        currentRevisionId='string'\n    )\n    \n    \n    :type application: string\n    :param application: [REQUIRED]\n    The application information for the simulation application.\n    \n\n    :type currentRevisionId: string\n    :param currentRevisionId: The current revision id for the simulation application. If you provide a value and it matches the latest revision ID, a new version will be created.\n\n    :rtype: dict\n    :return: {\n        'arn': 'string',\n        'name': 'string',\n        'version': 'string',\n        'sources': [\n            {\n                's3Bucket': 'string',\n                's3Key': 'string',\n                'etag': 'string',\n                'architecture': 'X86_64'|'ARM64'|'ARMHF'\n            },\n        ],\n        'simulationSoftwareSuite': {\n            'name': 'Gazebo',\n            'version': 'string'\n        },\n        'robotSoftwareSuite': {\n            'name': 'ROS',\n            'version': 'Kinetic'\n        },\n        'renderingEngine': {\n            'name': 'OGRE',\n            'version': 'string'\n        },\n        'lastUpdatedAt': datetime(2015, 1, 1),\n        'revisionId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef create_simulation_job(clientRequestToken=None, outputLocation=None, maxJobDurationInSeconds=None, iamRole=None, failureBehavior=None, robotApplications=None, simulationApplications=None, vpcConfig=None):\n    \"\"\"\n    Creates a simulation job.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_simulation_job(\n        clientRequestToken='string',\n        outputLocation={\n            's3Bucket': 'string',\n            's3Prefix': 'string'\n        },\n        maxJobDurationInSeconds=123,\n        iamRole='string',\n        failureBehavior='Fail'|'Continue',\n        robotApplications=[\n            {\n                'application': 'string',\n                'applicationVersion': 'string',\n                'launchConfig': {\n                    'packageName': 'string',\n                    'launchFile': 'string',\n                    'environmentVariables': {\n                        'string': 'string'\n                    }\n                }\n            },\n        ],\n        simulationApplications=[\n            {\n                'application': 'string',\n                'applicationVersion': 'string',\n                'launchConfig': {\n                    'packageName': 'string',\n                    'launchFile': 'string',\n                    'environmentVariables': {\n                        'string': 'string'\n                    }\n                }\n            },\n        ],\n        vpcConfig={\n            'subnets': [\n                'string',\n            ],\n            'securityGroups': [\n                'string',\n            ],\n            'assignPublicIp': True|False\n        }\n    )\n    \n    \n    :type clientRequestToken: string\n    :param clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n    This field is autopopulated if not provided.\n    \n\n    :type outputLocation: dict\n    :param outputLocation: Location for output files generated by the simulation job.\n    s3Bucket (string) --The S3 bucket for output.\n    s3Prefix (string) --The S3 folder in the s3Bucket where output files will be placed.\n    \n\n    :type maxJobDurationInSeconds: integer\n    :param maxJobDurationInSeconds: [REQUIRED]\n    The maximum simulation job duration in seconds (up to 14 days or 1,209,600 seconds). When maxJobDurationInSeconds is reached, the simulation job status will transition to Completed .\n    \n\n    :type iamRole: string\n    :param iamRole: [REQUIRED]\n    The IAM role that allows the simulation instance to call the AWS APIs that are specified in its associated policies on your behalf.
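A hypothetical example value would be a role ARN such as arn:aws:iam::123456789012:role/MySimulationRole .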
This is how credentials are passed in to your simulation job. See how to specify AWS security credentials for your application .\n \n\n    :type failureBehavior: string\n    :param failureBehavior: The failure behavior of the simulation job.\n    Continue\n    Restart the simulation job in the same host instance.\n    Fail\n    Stop the simulation job and terminate the instance.\n    \n\n    :type robotApplications: list\n    :param robotApplications: The robot application to use in the simulation job.\n    (dict) --Application configuration information for a robot.\n    application (string) -- [REQUIRED]The application information for the robot application.\n    applicationVersion (string) --The version of the robot application.\n    launchConfig (dict) -- [REQUIRED]The launch configuration for the robot application.\n    packageName (string) -- [REQUIRED]The package name.\n    launchFile (string) -- [REQUIRED]The launch file.\n    environmentVariables (dict) --The environment variables for the application launch.\n    (string) --\n    (string) --\n    \n    \n    \n\n    :type simulationApplications: list\n    :param simulationApplications: The simulation application to use in the simulation job.\n    (dict) --Information about a simulation application configuration.\n    application (string) -- [REQUIRED]The application information for the simulation application.\n    applicationVersion (string) --The version of the simulation application.\n    launchConfig (dict) -- [REQUIRED]The launch configuration for the simulation application.\n    packageName (string) -- [REQUIRED]The package name.\n    launchFile (string) -- [REQUIRED]The launch file.\n    environmentVariables (dict) --The environment variables for the application launch.\n    (string) --\n    (string) --\n    \n    \n    \n\n    :type vpcConfig: dict\n    :param vpcConfig: If your simulation job accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC.
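A minimal sketch (the IDs are hypothetical) would be vpcConfig={'subnets': ['subnet-0abc123'], 'securityGroups': ['sg-0def456'], 'assignPublicIp': False} .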
You must provide at least one security group and one subnet ID.\n    subnets (list) -- [REQUIRED]A list of one or more subnet IDs in your VPC.\n    (string) --\n    securityGroups (list) --A list of one or more security group IDs in your VPC.\n    (string) --\n    assignPublicIp (boolean) --A boolean indicating whether to assign a public IP address.\n    \n\n    :rtype: dict\n    :return: {\n        'arn': 'string',\n        'status': 'Pending'|'Preparing'|'Running'|'Restarting'|'Completed'|'Failed'|'RunningFailed'|'Terminating'|'Terminated'|'Canceled',\n        'lastUpdatedAt': datetime(2015, 1, 1),\n        'failureBehavior': 'Fail'|'Continue',\n        'failureCode': 'InternalServiceError'|'RobotApplicationCrash'|'SimulationApplicationCrash'|'BadPermissionsRobotApplication'|'BadPermissionsSimulationApplication'|'BadPermissionsS3Output'|'BadPermissionsCloudwatchLogs'|'SubnetIpLimitExceeded'|'ENILimitExceeded'|'BadPermissionsUserCredentials'|'InvalidBundleRobotApplication'|'InvalidBundleSimulationApplication'|'RobotApplicationVersionMismatchedEtag'|'SimulationApplicationVersionMismatchedEtag',\n        'clientRequestToken': 'string',\n        'outputLocation': {\n            's3Bucket': 'string',\n            's3Prefix': 'string'\n        },\n        'maxJobDurationInSeconds': 123,\n        'simulationTimeMillis': 123,\n        'iamRole': 'string',\n        'robotApplications': [\n            {\n                'application': 'string',\n                'applicationVersion': 'string',\n                'launchConfig': {\n                    'packageName': 'string',\n                    'launchFile': 'string',\n                    'environmentVariables': {\n                        'string': 'string'\n                    }\n                }\n            },\n        ],\n        'simulationApplications': [\n            {\n                'application': 'string',\n                'applicationVersion': 'string',\n                'launchConfig': {\n                    'packageName': 'string',\n                    'launchFile': 'string',\n                    'environmentVariables': {\n                        'string': 'string'\n                    }\n                }\n            },\n        ],\n        'vpcConfig': {\n            'subnets': [\n                'string',\n            ],\n            'securityGroups': [\n                'string',\n            ],\n            'vpcId': 'string',\n            'assignPublicIp': True|False\n        }\n    }\n    \n    \n    :returns: \n    (string) --\n    (string) --\n    \n    \n    \n    \"\"\"\n    pass\n\ndef delete_fleet(fleet=None):\n    \"\"\"\n    Deletes a fleet.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_fleet(\n        fleet='string'\n    )\n    \n    \n    :type fleet: string\n    :param fleet: [REQUIRED]\n    The Amazon Resource Name (ARN) of the fleet.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef delete_robot(robot=None):\n    \"\"\"\n    Deletes a robot.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_robot(\n        robot='string'\n    )\n    \n    \n    :type robot: string\n    :param robot: [REQUIRED]\n    The Amazon Resource Name (ARN) of the robot.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef delete_robot_application(application=None, applicationVersion=None):\n    \"\"\"\n    Deletes a robot application.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_robot_application(\n        application='string',\n        applicationVersion='string'\n    )\n    \n    \n    :type application: string\n    :param application: [REQUIRED]\n    The Amazon Resource Name (ARN) of the robot application.\n    \n\n    :type applicationVersion: string\n    :param applicationVersion: The version of the robot application to delete.\n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef delete_simulation_application(application=None, applicationVersion=None):\n    \"\"\"\n    Deletes a simulation application.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_simulation_application(\n        application='string',\n        applicationVersion='string'\n    )\n    \n    \n    :type application: string\n    :param application: [REQUIRED]\n    The application
information for the simulation application to delete.\n \n\n :type applicationVersion: string\n :param applicationVersion: The version of the simulation application to delete.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef deregister_robot(fleet=None, robot=None):\n \"\"\"\n Deregisters a robot.\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_robot(\n fleet='string',\n robot='string'\n )\n \n \n :type fleet: string\n :param fleet: [REQUIRED]\n The Amazon Resource Name (ARN) of the fleet.\n \n\n :type robot: string\n :param robot: [REQUIRED]\n The Amazon Resource Name (ARN) of the robot.\n \n\n :rtype: dict\n :return: {\n 'fleet': 'string',\n 'robot': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_deployment_job(job=None):\n \"\"\"\n Describes a deployment job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_deployment_job(\n job='string'\n )\n \n \n :type job: string\n :param job: [REQUIRED]\n The Amazon Resource Name (ARN) of the deployment job.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'fleet': 'string',\n 'status': 'Pending'|'Preparing'|'InProgress'|'Failed'|'Succeeded',\n 'deploymentConfig': {\n 'concurrentDeploymentPercentage': 123,\n 'failureThresholdPercentage': 123\n },\n 'deploymentApplicationConfigs': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'preLaunchFile': 'string',\n 'launchFile': 'string',\n 'postLaunchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'failureReason': 'string',\n 'failureCode': 'ResourceNotFound'|'FailureThresholdBreached'|'RobotDeploymentNoResponse'|'GreengrassDeploymentFailed'|'MissingRobotArchitecture'|'MissingRobotApplicationArchitecture'|'MissingRobotDeploymentResource'|'GreengrassGroupVersionDoesNotExist'|'ExtractingBundleFailure'|'PreLaunchFileFailure'|'PostLaunchFileFailure'|'BadPermissionError'|'InternalServerError',\n 'createdAt': datetime(2015, 1, 1),\n 'robotDeploymentSummary': [\n {\n 'arn': 'string',\n 'deploymentStartTime': datetime(2015, 1, 1),\n 'deploymentFinishTime': datetime(2015, 1, 1),\n 'status': 'Available'|'Registered'|'PendingNewDeployment'|'Deploying'|'Failed'|'InSync'|'NoResponse',\n 'progressDetail': {\n 'currentProgress': 'string',\n 'targetResource': 'string'\n },\n 'failureReason': 'string',\n 'failureCode': 'ResourceNotFound'|'FailureThresholdBreached'|'RobotDeploymentNoResponse'|'GreengrassDeploymentFailed'|'MissingRobotArchitecture'|'MissingRobotApplicationArchitecture'|'MissingRobotDeploymentResource'|'GreengrassGroupVersionDoesNotExist'|'ExtractingBundleFailure'|'PreLaunchFileFailure'|'PostLaunchFileFailure'|'BadPermissionError'|'InternalServerError'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_fleet(fleet=None):\n \"\"\"\n Describes a fleet.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_fleet(\n fleet='string'\n )\n \n \n :type fleet: string\n :param fleet: [REQUIRED]\n The Amazon Resource Name (ARN) of the fleet.\n \n\n :rtype: dict\n :return: {\n 'name': 'string',\n 'arn': 'string',\n 'robots': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'fleetArn': 'string',\n 'status': 'Available'|'Registered'|'PendingNewDeployment'|'Deploying'|'Failed'|'InSync'|'NoResponse',\n 'greenGrassGroupId': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'architecture': 'X86_64'|'ARM64'|'ARMHF',\n 
'lastDeploymentJob': 'string',\n 'lastDeploymentTime': datetime(2015, 1, 1)\n },\n ],\n 'createdAt': datetime(2015, 1, 1),\n 'lastDeploymentStatus': 'Pending'|'Preparing'|'InProgress'|'Failed'|'Succeeded',\n 'lastDeploymentJob': 'string',\n 'lastDeploymentTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_robot(robot=None):\n \"\"\"\n Describes a robot.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_robot(\n robot='string'\n )\n \n \n :type robot: string\n :param robot: [REQUIRED]\n The Amazon Resource Name (ARN) of the robot to be described.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'fleetArn': 'string',\n 'status': 'Available'|'Registered'|'PendingNewDeployment'|'Deploying'|'Failed'|'InSync'|'NoResponse',\n 'greengrassGroupId': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'architecture': 'X86_64'|'ARM64'|'ARMHF',\n 'lastDeploymentJob': 'string',\n 'lastDeploymentTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_robot_application(application=None, applicationVersion=None):\n \"\"\"\n Describes a robot application.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_robot_application(\n application='string',\n applicationVersion='string'\n )\n \n \n :type application: string\n :param application: [REQUIRED]\n The Amazon Resource Name (ARN) of the robot application.\n \n\n :type applicationVersion: string\n :param applicationVersion: The version of the robot application to describe.\n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'robotSoftwareSuite': {\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n 'revisionId': 'string',\n 'lastUpdatedAt': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_simulation_application(application=None, applicationVersion=None):\n \"\"\"\n Describes a simulation application.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_simulation_application(\n application='string',\n applicationVersion='string'\n )\n \n \n :type application: string\n :param application: [REQUIRED]\n The application information for the simulation application.\n \n\n :type applicationVersion: string\n :param applicationVersion: The version of the simulation application to describe.\n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'simulationSoftwareSuite': {\n 'name': 'Gazebo',\n 'version': 'string'\n },\n 'robotSoftwareSuite': {\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n 'renderingEngine': {\n 'name': 'OGRE',\n 'version': 'string'\n },\n 'revisionId': 'string',\n 'lastUpdatedAt': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_simulation_job(job=None):\n \"\"\"\n Describes a simulation job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_simulation_job(\n job='string'\n )\n \n \n :type job: string\n :param job: [REQUIRED]\n The Amazon Resource Name (ARN) of the simulation job to be described.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'status': 'Pending'|'Preparing'|'Running'|'Restarting'|'Completed'|'Failed'|'RunningFailed'|'Terminating'|'Terminated'|'Canceled',\n 'lastUpdatedAt': 
datetime(2015, 1, 1),\n 'failureBehavior': 'Fail'|'Continue',\n 'failureCode': 'InternalServiceError'|'RobotApplicationCrash'|'SimulationApplicationCrash'|'BadPermissionsRobotApplication'|'BadPermissionsSimulationApplication'|'BadPermissionsS3Output'|'BadPermissionsCloudwatchLogs'|'SubnetIpLimitExceeded'|'ENILimitExceeded'|'BadPermissionsUserCredentials'|'InvalidBundleRobotApplication'|'InvalidBundleSimulationApplication'|'RobotApplicationVersionMismatchedEtag'|'SimulationApplicationVersionMismatchedEtag',\n 'clientRequestToken': 'string',\n 'outputLocation': {\n 's3Bucket': 'string',\n 's3Prefix': 'string'\n },\n 'maxJobDurationInSeconds': 123,\n 'simulationTimeMillis': 123,\n 'iamRole': 'string',\n 'robotApplications': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'launchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'simulationApplications': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'launchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'vpcConfig': {\n 'subnets': [\n 'string',\n ],\n 'securityGroups': [\n 'string',\n ],\n 'vpcId': 'string',\n 'assignPublicIp': True|False\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_deployment_jobs(filters=None, nextToken=None, maxResults=None):\n \"\"\"\n Returns a list of deployment jobs for a fleet. 
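As an illustrative aside (not part of the generated stubs): a minimal sketch of the nextToken pagination pattern that the parameters documented below describe, assuming AWS credentials and a default region are already configured; the page size is an arbitrary example value.

    import boto3

    robomaker = boto3.client('robomaker')  # assumes credentials/region from the environment
    kwargs = {'maxResults': 50}  # example page size; the docs below allow 1-100
    while True:
        page = robomaker.list_deployment_jobs(**kwargs)
        for job in page.get('deploymentJobs', []):
            print(job['arn'], job['status'])  # keys per the return shape below
        token = page.get('nextToken')
        if not token:
            break  # no further pages
        kwargs['nextToken'] = token  # opaque token; pass back unchanged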
You can optionally provide filters to retrieve specific deployment jobs.\n See also: AWS API Documentation\n \n \n :example: response = client.list_deployment_jobs(\n filters=[\n {\n 'name': 'string',\n 'values': [\n 'string',\n ]\n },\n ],\n nextToken='string',\n maxResults=123\n )\n \n \n :type filters: list\n :param filters: Optional filters to limit results.\n (dict) --Information about a filter.\n name (string) --The name of the filter.\n values (list) --A list of values.\n (string) --\n \n \n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListDeploymentJobs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of deployment job results returned by ListDeploymentJobs in paginated output. When this parameter is used, ListDeploymentJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListDeploymentJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListDeploymentJobs returns up to 100 results and a nextToken value if applicable.\n\n :rtype: dict\n :return: {\n 'deploymentJobs': [\n {\n 'arn': 'string',\n 'fleet': 'string',\n 'status': 'Pending'|'Preparing'|'InProgress'|'Failed'|'Succeeded',\n 'deploymentApplicationConfigs': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'preLaunchFile': 'string',\n 'launchFile': 'string',\n 'postLaunchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'deploymentConfig': {\n 'concurrentDeploymentPercentage': 123,\n 'failureThresholdPercentage': 123\n },\n 'failureReason': 'string',\n 'failureCode': 'ResourceNotFound'|'FailureThresholdBreached'|'RobotDeploymentNoResponse'|'GreengrassDeploymentFailed'|'MissingRobotArchitecture'|'MissingRobotApplicationArchitecture'|'MissingRobotDeploymentResource'|'GreengrassGroupVersionDoesNotExist'|'ExtractingBundleFailure'|'PreLaunchFileFailure'|'PostLaunchFileFailure'|'BadPermissionError'|'InternalServerError',\n 'createdAt': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_fleets(nextToken=None, maxResults=None, filters=None):\n \"\"\"\n Returns a list of fleets. You can optionally provide filters to retrieve specific fleets.\n See also: AWS API Documentation\n \n \n :example: response = client.list_fleets(\n nextToken='string',\n maxResults=123,\n filters=[\n {\n 'name': 'string',\n 'values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListFleets request where maxResults was used and the results exceeded the value of that parameter. 
Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of deployment job results returned by ListFleets in paginated output. When this parameter is used, ListFleets only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListFleets returns up to 100 results and a nextToken value if applicable.\n\n :type filters: list\n :param filters: Optional filters to limit results.\n (dict) --Information about a filter.\n name (string) --The name of the filter.\n values (list) --A list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'fleetDetails': [\n {\n 'name': 'string',\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'lastDeploymentStatus': 'Pending'|'Preparing'|'InProgress'|'Failed'|'Succeeded',\n 'lastDeploymentJob': 'string',\n 'lastDeploymentTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_robot_applications(versionQualifier=None, nextToken=None, maxResults=None, filters=None):\n \"\"\"\n Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications.\n See also: AWS API Documentation\n \n \n :example: response = client.list_robot_applications(\n versionQualifier='string',\n nextToken='string',\n maxResults=123,\n filters=[\n {\n 'name': 'string',\n 'values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type versionQualifier: string\n :param versionQualifier: The version qualifier of the robot application.\n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListRobotApplications request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of deployment job results returned by ListRobotApplications in paginated output. When this parameter is used, ListRobotApplications only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRobotApplications returns up to 100 results and a nextToken value if applicable.\n\n :type filters: list\n :param filters: Optional filters to limit results.\n (dict) --Information about a filter.\n name (string) --The name of the filter.\n values (list) --A list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'robotApplicationSummaries': [\n {\n 'name': 'string',\n 'arn': 'string',\n 'version': 'string',\n 'lastUpdatedAt': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_robots(nextToken=None, maxResults=None, filters=None):\n \"\"\"\n Returns a list of robots. 
You can optionally provide filters to retrieve specific robots.\n See also: AWS API Documentation\n \n \n :example: response = client.list_robots(\n nextToken='string',\n maxResults=123,\n filters=[\n {\n 'name': 'string',\n 'values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListRobots request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of deployment job results returned by ListRobots in paginated output. When this parameter is used, ListRobots only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRobots returns up to 100 results and a nextToken value if applicable.\n\n :type filters: list\n :param filters: Optional filters to limit results.\n (dict) --Information about a filter.\n name (string) --The name of the filter.\n values (list) --A list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'robots': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'fleetArn': 'string',\n 'status': 'Available'|'Registered'|'PendingNewDeployment'|'Deploying'|'Failed'|'InSync'|'NoResponse',\n 'greenGrassGroupId': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'architecture': 'X86_64'|'ARM64'|'ARMHF',\n 'lastDeploymentJob': 'string',\n 'lastDeploymentTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_simulation_applications(versionQualifier=None, nextToken=None, maxResults=None, filters=None):\n \"\"\"\n Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications.\n See also: AWS API Documentation\n \n \n :example: response = client.list_simulation_applications(\n versionQualifier='string',\n nextToken='string',\n maxResults=123,\n filters=[\n {\n 'name': 'string',\n 'values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type versionQualifier: string\n :param versionQualifier: The version qualifier of the simulation application.\n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListSimulationApplications request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of deployment job results returned by ListSimulationApplications in paginated output. When this parameter is used, ListSimulationApplications only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 100. 
If this parameter is not used, then ListSimulationApplications returns up to 100 results and a nextToken value if applicable.\n\n :type filters: list\n :param filters: Optional list of filters to limit results. The only valid filter name is name .\n (dict) --Information about a filter.\n name (string) --The name of the filter.\n values (list) --A list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'simulationApplicationSummaries': [\n {\n 'name': 'string',\n 'arn': 'string',\n 'version': 'string',\n 'lastUpdatedAt': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_simulation_jobs(nextToken=None, maxResults=None, filters=None):\n \"\"\"\n Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs.\n See also: AWS API Documentation\n \n \n :example: response = client.list_simulation_jobs(\n nextToken='string',\n maxResults=123,\n filters=[\n {\n 'name': 'string',\n 'values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListSimulationJobs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of deployment job results returned by ListSimulationJobs in paginated output. When this parameter is used, ListSimulationJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 100. 
If this parameter is not used, then ListSimulationJobs returns up to 100 results and a nextToken value if applicable.\n\n :type filters: list\n :param filters: Optional filters to limit results.\n (dict) --Information about a filter.\n name (string) --The name of the filter.\n values (list) --A list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'simulationJobSummaries': [\n {\n 'arn': 'string',\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'name': 'string',\n 'status': 'Pending'|'Preparing'|'Running'|'Restarting'|'Completed'|'Failed'|'RunningFailed'|'Terminating'|'Terminated'|'Canceled',\n 'simulationApplicationNames': [\n 'string',\n ],\n 'robotApplicationNames': [\n 'string',\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef register_robot(fleet=None, robot=None):\n \"\"\"\n Registers a robot with a fleet.\n See also: AWS API Documentation\n \n \n :example: response = client.register_robot(\n fleet='string',\n robot='string'\n )\n \n \n :type fleet: string\n :param fleet: [REQUIRED]\n The Amazon Resource Name (ARN) of the fleet.\n \n\n :type robot: string\n :param robot: [REQUIRED]\n The Amazon Resource Name (ARN) of the robot.\n \n\n :rtype: dict\n :return: {\n 'fleet': 'string',\n 'robot': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef restart_simulation_job(job=None):\n \"\"\"\n Restarts a running simulation job.\n See also: AWS API Documentation\n \n \n :example: response = client.restart_simulation_job(\n job='string'\n )\n \n \n :type job: string\n :param job: [REQUIRED]\n The Amazon Resource Name (ARN) of the simulation job.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef sync_deployment_job(clientRequestToken=None, fleet=None):\n \"\"\"\n Synchronizes robots in a fleet to the latest deployment. 
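An illustrative sketch (not part of the generated stubs) of the call documented below; the fleet ARN is a placeholder, and clientRequestToken is autopopulated by boto3 when omitted.

    import boto3

    robomaker = boto3.client('robomaker')  # assumes credentials/region from the environment
    response = robomaker.sync_deployment_job(
        fleet='arn:aws:robomaker:us-east-1:123456789012:deployment-fleet/MyFleet/1111111111111'  # placeholder ARN
    )
    print(response['arn'], response['status'])  # keys per the return shape below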
This is helpful if robots were added after a deployment.\n See also: AWS API Documentation\n \n \n :example: response = client.sync_deployment_job(\n clientRequestToken='string',\n fleet='string'\n )\n \n \n :type clientRequestToken: string\n :param clientRequestToken: [REQUIRED]\n Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n This field is autopopulated if not provided.\n \n\n :type fleet: string\n :param fleet: [REQUIRED]\n The target fleet for the synchronization.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'fleet': 'string',\n 'status': 'Pending'|'Preparing'|'InProgress'|'Failed'|'Succeeded',\n 'deploymentConfig': {\n 'concurrentDeploymentPercentage': 123,\n 'failureThresholdPercentage': 123\n },\n 'deploymentApplicationConfigs': [\n {\n 'application': 'string',\n 'applicationVersion': 'string',\n 'launchConfig': {\n 'packageName': 'string',\n 'preLaunchFile': 'string',\n 'launchFile': 'string',\n 'postLaunchFile': 'string',\n 'environmentVariables': {\n 'string': 'string'\n }\n }\n },\n ],\n 'failureReason': 'string',\n 'failureCode': 'ResourceNotFound'|'FailureThresholdBreached'|'RobotDeploymentNoResponse'|'GreengrassDeploymentFailed'|'MissingRobotArchitecture'|'MissingRobotApplicationArchitecture'|'MissingRobotDeploymentResource'|'GreengrassGroupVersionDoesNotExist'|'ExtractingBundleFailure'|'PreLaunchFileFailure'|'PostLaunchFileFailure'|'BadPermissionError'|'InternalServerError',\n 'createdAt': datetime(2015, 1, 1)\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef update_robot_application(application=None, sources=None, robotSoftwareSuite=None, currentRevisionId=None):\n \"\"\"\n Updates a robot application.\n See also: AWS API Documentation\n \n \n :example: response = client.update_robot_application(\n application='string',\n sources=[\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n robotSoftwareSuite={\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n currentRevisionId='string'\n )\n \n \n :type application: string\n :param application: [REQUIRED]\n The application information for the robot application.\n \n\n :type sources: list\n :param sources: [REQUIRED]\n The sources of the robot application.\n (dict) --Information about a source configuration.\n s3Bucket (string) --The Amazon S3 bucket name.\n s3Key (string) --The s3 object key.\n architecture (string) --The target processor architecture for the application.\n \n \n\n :type robotSoftwareSuite: dict\n :param robotSoftwareSuite: [REQUIRED]\n The robot software suite used by the robot application.\n name (string) --The name of the robot software suite.\n version (string) --The version of the robot software suite.\n \n\n :type currentRevisionId: string\n :param currentRevisionId: The revision id for the robot application.\n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'robotSoftwareSuite': {\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'revisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_simulation_application(application=None, sources=None, simulationSoftwareSuite=None, robotSoftwareSuite=None, renderingEngine=None, currentRevisionId=None):\n \"\"\"\n Updates a simulation application.\n See also: AWS API Documentation\n \n \n :example: response = 
client.update_simulation_application(\n application='string',\n sources=[\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n simulationSoftwareSuite={\n 'name': 'Gazebo',\n 'version': 'string'\n },\n robotSoftwareSuite={\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n renderingEngine={\n 'name': 'OGRE',\n 'version': 'string'\n },\n currentRevisionId='string'\n )\n \n \n :type application: string\n :param application: [REQUIRED]\n The application information for the simulation application.\n \n\n :type sources: list\n :param sources: [REQUIRED]\n The sources of the simulation application.\n (dict) --Information about a source configuration.\n s3Bucket (string) --The Amazon S3 bucket name.\n s3Key (string) --The s3 object key.\n architecture (string) --The target processor architecture for the application.\n \n \n\n :type simulationSoftwareSuite: dict\n :param simulationSoftwareSuite: [REQUIRED]\n The simulation software suite used by the simulation application.\n name (string) --The name of the simulation software suite.\n version (string) --The version of the simulation software suite.\n \n\n :type robotSoftwareSuite: dict\n :param robotSoftwareSuite: [REQUIRED]\n Information about the robot software suite.\n name (string) --The name of the robot software suite.\n version (string) --The version of the robot software suite.\n \n\n :type renderingEngine: dict\n :param renderingEngine: [REQUIRED]\n The rendering engine for the simulation application.\n name (string) --The name of the rendering engine.\n version (string) --The version of the rendering engine.\n \n\n :type currentRevisionId: string\n :param currentRevisionId: The revision id for the robot application.\n\n :rtype: dict\n :return: {\n 'arn': 'string',\n 'name': 'string',\n 'version': 'string',\n 'sources': [\n {\n 's3Bucket': 'string',\n 's3Key': 'string',\n 'etag': 'string',\n 'architecture': 'X86_64'|'ARM64'|'ARMHF'\n },\n ],\n 'simulationSoftwareSuite': {\n 'name': 'Gazebo',\n 'version': 'string'\n },\n 'robotSoftwareSuite': {\n 'name': 'ROS',\n 'version': 'Kinetic'\n },\n 'renderingEngine': {\n 'name': 'OGRE',\n 'version': 'string'\n },\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'revisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.669177770614624, "alphanum_fraction": 0.6783297657966614, "avg_line_length": 51.94265365600586, "blob_id": "36c45eb8e930e65903bddf8a1498904c39b05707", "content_id": "8b0f41c8d1278205cef3f85b699a705937f6bca5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82168, "license_type": "permissive", "max_line_length": 498, "num_lines": 1552, "path": "/pyboto3/servicediscovery.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING 
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_http_namespace(Name=None, CreatorRequestId=None, Description=None):\n \"\"\"\n Creates an HTTP namespace. Service instances that you register using an HTTP namespace can be discovered using a DiscoverInstances request but can't be discovered using DNS.\n For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_http_namespace(\n Name='string',\n CreatorRequestId='string',\n Description='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name that you want to assign to this namespace.\n \n\n :type CreatorRequestId: string\n :param CreatorRequestId: A unique string that identifies the request and that allows failed CreateHttpNamespace requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.\n This field is autopopulated if not provided.\n \n\n :type Description: string\n :param Description: A description for the namespace.\n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_private_dns_namespace(Name=None, CreatorRequestId=None, Description=None, Vpc=None):\n \"\"\"\n Creates a private namespace based on DNS, which will be visible only inside a specified Amazon VPC. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend , the resulting DNS name for the service will be backend.example.com . For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_private_dns_namespace(\n Name='string',\n CreatorRequestId='string',\n Description='string',\n Vpc='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name that you want to assign to this namespace. When you create a private DNS namespace, AWS Cloud Map automatically creates an Amazon Route 53 private hosted zone that has the same name as the namespace.\n \n\n :type CreatorRequestId: string\n :param CreatorRequestId: A unique string that identifies the request and that allows failed CreatePrivateDnsNamespace requests to be retried without the risk of executing the operation twice. 
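An illustrative sketch (not part of the generated stubs) of the CreateHttpNamespace call documented above, capturing the asynchronous operation ID it returns; the namespace name and description are placeholders, and credentials/region are assumed to be configured.

    import boto3

    servicediscovery = boto3.client('servicediscovery')
    response = servicediscovery.create_http_namespace(
        Name='example-http-namespace',      # placeholder name
        Description='HTTP-only discovery',  # optional, per the docs above
    )
    print('OperationId:', response['OperationId'])  # track the async namespace creation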
CreatorRequestId can be any unique string, for example, a date/time stamp.\n This field is autopopulated if not provided.\n \n\n :type Description: string\n :param Description: A description for the namespace.\n\n :type Vpc: string\n :param Vpc: [REQUIRED]\n The ID of the Amazon VPC that you want to associate the namespace with.\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_public_dns_namespace(Name=None, CreatorRequestId=None, Description=None):\n \"\"\"\n Creates a public namespace based on DNS, which will be visible on the internet. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend , the resulting DNS name for the service will be backend.example.com . For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_public_dns_namespace(\n Name='string',\n CreatorRequestId='string',\n Description='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name that you want to assign to this namespace.\n \n\n :type CreatorRequestId: string\n :param CreatorRequestId: A unique string that identifies the request and that allows failed CreatePublicDnsNamespace requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.\n This field is autopopulated if not provided.\n \n\n :type Description: string\n :param Description: A description for the namespace.\n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_service(Name=None, NamespaceId=None, CreatorRequestId=None, Description=None, DnsConfig=None, HealthCheckConfig=None, HealthCheckCustomConfig=None):\n \"\"\"\n Creates a service, which defines the configuration for the following entities:\n After you create the service, you can submit a RegisterInstance request, and AWS Cloud Map uses the values in the configuration to create the specified entities.\n For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_service(\n Name='string',\n NamespaceId='string',\n CreatorRequestId='string',\n Description='string',\n DnsConfig={\n 'NamespaceId': 'string',\n 'RoutingPolicy': 'MULTIVALUE'|'WEIGHTED',\n 'DnsRecords': [\n {\n 'Type': 'SRV'|'A'|'AAAA'|'CNAME',\n 'TTL': 123\n },\n ]\n },\n HealthCheckConfig={\n 'Type': 'HTTP'|'HTTPS'|'TCP',\n 'ResourcePath': 'string',\n 'FailureThreshold': 123\n },\n HealthCheckCustomConfig={\n 'FailureThreshold': 123\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name that you want to assign to the service.\n \n\n :type NamespaceId: string\n :param NamespaceId: The ID of the namespace that you want to use to create the service.\n\n :type CreatorRequestId: string\n :param CreatorRequestId: A unique string that identifies the request and that allows failed CreateService requests to be retried without the risk of executing the operation twice. 
CreatorRequestId can be any unique string, for example, a date/time stamp.\n This field is autopopulated if not provided.\n \n\n :type Description: string\n :param Description: A description for the service.\n\n :type DnsConfig: dict\n :param DnsConfig: A complex type that contains information about the Amazon Route 53 records that you want AWS Cloud Map to create when you register an instance.\n NamespaceId (string) --The ID of the namespace to use for DNS configuration.\n RoutingPolicy (string) --The routing policy that you want to apply to all Route 53 DNS records that AWS Cloud Map creates when you register an instance and specify this service.\n Note\n If you want to use this service to register instances that create alias records, specify WEIGHTED for the routing policy.\n You can specify the following values:\n MULTIVALUE\n If you define a health check for the service and the health check is healthy, Route 53 returns the applicable value for up to eight instances.\n For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with IP addresses for up to eight healthy instances. If fewer than eight instances are healthy, Route 53 responds to every DNS query with the IP addresses for all of the healthy instances.\n If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the values for up to eight instances.\n For more information about the multivalue routing policy, see Multivalue Answer Routing in the Route 53 Developer Guide .\n WEIGHTED\n Route 53 returns the applicable value from one randomly selected instance from among the instances that you registered using the same service. Currently, all records have the same weight, so you can't route more or less traffic to any instances.\n For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with the IP address for one randomly selected instance from among the healthy instances. If no instances are healthy, Route 53 responds to DNS queries as if all of the instances were healthy.\n If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the applicable value for one randomly selected instance.\n For more information about the weighted routing policy, see Weighted Routing in the Route 53 Developer Guide .\n DnsRecords (list) -- [REQUIRED]An array that contains one DnsRecord object for each Route 53 DNS record that you want AWS Cloud Map to create when you register an instance.\n (dict) --A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.\n Type (string) -- [REQUIRED]The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries.\n Note the following:\n A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.\n CNAME records: If you specify CNAME for Type , you can't define any other records. 
This is a limitation of DNS: you can't create a CNAME record and any other type of record that has the same name as a CNAME record.\n Alias records: If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type .\n All records: You specify settings other than TTL and Type when you register an instance.\n The following values are supported:\n A\n Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.\n AAAA\n Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.\n CNAME\n Route 53 returns the domain name of the resource, such as www.example.com. Note the following:\n You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes .\n You must specify WEIGHTED for the value of RoutingPolicy .\n You can't specify both CNAME for Type and settings for HealthCheckConfig . If you do, the request will fail with an InvalidInput error.\n SRV\n Route 53 returns the value for an SRV record. The value for an SRV record uses the following values:\n priority weight port service-hostname\n Note the following about the values:\n The values of priority and weight are both set to 1 and can't be changed.\n The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.\n The value of service-hostname is a concatenation of the following values:\n The value that you specify for InstanceId when you register an instance.\n The name of the service.\n The name of the namespace.\n For example, if the value of InstanceId is test , the name of the service is backend , and the name of the namespace is example.com , the value of service-hostname is:\n test.backend.example.com\n If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both in the RegisterInstance request, AWS Cloud Map automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. You can ignore these records.\n TTL (integer) -- [REQUIRED]The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.\n Note\n Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.\n \n \n\n :type HealthCheckConfig: dict\n :param HealthCheckConfig: \n Public DNS namespaces only. A complex type that contains settings for an optional Route 53 health check. 
If you specify settings for a health check, AWS Cloud Map associates the health check with all the Route 53 DNS records that you specify in DnsConfig .\n Warning\n If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.\n For information about the charges for health checks, see AWS Cloud Map Pricing .\n Type (string) -- [REQUIRED]The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.\n Warning\n You can't change the value of Type after you create a health check.\n You can create the following types of health checks:\n HTTP : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.\n HTTPS : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.\n Warning\n If you specify HTTPS for the value of Type , the endpoint must support TLS v1.0 or later.\n TCP : Route 53 tries to establish a TCP connection. If you specify TCP for Type , don't specify a value for ResourcePath .\n For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n ResourcePath (string) --The path that you want Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, such as the file /docs/route53-health-check.html . Route 53 automatically adds the DNS name for the service. If you don't specify a value for ResourcePath , the default value is / .\n If you specify TCP for Type , you must not specify a value for ResourcePath .\n FailureThreshold (integer) --The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n \n\n :type HealthCheckCustomConfig: dict\n :param HealthCheckCustomConfig: A complex type that contains information about an optional custom health check.\n Warning\n If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.\n FailureThreshold (integer) --The number of 30-second intervals that you want Cloud Map to wait after receiving an UpdateInstanceCustomHealthStatus request before it changes the health status of a service instance. For example, suppose you specify a value of 2 for FailureThreshold , and then your application sends an UpdateInstanceCustomHealthStatus request. Cloud Map waits for approximately 60 seconds (2 x 30) before changing the status of the service instance based on that request.\n Sending a second or subsequent UpdateInstanceCustomHealthStatus request with the same value before FailureThreshold x 30 seconds has passed doesn't accelerate the change. 
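As an illustrative aside (not part of the generated stubs): a minimal create_service sketch combining the DnsConfig and HealthCheckCustomConfig shapes documented above; the namespace ID, TTL, and threshold are placeholder example values, and credentials/region are assumed to be configured.

    import boto3

    servicediscovery = boto3.client('servicediscovery')
    response = servicediscovery.create_service(
        Name='backend',
        NamespaceId='ns-examplenamespace00',  # placeholder namespace ID
        DnsConfig={
            'RoutingPolicy': 'MULTIVALUE',             # see the routing policy notes above
            'DnsRecords': [{'Type': 'A', 'TTL': 60}],  # one A record with a 60-second TTL
        },
        HealthCheckCustomConfig={'FailureThreshold': 2},  # ~60 seconds (2 x 30) per the docs above
    )
    service = response['Service']
    print(service['Id'], service['Arn'])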
Cloud Map still waits FailureThreshold x 30 seconds after the first request to make the change.\n \n\n :rtype: dict\n :return: {\n 'Service': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string',\n 'NamespaceId': 'string',\n 'Description': 'string',\n 'InstanceCount': 123,\n 'DnsConfig': {\n 'NamespaceId': 'string',\n 'RoutingPolicy': 'MULTIVALUE'|'WEIGHTED',\n 'DnsRecords': [\n {\n 'Type': 'SRV'|'A'|'AAAA'|'CNAME',\n 'TTL': 123\n },\n ]\n },\n 'HealthCheckConfig': {\n 'Type': 'HTTP'|'HTTPS'|'TCP',\n 'ResourcePath': 'string',\n 'FailureThreshold': 123\n },\n 'HealthCheckCustomConfig': {\n 'FailureThreshold': 123\n },\n 'CreateDate': datetime(2015, 1, 1),\n 'CreatorRequestId': 'string'\n }\n }\n \n \n :returns: \n Name (string) -- [REQUIRED]\n The name that you want to assign to the service.\n \n NamespaceId (string) -- The ID of the namespace that you want to use to create the service.\n CreatorRequestId (string) -- A unique string that identifies the request and that allows failed CreateService requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.\n This field is autopopulated if not provided.\n \n Description (string) -- A description for the service.\n DnsConfig (dict) -- A complex type that contains information about the Amazon Route 53 records that you want AWS Cloud Map to create when you register an instance.\n \n NamespaceId (string) --The ID of the namespace to use for DNS configuration.\n \n RoutingPolicy (string) --The routing policy that you want to apply to all Route 53 DNS records that AWS Cloud Map creates when you register an instance and specify this service.\n \n Note\n If you want to use this service to register instances that create alias records, specify WEIGHTED for the routing policy.\n \n You can specify the following values:\n \n MULTIVALUE\n If you define a health check for the service and the health check is healthy, Route 53 returns the applicable value for up to eight instances.\n For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with IP addresses for up to eight healthy instances. If fewer than eight instances are healthy, Route 53 responds to every DNS query with the IP addresses for all of the healthy instances.\n If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the values for up to eight instances.\n For more information about the multivalue routing policy, see Multivalue Answer Routing in the Route 53 Developer Guide .\n \n WEIGHTED\n Route 53 returns the applicable value from one randomly selected instance from among the instances that you registered using the same service. Currently, all records have the same weight, so you can't route more or less traffic to any instances.\n For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with the IP address for one randomly selected instance from among the healthy instances. 
If no instances are healthy, Route 53 responds to DNS queries as if all of the instances were healthy.\n If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the applicable value for one randomly selected instance.\n For more information about the weighted routing policy, see Weighted Routing in the Route 53 Developer Guide .\n \n DnsRecords (list) -- [REQUIRED]An array that contains one DnsRecord object for each Route 53 DNS record that you want AWS Cloud Map to create when you register an instance.\n \n (dict) --A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.\n \n Type (string) -- [REQUIRED]The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries.\n Note the following:\n \n A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.\n CNAME records: If you specify CNAME for Type , you can't define any other records. This is a limitation of DNS: you can't create a CNAME record and any other type of record that has the same name as a CNAME record.\n Alias records: If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type .\n All records: You specify settings other than TTL and Type when you register an instance.\n \n The following values are supported:\n \n A\n Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.\n \n AAAA\n Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.\n \n CNAME\n Route 53 returns the domain name of the resource, such as www.example.com. Note the following:\n \n You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes .\n You must specify WEIGHTED for the value of RoutingPolicy .\n You can't specify both CNAME for Type and settings for HealthCheckConfig . If you do, the request will fail with an InvalidInput error.\n \n \n SRV\n Route 53 returns the value for an SRV record. The value for an SRV record uses the following values:\n \n priority weight port service-hostname\n Note the following about the values:\n \n The values of priority and weight are both set to 1 and can't be changed.\n The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.\n The value of service-hostname is a concatenation of the following values:\n The value that you specify for InstanceId when you register an instance.\n The name of the service.\n The name of the namespace.\n \n \n \n For example, if the value of InstanceId is test , the name of the service is backend , and the name of the namespace is example.com , the value of service-hostname is:\n \n test.backend.example.com\n If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both in the RegisterInstance request, AWS Cloud Map automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. 
You can ignore these records.\n \n TTL (integer) -- [REQUIRED]The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.\n \n Note\n Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.\n \n \n \n \n \n \n \n \n HealthCheckConfig (dict) -- \n Public DNS namespaces only. A complex type that contains settings for an optional Route 53 health check. If you specify settings for a health check, AWS Cloud Map associates the health check with all the Route 53 DNS records that you specify in DnsConfig .\n \n Warning\n If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.\n \n For information about the charges for health checks, see AWS Cloud Map Pricing .\n \n Type (string) -- [REQUIRED]The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.\n \n Warning\n You can't change the value of Type after you create a health check.\n \n You can create the following types of health checks:\n \n HTTP : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.\n HTTPS : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.\n \n \n Warning\n If you specify HTTPS for the value of Type , the endpoint must support TLS v1.0 or later.\n \n \n TCP : Route 53 tries to establish a TCP connection. If you specify TCP for Type , don't specify a value for ResourcePath .\n \n For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n \n ResourcePath (string) --The path that you want Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, such as the file /docs/route53-health-check.html . Route 53 automatically adds the DNS name for the service. If you don't specify a value for ResourcePath , the default value is / .\n If you specify TCP for Type , you must not specify a value for ResourcePath .\n \n FailureThreshold (integer) --The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n \n \n \n HealthCheckCustomConfig (dict) -- A complex type that contains information about an optional custom health check.\n \n Warning\n If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.\n \n \n FailureThreshold (integer) --The number of 30-second intervals that you want Cloud Map to wait after receiving an UpdateInstanceCustomHealthStatus request before it changes the health status of a service instance. For example, suppose you specify a value of 2 for FailureThreshold , and then your application sends an UpdateInstanceCustomHealthStatus request. 
Cloud Map waits for approximately 60 seconds (2 x 30) before changing the status of the service instance based on that request.\n Sending a second or subsequent UpdateInstanceCustomHealthStatus request with the same value before FailureThreshold x 30 seconds has passed doesn't accelerate the change. Cloud Map still waits FailureThreshold x 30 seconds after the first request to make the change.\n \n \n \n \n \"\"\"\n pass\n\ndef delete_namespace(Id=None):\n \"\"\"\n Deletes a namespace from the current account. If the namespace still contains one or more services, the request fails.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_namespace(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the namespace that you want to delete.\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_service(Id=None):\n \"\"\"\n Deletes a specified service. If the service still contains one or more registered instances, the request fails.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_service(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the service that you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef deregister_instance(ServiceId=None, InstanceId=None):\n \"\"\"\n Deletes the Amazon Route 53 DNS records and health check, if any, that AWS Cloud Map created for the specified instance.\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_instance(\n ServiceId='string',\n InstanceId='string'\n )\n \n \n :type ServiceId: string\n :param ServiceId: [REQUIRED]\n The ID of the service that the instance is associated with.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The value that you specified for Id in the RegisterInstance request.\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef discover_instances(NamespaceName=None, ServiceName=None, MaxResults=None, QueryParameters=None, HealthStatus=None):\n \"\"\"\n Discovers registered instances for a specified namespace and service.\n See also: AWS API Documentation\n \n \n :example: response = client.discover_instances(\n NamespaceName='string',\n ServiceName='string',\n MaxResults=123,\n QueryParameters={\n 'string': 'string'\n },\n HealthStatus='HEALTHY'|'UNHEALTHY'|'ALL'\n )\n \n \n :type NamespaceName: string\n :param NamespaceName: [REQUIRED]\n The name of the namespace that you specified when you registered the instance.\n \n\n :type ServiceName: string\n :param ServiceName: [REQUIRED]\n The name of the service that you specified when you registered the instance.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of instances that you want Cloud Map to return in the response to a DiscoverInstances request. If you don't specify a value for MaxResults , Cloud Map returns up to 100 instances.\n\n :type QueryParameters: dict\n :param QueryParameters: A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. 
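For example, a minimal filtering sketch (the attribute key stage and the value prod are hypothetical values chosen for illustration; any custom attribute supplied at registration time can be used):\n response = client.discover_instances(\n NamespaceName='example.com',\n ServiceName='backend',\n QueryParameters={\n 'stage': 'prod'\n }\n )\n 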
Only instances that match all the specified key/value pairs will be returned.\n (string) --\n (string) --\n \n\n :type HealthStatus: string\n :param HealthStatus: The health status of the instances that you want to discover.\n\n :rtype: dict\n :return: {\n 'Instances': [\n {\n 'InstanceId': 'string',\n 'NamespaceName': 'string',\n 'ServiceName': 'string',\n 'HealthStatus': 'HEALTHY'|'UNHEALTHY'|'UNKNOWN',\n 'Attributes': {\n 'string': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_instance(ServiceId=None, InstanceId=None):\n \"\"\"\n Gets information about a specified instance.\n See also: AWS API Documentation\n \n \n :example: response = client.get_instance(\n ServiceId='string',\n InstanceId='string'\n )\n \n \n :type ServiceId: string\n :param ServiceId: [REQUIRED]\n The ID of the service that the instance is associated with.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance that you want to get information about.\n \n\n :rtype: dict\n :return: {\n 'Instance': {\n 'Id': 'string',\n 'CreatorRequestId': 'string',\n 'Attributes': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord$Type .\n You can use this value to update an existing instance.\n To register a new instance, you must specify a value that is unique among instances that you register by using the same service.\n If you specify an existing InstanceId and ServiceId , AWS Cloud Map updates the existing DNS records. 
If there's also an existing health check, AWS Cloud Map deletes the old health check and creates a new one.\n \n \"\"\"\n pass\n\ndef get_instances_health_status(ServiceId=None, Instances=None, MaxResults=None, NextToken=None):\n \"\"\"\n Gets the current health status (Healthy , Unhealthy , or Unknown ) of one or more instances that are associated with a specified service.\n See also: AWS API Documentation\n \n \n :example: response = client.get_instances_health_status(\n ServiceId='string',\n Instances=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ServiceId: string\n :param ServiceId: [REQUIRED]\n The ID of the service that the instance is associated with.\n \n\n :type Instances: list\n :param Instances: An array that contains the IDs of all the instances that you want to get the health status for.\n If you omit Instances , AWS Cloud Map returns the health status for all the instances that are associated with the specified service.\n Note\n To get the IDs for the instances that you've registered by using a specified service, submit a ListInstances request.\n (string) --\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of instances that you want AWS Cloud Map to return in the response to a GetInstancesHealthStatus request. If you don't specify a value for MaxResults , AWS Cloud Map returns up to 100 instances.\n\n :type NextToken: string\n :param NextToken: For the first GetInstancesHealthStatus request, omit this value.\n If more than MaxResults instances match the specified criteria, you can submit another GetInstancesHealthStatus request to get the next group of results. Specify the value of NextToken from the previous response in the next request.\n \n\n :rtype: dict\n :return: {\n 'Status': {\n 'string': 'HEALTHY'|'UNHEALTHY'|'UNKNOWN'\n },\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_namespace(Id=None):\n \"\"\"\n Gets information about a namespace.\n See also: AWS API Documentation\n \n \n :example: response = client.get_namespace(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the namespace that you want to get information about.\n \n\n :rtype: dict\n :return: {\n 'Namespace': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string',\n 'Type': 'DNS_PUBLIC'|'DNS_PRIVATE'|'HTTP',\n 'Description': 'string',\n 'ServiceCount': 123,\n 'Properties': {\n 'DnsProperties': {\n 'HostedZoneId': 'string'\n },\n 'HttpProperties': {\n 'HttpName': 'string'\n }\n },\n 'CreateDate': datetime(2015, 1, 1),\n 'CreatorRequestId': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_operation(OperationId=None):\n \"\"\"\n Gets information about any operation that returns an operation ID in the response, such as a CreateService request.\n See also: AWS API Documentation\n \n \n :example: response = client.get_operation(\n OperationId='string'\n )\n \n \n :type OperationId: string\n :param OperationId: [REQUIRED]\n The ID of the operation that you want to get more information about.\n \n\n :rtype: dict\n :return: {\n 'Operation': {\n 'Id': 'string',\n 'Type': 'CREATE_NAMESPACE'|'DELETE_NAMESPACE'|'UPDATE_SERVICE'|'REGISTER_INSTANCE'|'DEREGISTER_INSTANCE',\n 'Status': 'SUBMITTED'|'PENDING'|'SUCCESS'|'FAIL',\n 'ErrorMessage': 'string',\n 'ErrorCode': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'UpdateDate': datetime(2015, 1, 1),\n 'Targets': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n ACCESS_DENIED\n CANNOT_CREATE_HOSTED_ZONE\n 
EXPIRED_TOKEN\n HOSTED_ZONE_NOT_FOUND\n INTERNAL_FAILURE\n INVALID_CHANGE_BATCH\n THROTTLED_REQUEST\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_service(Id=None):\n \"\"\"\n Gets the settings for a specified service.\n See also: AWS API Documentation\n \n \n :example: response = client.get_service(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the service that you want to get settings for.\n \n\n :rtype: dict\n :return: {\n 'Service': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string',\n 'NamespaceId': 'string',\n 'Description': 'string',\n 'InstanceCount': 123,\n 'DnsConfig': {\n 'NamespaceId': 'string',\n 'RoutingPolicy': 'MULTIVALUE'|'WEIGHTED',\n 'DnsRecords': [\n {\n 'Type': 'SRV'|'A'|'AAAA'|'CNAME',\n 'TTL': 123\n },\n ]\n },\n 'HealthCheckConfig': {\n 'Type': 'HTTP'|'HTTPS'|'TCP',\n 'ResourcePath': 'string',\n 'FailureThreshold': 123\n },\n 'HealthCheckCustomConfig': {\n 'FailureThreshold': 123\n },\n 'CreateDate': datetime(2015, 1, 1),\n 'CreatorRequestId': 'string'\n }\n }\n \n \n :returns: \n You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes .\n You must specify WEIGHTED for the value of RoutingPolicy .\n You can't specify both CNAME for Type and settings for HealthCheckConfig . If you do, the request will fail with an InvalidInput error.\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_instances(ServiceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists summary information about the instances that you registered by using a specified service.\n See also: AWS API Documentation\n \n \n :example: response = client.list_instances(\n ServiceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ServiceId: string\n :param ServiceId: [REQUIRED]\n The ID of the service that you want to list instances for.\n \n\n :type NextToken: string\n :param NextToken: For the first ListInstances request, omit this value.\n If more than MaxResults instances match the specified criteria, you can submit another ListInstances request to get the next group of results. Specify the value of NextToken from the previous response in the next request.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of instances that you want AWS Cloud Map to return in the response to a ListInstances request. 
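Regardless of the value you choose, results beyond MaxResults arrive in later pages. A minimal pagination sketch (the service ID srv-id is a placeholder; the loop simply follows NextToken until the response omits it):\n instances = []\n response = client.list_instances(ServiceId='srv-id', MaxResults=100)\n instances.extend(response['Instances'])\n while 'NextToken' in response:\n response = client.list_instances(\n ServiceId='srv-id',\n MaxResults=100,\n NextToken=response['NextToken']\n )\n instances.extend(response['Instances'])\n 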
If you don't specify a value for MaxResults , AWS Cloud Map returns up to 100 instances.\n\n :rtype: dict\n :return: {\n 'Instances': [\n {\n 'Id': 'string',\n 'Attributes': {\n 'string': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n The attributes that are associated with the instance.\n For each attribute, the applicable value.\n \n \"\"\"\n pass\n\ndef list_namespaces(NextToken=None, MaxResults=None, Filters=None):\n \"\"\"\n Lists summary information about the namespaces that were created by the current AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_namespaces(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Name': 'TYPE',\n 'Values': [\n 'string',\n ],\n 'Condition': 'EQ'|'IN'|'BETWEEN'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: For the first ListNamespaces request, omit this value.\n If the response contains NextToken , submit another ListNamespaces request to get the next group of results. Specify the value of NextToken from the previous response in the next request.\n Note\n AWS Cloud Map gets MaxResults namespaces and then filters them based on the specified criteria. It's possible that no namespaces in the first MaxResults namespaces matched the specified criteria but that subsequent groups of MaxResults namespaces do contain namespaces that match the criteria.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of namespaces that you want AWS Cloud Map to return in the response to a ListNamespaces request. If you don't specify a value for MaxResults , AWS Cloud Map returns up to 100 namespaces.\n\n :type Filters: list\n :param Filters: A complex type that contains specifications for the namespaces that you want to list.\n If you specify more than one filter, a namespace must match all filters to be returned by ListNamespaces .\n (dict) --A complex type that identifies the namespaces that you want to list. You can choose to list public or private namespaces.\n Name (string) -- [REQUIRED]Specify TYPE .\n Values (list) -- [REQUIRED]If you specify EQ for Condition , specify either DNS_PUBLIC or DNS_PRIVATE .\n If you specify IN for Condition , you can specify DNS_PUBLIC , DNS_PRIVATE , or both.\n (string) --\n Condition (string) --The operator that you want to use to determine whether ListNamespaces returns a namespace. Valid values for condition include:\n EQ : When you specify EQ for the condition, you can choose to list only public namespaces or private namespaces, but not both. 
EQ is the default condition and can be omitted.\n IN : When you specify IN for the condition, you can choose to list public namespaces, private namespaces, or both.\n BETWEEN : Not applicable\n \n \n\n :rtype: dict\n :return: {\n 'Namespaces': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string',\n 'Type': 'DNS_PUBLIC'|'DNS_PRIVATE'|'HTTP',\n 'Description': 'string',\n 'ServiceCount': 123,\n 'Properties': {\n 'DnsProperties': {\n 'HostedZoneId': 'string'\n },\n 'HttpProperties': {\n 'HttpName': 'string'\n }\n },\n 'CreateDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_operations(NextToken=None, MaxResults=None, Filters=None):\n \"\"\"\n Lists operations that match the criteria that you specify.\n See also: AWS API Documentation\n \n \n :example: response = client.list_operations(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Name': 'NAMESPACE_ID'|'SERVICE_ID'|'STATUS'|'TYPE'|'UPDATE_DATE',\n 'Values': [\n 'string',\n ],\n 'Condition': 'EQ'|'IN'|'BETWEEN'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: For the first ListOperations request, omit this value.\n If the response contains NextToken , submit another ListOperations request to get the next group of results. Specify the value of NextToken from the previous response in the next request.\n Note\n AWS Cloud Map gets MaxResults operations and then filters them based on the specified criteria. It's possible that no operations in the first MaxResults operations matched the specified criteria but that subsequent groups of MaxResults operations do contain operations that match the criteria.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items that you want AWS Cloud Map to return in the response to a ListOperations request. If you don't specify a value for MaxResults , AWS Cloud Map returns up to 100 operations.\n\n :type Filters: list\n :param Filters: A complex type that contains specifications for the operations that you want to list, for example, operations that you started between a specified start date and end date.\n If you specify more than one filter, an operation must match all filters to be returned by ListOperations .\n (dict) --A complex type that lets you select the operations that you want to list.\n Name (string) -- [REQUIRED]Specify the operations that you want to get:\n NAMESPACE_ID : Gets operations related to specified namespaces.\n SERVICE_ID : Gets operations related to specified services.\n STATUS : Gets operations based on the status of the operations: SUBMITTED , PENDING , SUCCEED , or FAIL .\n TYPE : Gets specified types of operation.\n UPDATE_DATE : Gets operations that changed status during a specified date/time range.\n Values (list) -- [REQUIRED]Specify values that are applicable to the value that you specify for Name :\n NAMESPACE_ID : Specify one namespace ID.\n SERVICE_ID : Specify one service ID.\n STATUS : Specify one or more statuses: SUBMITTED , PENDING , SUCCEED , or FAIL .\n TYPE : Specify one or more of the following types: CREATE_NAMESPACE , DELETE_NAMESPACE , UPDATE_SERVICE , REGISTER_INSTANCE , or DEREGISTER_INSTANCE .\n UPDATE_DATE : Specify a start date and an end date in Unix date/time format and Coordinated Universal Time (UTC). The start date must be the first value.\n (string) --\n Condition (string) --The operator that you want to use to determine whether an operation matches the specified value. 
Valid values for condition include:\n EQ : When you specify EQ for the condition, you can specify only one value. EQ is supported for NAMESPACE_ID , SERVICE_ID , STATUS , and TYPE . EQ is the default condition and can be omitted.\n IN : When you specify IN for the condition, you can specify a list of one or more values. IN is supported for STATUS and TYPE . An operation must match one of the specified values to be returned in the response.\n BETWEEN : Specify a start date and an end date in Unix date/time format and Coordinated Universal Time (UTC). The start date must be the first value. BETWEEN is supported for UPDATE_DATE .\n \n \n\n :rtype: dict\n :return: {\n 'Operations': [\n {\n 'Id': 'string',\n 'Status': 'SUBMITTED'|'PENDING'|'SUCCESS'|'FAIL'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n SUBMITTED : This is the initial state immediately after you submit a request.\n PENDING : AWS Cloud Map is performing the operation.\n SUCCESS : The operation succeeded.\n FAIL : The operation failed. For the failure reason, see ErrorMessage .\n \n \"\"\"\n pass\n\ndef list_services(NextToken=None, MaxResults=None, Filters=None):\n \"\"\"\n Lists summary information for all the services that are associated with one or more specified namespaces.\n See also: AWS API Documentation\n \n \n :example: response = client.list_services(\n NextToken='string',\n MaxResults=123,\n Filters=[\n {\n 'Name': 'NAMESPACE_ID',\n 'Values': [\n 'string',\n ],\n 'Condition': 'EQ'|'IN'|'BETWEEN'\n },\n ]\n )\n \n \n :type NextToken: string\n :param NextToken: For the first ListServices request, omit this value.\n If the response contains NextToken , submit another ListServices request to get the next group of results. Specify the value of NextToken from the previous response in the next request.\n Note\n AWS Cloud Map gets MaxResults services and then filters them based on the specified criteria. It's possible that no services in the first MaxResults services matched the specified criteria but that subsequent groups of MaxResults services do contain services that match the criteria.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of services that you want AWS Cloud Map to return in the response to a ListServices request. If you don't specify a value for MaxResults , AWS Cloud Map returns up to 100 services.\n\n :type Filters: list\n :param Filters: A complex type that contains specifications for the namespaces that you want to list services for.\n If you specify more than one filter, an operation must match all filters to be returned by ListServices .\n (dict) --A complex type that lets you specify the namespaces that you want to list services for.\n Name (string) -- [REQUIRED]Specify NAMESPACE_ID .\n Values (list) -- [REQUIRED]The values that are applicable to the value that you specify for Condition to filter the list of services.\n (string) --\n Condition (string) --The operator that you want to use to determine whether a service is returned by ListServices . Valid values for Condition include the following:\n EQ : When you specify EQ , specify one namespace ID for Values . 
EQ is the default condition and can be omitted.\n IN : When you specify IN , specify a list of the IDs for the namespaces that you want ListServices to return a list of services for.\n BETWEEN : Not applicable.\n \n \n\n :rtype: dict\n :return: {\n 'Services': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'InstanceCount': 123,\n 'DnsConfig': {\n 'NamespaceId': 'string',\n 'RoutingPolicy': 'MULTIVALUE'|'WEIGHTED',\n 'DnsRecords': [\n {\n 'Type': 'SRV'|'A'|'AAAA'|'CNAME',\n 'TTL': 123\n },\n ]\n },\n 'HealthCheckConfig': {\n 'Type': 'HTTP'|'HTTPS'|'TCP',\n 'ResourcePath': 'string',\n 'FailureThreshold': 123\n },\n 'HealthCheckCustomConfig': {\n 'FailureThreshold': 123\n },\n 'CreateDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.\n CNAME records: If you specify CNAME for Type , you can't define any other records. This is a limitation of DNS: you can't create a CNAME record and any other type of record that has the same name as a CNAME record.\n Alias records: If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type .\n All records: You specify settings other than TTL and Type when you register an instance.\n \n \"\"\"\n pass\n\ndef register_instance(ServiceId=None, InstanceId=None, CreatorRequestId=None, Attributes=None):\n \"\"\"\n Creates or updates one or more records and, optionally, creates a health check based on the settings in a specified service. When you submit a RegisterInstance request, the following occurs:\n For more information, see CreateService .\n When AWS Cloud Map receives a DNS query for the specified DNS name, it returns the applicable value:\n For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.register_instance(\n ServiceId='string',\n InstanceId='string',\n CreatorRequestId='string',\n Attributes={\n 'string': 'string'\n }\n )\n \n \n :type ServiceId: string\n :param ServiceId: [REQUIRED]\n The ID of the service that you want to use for settings for the instance.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n An identifier that you want to associate with the instance. Note the following:\n If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord$Type .\n You can use this value to update an existing instance.\n To register a new instance, you must specify a value that is unique among instances that you register by using the same service.\n If you specify an existing InstanceId and ServiceId , AWS Cloud Map updates the existing DNS records, if any. 
If there's also an existing health check, AWS Cloud Map deletes the old health check and creates a new one.\n Note\n The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.\n \n\n :type CreatorRequestId: string\n :param CreatorRequestId: A unique string that identifies the request and that allows failed RegisterInstance requests to be retried without the risk of executing the operation twice. You must use a unique CreatorRequestId string every time you submit a RegisterInstance request if you're registering additional instances for the same namespace and service. CreatorRequestId can be any unique string, for example, a date/time stamp.\n This field is autopopulated if not provided.\n \n\n :type Attributes: dict\n :param Attributes: [REQUIRED]\n A string map that contains the following information for the service that you specify in ServiceId :\n The attributes that apply to the records that are defined in the service.\n For each attribute, the applicable value.\n Supported attribute keys include the following:\n AWS_ALIAS_DNS_NAME\n If you want AWS Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see 'DNSName' in the topic AliasTarget in the Route 53 API Reference .\n Note the following:\n The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.\n In the service that is specified by ServiceId , the value of RoutingPolicy must be WEIGHTED .\n If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the Route 53 health check, but it won't associate the health check with the alias record.\n Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.\n If you specify a value for AWS_ALIAS_DNS_NAME , don't specify values for any of the AWS_INSTANCE attributes.\n AWS_INIT_HEALTH_STATUS\n If the service configuration includes HealthCheckCustomConfig , you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY . If you don't specify a value for AWS_INIT_HEALTH_STATUS , the initial status is HEALTHY .\n AWS_INSTANCE_CNAME\n If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com .\n This value is required if the service specified by ServiceId includes settings for a CNAME record.\n AWS_INSTANCE_IPV4\n If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44 .\n This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both.\n AWS_INSTANCE_IPV6\n If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 .\n This value is required if the service specified by ServiceId includes settings for an AAAA record. 
If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both.\n AWS_INSTANCE_PORT\n If the service includes an SRV record, the value that you want Route 53 to return for the port.\n If the service includes HealthCheckConfig , the port on the endpoint that you want Route 53 to send requests to.\n This value is required if you specified settings for an SRV record when you created the service.\n Custom attributes\n You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n :returns: \n If the health check is healthy : returns all the records\n If the health check is unhealthy : returns the applicable value for the last healthy instance\n If you didn't specify a health check configuration : returns all the records\n \n \"\"\"\n pass\n\ndef update_instance_custom_health_status(ServiceId=None, InstanceId=None, Status=None):\n \"\"\"\n Submits a request to change the health status of a custom health check to healthy or unhealthy.\n You can use UpdateInstanceCustomHealthStatus to change the status only for custom health checks, which you define using HealthCheckCustomConfig when you create a service. You can't use it to change the status for Route 53 health checks, which you define using HealthCheckConfig .\n For more information, see HealthCheckCustomConfig .\n See also: AWS API Documentation\n \n \n :example: response = client.update_instance_custom_health_status(\n ServiceId='string',\n InstanceId='string',\n Status='HEALTHY'|'UNHEALTHY'\n )\n \n \n :type ServiceId: string\n :param ServiceId: [REQUIRED]\n The ID of the service that includes the configuration for the custom health check that you want to change the status for.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance that you want to change the health status for.\n \n\n :type Status: string\n :param Status: [REQUIRED]\n The new status of the instance, HEALTHY or UNHEALTHY .\n \n\n \"\"\"\n pass\n\ndef update_service(Id=None, Service=None):\n \"\"\"\n Submits a request to perform the following operations:\n For public and private DNS namespaces, you must specify all DnsRecords configurations (and, optionally, HealthCheckConfig ) that you want to appear in the updated service. 
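As an illustration of this replace-all behavior, a hedged sketch (the service ID srv-id and the record values are placeholders, not output from a real call): even a TTL-only change must resend every DnsRecord that should survive the update:\n response = client.update_service(\n Id='srv-id',\n Service={\n 'DnsConfig': {\n 'DnsRecords': [\n {'Type': 'A', 'TTL': 60}\n ]\n }\n }\n )\n 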
Any current configurations that don't appear in an UpdateService request are deleted.\n When you update the TTL setting for a service, AWS Cloud Map also updates the corresponding settings in all the records and health checks that were created by using the specified service.\n See also: AWS API Documentation\n \n \n :example: response = client.update_service(\n Id='string',\n Service={\n 'Description': 'string',\n 'DnsConfig': {\n 'DnsRecords': [\n {\n 'Type': 'SRV'|'A'|'AAAA'|'CNAME',\n 'TTL': 123\n },\n ]\n },\n 'HealthCheckConfig': {\n 'Type': 'HTTP'|'HTTPS'|'TCP',\n 'ResourcePath': 'string',\n 'FailureThreshold': 123\n }\n }\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The ID of the service that you want to update.\n \n\n :type Service: dict\n :param Service: [REQUIRED]\n A complex type that contains the new settings for the service.\n Description (string) --A description for the service.\n DnsConfig (dict) -- [REQUIRED]A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.\n DnsRecords (list) -- [REQUIRED]An array that contains one DnsRecord object for each Route 53 record that you want AWS Cloud Map to create when you register an instance.\n (dict) --A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.\n Type (string) -- [REQUIRED]The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries.\n Note the following:\n A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.\n CNAME records: If you specify CNAME for Type , you can't define any other records. This is a limitation of DNS: you can't create a CNAME record and any other type of record that has the same name as a CNAME record.\n Alias records: If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type .\n All records: You specify settings other than TTL and Type when you register an instance.\n The following values are supported:\n A\n Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.\n AAAA\n Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.\n CNAME\n Route 53 returns the domain name of the resource, such as www.example.com. Note the following:\n You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes .\n You must specify WEIGHTED for the value of RoutingPolicy .\n You can't specify both CNAME for Type and settings for HealthCheckConfig . If you do, the request will fail with an InvalidInput error.\n SRV\n Route 53 returns the value for an SRV record. 
The value for an SRV record uses the following values:\n priority weight port service-hostname\n Note the following about the values:\n The values of priority and weight are both set to 1 and can't be changed.\n The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.\n The value of service-hostname is a concatenation of the following values:\n The value that you specify for InstanceId when you register an instance.\n The name of the service.\n The name of the namespace.\n For example, if the value of InstanceId is test , the name of the service is backend , and the name of the namespace is example.com , the value of service-hostname is:\n test.backend.example.com\n If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both in the RegisterInstance request, AWS Cloud Map automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. You can ignore these records.\n TTL (integer) -- [REQUIRED]The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.\n Note\n Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.\n \n \n HealthCheckConfig (dict) --\n Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig .\n Warning\n If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.\n Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing .\n Note the following about configuring health checks.\n A and AAAA records\n If DnsConfig includes configurations for both A and AAAA records, AWS Cloud Map creates a health check that uses the IPv4 address to check the health of the resource. If the endpoint that is specified by the IPv4 address is unhealthy, Route 53 considers both the A and AAAA records to be unhealthy.\n CNAME records\n You can't specify settings for HealthCheckConfig when the DNSConfig includes CNAME for the value of Type . If you do, the CreateService request will fail with an InvalidInput error.\n Request interval\n A Route 53 health checker in each health-checking region sends a health check request to an endpoint every 30 seconds. On average, your endpoint receives a health check request about every two seconds. However, health checkers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.\n Health checking regions\n Health checkers perform checks from all Route 53 health-checking regions. For a list of the current regions, see Regions .\n Alias records\n When you register an instance, if you include the AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. Note the following:\n Route 53 automatically sets EvaluateTargetHealth to true for alias records. 
When EvaluateTargetHealth is true, the alias record inherits the health of the referenced AWS resource, such as an ELB load balancer. For more information, see EvaluateTargetHealth .\n If you include HealthCheckConfig and then use the service to register an instance that creates an alias record, Route 53 doesn't create the health check.\n Charges for health checks\n Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing .\n Type (string) -- [REQUIRED]The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.\n Warning\n You can't change the value of Type after you create a health check.\n You can create the following types of health checks:\n HTTP : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.\n HTTPS : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.\n Warning\n If you specify HTTPS for the value of Type , the endpoint must support TLS v1.0 or later.\n TCP : Route 53 tries to establish a TCP connection. If you specify TCP for Type , don't specify a value for ResourcePath .\n For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n ResourcePath (string) --The path that you want Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, such as the file /docs/route53-health-check.html . Route 53 automatically adds the DNS name for the service. If you don't specify a value for ResourcePath , the default value is / .\n If you specify TCP for Type , you must not specify a value for ResourcePath .\n FailureThreshold (integer) --The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n \n \n\n :rtype: dict\n :return: {\n 'OperationId': 'string'\n }\n \n \n :returns: \n Id (string) -- [REQUIRED]\n The ID of the service that you want to update.\n \n Service (dict) -- [REQUIRED]\n A complex type that contains the new settings for the service.\n \n Description (string) --A description for the service.\n \n DnsConfig (dict) -- [REQUIRED]A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.\n \n DnsRecords (list) -- [REQUIRED]An array that contains one DnsRecord object for each Route 53 record that you want AWS Cloud Map to create when you register an instance.\n \n (dict) --A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.\n \n Type (string) -- [REQUIRED]The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries.\n Note the following:\n \n A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.\n CNAME records: If you specify CNAME for Type , you can't define any other records. 
This is a limitation of DNS: you can't create a CNAME record and any other type of record that has the same name as a CNAME record.\n Alias records: If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type .\n All records: You specify settings other than TTL and Type when you register an instance.\n \n The following values are supported:\n \n A\n Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.\n \n AAAA\n Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.\n \n CNAME\n Route 53 returns the domain name of the resource, such as www.example.com. Note the following:\n \n You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes .\n You must specify WEIGHTED for the value of RoutingPolicy .\n You can't specify both CNAME for Type and settings for HealthCheckConfig . If you do, the request will fail with an InvalidInput error.\n \n \n SRV\n Route 53 returns the value for an SRV record. The value for an SRV record uses the following values:\n \n priority weight port service-hostname\n Note the following about the values:\n \n The values of priority and weight are both set to 1 and can't be changed.\n The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.\n The value of service-hostname is a concatenation of the following values:\n The value that you specify for InstanceId when you register an instance.\n The name of the service.\n The name of the namespace.\n \n \n \n For example, if the value of InstanceId is test , the name of the service is backend , and the name of the namespace is example.com , the value of service-hostname is:\n \n test.backend.example.com\n If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both in the RegisterInstance request, AWS Cloud Map automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. You can ignore these records.\n \n TTL (integer) -- [REQUIRED]The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.\n \n Note\n Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.\n \n \n \n \n \n \n \n \n HealthCheckConfig (dict) --\n Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig .\n \n Warning\n If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.\n \n Health checks are basic Route 53 health checks that monitor an AWS endpoint. 
For information about pricing for health checks, see Amazon Route 53 Pricing .\n Note the following about configuring health checks.\n \n A and AAAA records\n If DnsConfig includes configurations for both A and AAAA records, AWS Cloud Map creates a health check that uses the IPv4 address to check the health of the resource. If the endpoint that is specified by the IPv4 address is unhealthy, Route 53 considers both the A and AAAA records to be unhealthy.\n \n CNAME records\n You can't specify settings for HealthCheckConfig when the DNSConfig includes CNAME for the value of Type . If you do, the CreateService request will fail with an InvalidInput error.\n \n Request interval\n A Route 53 health checker in each health-checking region sends a health check request to an endpoint every 30 seconds. On average, your endpoint receives a health check request about every two seconds. However, health checkers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.\n \n Health checking regions\n Health checkers perform checks from all Route 53 health-checking regions. For a list of the current regions, see Regions .\n \n Alias records\n When you register an instance, if you include the AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. Note the following:\n \n Route 53 automatically sets EvaluateTargetHealth to true for alias records. When EvaluateTargetHealth is true, the alias record inherits the health of the referenced AWS resource, such as an ELB load balancer. For more information, see EvaluateTargetHealth .\n If you include HealthCheckConfig and then use the service to register an instance that creates an alias record, Route 53 doesn't create the health check.\n \n \n Charges for health checks\n Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing .\n \n Type (string) -- [REQUIRED]The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.\n \n Warning\n You can't change the value of Type after you create a health check.\n \n You can create the following types of health checks:\n \n HTTP : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.\n HTTPS : Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.\n \n \n Warning\n If you specify HTTPS for the value of Type , the endpoint must support TLS v1.0 or later.\n \n \n TCP : Route 53 tries to establish a TCP connection. If you specify TCP for Type , don't specify a value for ResourcePath .\n \n For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n \n ResourcePath (string) --The path that you want Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, such as the file /docs/route53-health-check.html . Route 53 automatically adds the DNS name for the service. 
If you don't specify a value for ResourcePath , the default value is / .\n If you specify TCP for Type , you must not specify a value for ResourcePath .\n \n FailureThreshold (integer) --The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide .\n \n \n \n \n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.601749062538147, "alphanum_fraction": 0.6053266525268555, "avg_line_length": 25.90450668334961, "blob_id": "946792974c46564a9342fa1a389539e403061652", "content_id": "a12ceafd44b94b786da72c51511b85a11810d072", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35219, "license_type": "permissive", "max_line_length": 546, "num_lines": 1309, "path": "/pyboto3/workmail.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_delegate_to_resource(OrganizationId=None, ResourceId=None, EntityId=None):\n \"\"\"\n Adds a member to the resource's set of delegates.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_delegate_to_resource(\n OrganizationId='string',\n ResourceId='string',\n EntityId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization under which the resource exists.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The resource for which members are associated.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The member (user or group) to associate to the resource.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_member_to_group(OrganizationId=None, GroupId=None, MemberId=None):\n \"\"\"\n Adds a member to the group's set.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_member_to_group(\n OrganizationId='string',\n GroupId='string',\n MemberId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization under which the group exists.\n \n\n :type GroupId: string\n :param GroupId: [REQUIRED]\n The group for which the member is associated.\n \n\n :type MemberId: string\n :param MemberId: [REQUIRED]\n The member to associate to the group.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_alias(OrganizationId=None, EntityId=None, Alias=None):\n \"\"\"\n Adds an alias to the set of a given member of Amazon WorkMail.\n See also: AWS API Documentation\n \n \n :example: response = client.create_alias(\n OrganizationId='string',\n EntityId='string',\n Alias='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization under which the member exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The alias is added to this Amazon WorkMail entity.\n \n\n :type Alias: string\n :param Alias: [REQUIRED]\n The alias to add to the user.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_group(OrganizationId=None, Name=None):\n \"\"\"\n Creates a group that can be used in Amazon WorkMail by calling the RegisterToWorkMail operation.\n See also: AWS API Documentation\n \n \n :example: response = client.create_group(\n OrganizationId='string',\n Name='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization under which the group is to be created.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the group.\n \n\n :rtype: dict\n :return: {\n 'GroupId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_resource(OrganizationId=None, Name=None, Type=None):\n \"\"\"\n Creates a new Amazon WorkMail resource. The available types are equipment and room.\n See also: AWS API Documentation\n \n \n :example: response = client.create_resource(\n OrganizationId='string',\n Name='string',\n Type='ROOM'|'EQUIPMENT'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier associated with the organization for which the resource is created.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the created resource.\n \n\n :type Type: string\n :param Type: [REQUIRED]\n The type of the created resource.\n \n\n :rtype: dict\n :return: {\n 'ResourceId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_user(OrganizationId=None, Name=None, DisplayName=None, Password=None):\n \"\"\"\n Creates a user who can be used in Amazon WorkMail by calling the RegisterToWorkMail operation.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user(\n OrganizationId='string',\n Name='string',\n DisplayName='string',\n Password='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier of the organization for which the user is created.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name for the user to be created.\n \n\n :type DisplayName: string\n :param DisplayName: [REQUIRED]\n The display name for the user to be created.\n \n\n :type Password: string\n :param Password: [REQUIRED]\n The password for the user to be created.\n \n\n :rtype: dict\n :return: {\n 'UserId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_alias(OrganizationId=None, EntityId=None, Alias=None):\n \"\"\"\n Remove the alias from a set of aliases for a given user.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_alias(\n OrganizationId='string',\n EntityId='string',\n Alias='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the 
organization under which the user exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier for the Amazon WorkMail entity to have the aliases removed.\n \n\n :type Alias: string\n :param Alias: [REQUIRED]\n The aliases to be removed from the user's set of aliases. Duplicate entries in the list are collapsed into single entries (the list is transformed into a set).\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_group(OrganizationId=None, GroupId=None):\n \"\"\"\n Deletes a group from Amazon WorkMail.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_group(\n OrganizationId='string',\n GroupId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization that contains the group.\n \n\n :type GroupId: string\n :param GroupId: [REQUIRED]\n The identifier of the group to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_mailbox_permissions(OrganizationId=None, EntityId=None, GranteeId=None):\n \"\"\"\n Deletes permissions granted to a user or group.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_mailbox_permissions(\n OrganizationId='string',\n EntityId='string',\n GranteeId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier of the organization under which the entity (user or group) exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier of the entity (user or group) for which to delete mailbox permissions.\n \n\n :type GranteeId: string\n :param GranteeId: [REQUIRED]\n The identifier of the entity (user or group) for which to delete granted permissions.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_resource(OrganizationId=None, ResourceId=None):\n \"\"\"\n Deletes the specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_resource(\n OrganizationId='string',\n ResourceId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier associated with the organization for which the resource is deleted.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_user(OrganizationId=None, UserId=None):\n \"\"\"\n Deletes a user from Amazon WorkMail and all subsequent systems. The action can't be undone. The mailbox is kept as-is for a minimum of 30 days, without any means to restore it.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user(\n OrganizationId='string',\n UserId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization that contains the user.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier of the user to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef deregister_from_work_mail(OrganizationId=None, EntityId=None):\n \"\"\"\n Mark a user, group, or resource as no longer used in Amazon WorkMail. This action disassociates the mailbox and schedules it for clean-up. Amazon WorkMail keeps mailboxes for 30 days before they are permanently removed. 
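For example, a minimal disable-and-verify sketch (both identifiers are placeholders; a deregistered user is expected to report a DISABLED state through DescribeUser):\n client.deregister_from_work_mail(\n OrganizationId='org-id',\n EntityId='user-id'\n )\n state = client.describe_user(\n OrganizationId='org-id',\n UserId='user-id'\n )['State']\n 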
The functionality in the console is Disable .\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_from_work_mail(\n OrganizationId='string',\n EntityId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the Amazon WorkMail entity exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier for the entity to be updated.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_group(OrganizationId=None, GroupId=None):\n \"\"\"\n Returns the data available for the group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_group(\n OrganizationId='string',\n GroupId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the group exists.\n \n\n :type GroupId: string\n :param GroupId: [REQUIRED]\n The identifier for the group to be described.\n \n\n :rtype: dict\n :return: {\n 'GroupId': 'string',\n 'Name': 'string',\n 'Email': 'string',\n 'State': 'ENABLED'|'DISABLED'|'DELETED',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_organization(OrganizationId=None):\n \"\"\"\n Provides more information regarding a given organization based on its identifier.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_organization(\n OrganizationId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization to be described.\n \n\n :rtype: dict\n :return: {\n 'OrganizationId': 'string',\n 'Alias': 'string',\n 'State': 'string',\n 'DirectoryId': 'string',\n 'DirectoryType': 'string',\n 'DefaultMailDomain': 'string',\n 'CompletedDate': datetime(2015, 1, 1),\n 'ErrorMessage': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_resource(OrganizationId=None, ResourceId=None):\n \"\"\"\n Returns the data available for the resource.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_resource(\n OrganizationId='string',\n ResourceId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier associated with the organization for which the resource is described.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource to be described.\n \n\n :rtype: dict\n :return: {\n 'ResourceId': 'string',\n 'Email': 'string',\n 'Name': 'string',\n 'Type': 'ROOM'|'EQUIPMENT',\n 'BookingOptions': {\n 'AutoAcceptRequests': True|False,\n 'AutoDeclineRecurringRequests': True|False,\n 'AutoDeclineConflictingRequests': True|False\n },\n 'State': 'ENABLED'|'DISABLED'|'DELETED',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_user(OrganizationId=None, UserId=None):\n \"\"\"\n Provides information regarding the user.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user(\n OrganizationId='string',\n UserId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the user exists.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier for the user to be described.\n \n\n :rtype: dict\n :return: {\n 'UserId': 'string',\n 'Name': 'string',\n 'Email': 'string',\n 'DisplayName': 'string',\n 'State': 
'ENABLED'|'DISABLED'|'DELETED',\n 'UserRole': 'USER'|'RESOURCE'|'SYSTEM_USER',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef disassociate_delegate_from_resource(OrganizationId=None, ResourceId=None, EntityId=None):\n \"\"\"\n Removes a member from the resource's set of delegates.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_delegate_from_resource(\n OrganizationId='string',\n ResourceId='string',\n EntityId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the resource exists.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource from which delegates' set members are removed.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier for the member (user, group) to be removed from the resource's delegates.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_member_from_group(OrganizationId=None, GroupId=None, MemberId=None):\n \"\"\"\n Removes a member from a group.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_member_from_group(\n OrganizationId='string',\n GroupId='string',\n MemberId='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the group exists.\n \n\n :type GroupId: string\n :param GroupId: [REQUIRED]\n The identifier for the group from which members are removed.\n \n\n :type MemberId: string\n :param MemberId: [REQUIRED]\n The identifier for the member to be removed to the group.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
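Where an operation supports pagination, the get_paginator helper above replaces a manual NextToken loop. A hedged sketch, guarded by can_paginate so it degrades gracefully if list_users is not paginatable in a given botocore version; the organization ID is a placeholder:

import boto3

workmail = boto3.client('workmail')
if workmail.can_paginate('list_users'):
    paginator = workmail.get_paginator('list_users')
    for page in paginator.paginate(OrganizationId='m-0123456789abcdef'):
        for user in page.get('Users', []):
            print(user['Id'], user.get('State'))
else:
    print('list_users cannot be paginated in this SDK version')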
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_aliases(OrganizationId=None, EntityId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Creates a paginated call to list the aliases associated with a given entity.\n See also: AWS API Documentation\n \n \n :example: response = client.list_aliases(\n OrganizationId='string',\n EntityId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the entity exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier for the entity for which to list the aliases.\n \n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results. The first call does not contain any tokens.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'Aliases': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_group_members(OrganizationId=None, GroupId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns an overview of the members of a group.\n See also: AWS API Documentation\n \n \n :example: response = client.list_group_members(\n OrganizationId='string',\n GroupId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the group exists.\n \n\n :type GroupId: string\n :param GroupId: [REQUIRED]\n The identifier for the group to which the members are associated.\n \n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results. The first call does not contain any tokens.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'Members': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Type': 'GROUP'|'USER',\n 'State': 'ENABLED'|'DISABLED'|'DELETED',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_groups(OrganizationId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns summaries of the organization's groups.\n See also: AWS API Documentation\n \n \n :example: response = client.list_groups(\n OrganizationId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the groups exist.\n \n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results. 
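The NextToken/MaxResults contract described above can also be driven by hand. A small sketch that collects every alias for an entity (both IDs are placeholders); per the docs, the first call carries no token:

import boto3

workmail = boto3.client('workmail')
kwargs = {
    'OrganizationId': 'm-0123456789abcdef',  # placeholder
    'EntityId': 'S-1-1-11-1111111111',       # placeholder
    'MaxResults': 25,
}
aliases = []
while True:
    page = workmail.list_aliases(**kwargs)
    aliases.extend(page.get('Aliases', []))
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token  # only set on follow-up calls
print(aliases)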
The first call does not contain any tokens.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'Groups': [\n {\n 'Id': 'string',\n 'Email': 'string',\n 'Name': 'string',\n 'State': 'ENABLED'|'DISABLED'|'DELETED',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_mailbox_permissions(OrganizationId=None, EntityId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists the mailbox permissions associated with a mailbox.\n See also: AWS API Documentation\n \n \n :example: response = client.list_mailbox_permissions(\n OrganizationId='string',\n EntityId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier of the organization under which the entity (user or group) exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier of the entity (user or group) for which to list mailbox permissions.\n \n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results. The first call does not contain any tokens.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'Permissions': [\n {\n 'GranteeId': 'string',\n 'GranteeType': 'GROUP'|'USER',\n 'PermissionValues': [\n 'FULL_ACCESS'|'SEND_AS'|'SEND_ON_BEHALF',\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_organizations(NextToken=None, MaxResults=None):\n \"\"\"\n Returns summaries of the customer's non-deleted organizations.\n See also: AWS API Documentation\n \n \n :example: response = client.list_organizations(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results. The first call does not contain any tokens.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'OrganizationSummaries': [\n {\n 'OrganizationId': 'string',\n 'Alias': 'string',\n 'ErrorMessage': 'string',\n 'State': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_resource_delegates(OrganizationId=None, ResourceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists the delegates associated with a resource. 
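As a concrete illustration of the listing calls above, a sketch that walks every non-deleted organization and prints its groups (single-page results are assumed for brevity; a NextToken loop as shown earlier handles larger accounts):

import boto3

workmail = boto3.client('workmail')
for org in workmail.list_organizations().get('OrganizationSummaries', []):
    print(org['OrganizationId'], org.get('State'))
    groups = workmail.list_groups(OrganizationId=org['OrganizationId'])
    for group in groups.get('Groups', []):
        print(' ', group['Id'], group['Name'], group['State'])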
Users and groups can be resource delegates and answer requests on behalf of the resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resource_delegates(\n OrganizationId='string',\n ResourceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization that contains the resource for which delegates are listed.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier for the resource whose delegates are listed.\n \n\n :type NextToken: string\n :param NextToken: The token used to paginate through the delegates associated with a resource.\n\n :type MaxResults: integer\n :param MaxResults: The number of maximum results in a page.\n\n :rtype: dict\n :return: {\n 'Delegates': [\n {\n 'Id': 'string',\n 'Type': 'GROUP'|'USER'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_resources(OrganizationId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns summaries of the organization's resources.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resources(\n OrganizationId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the resources exist.\n \n\n :type NextToken: string\n :param NextToken: The token to use to retrieve the next page of results. The first call does not contain any tokens.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'Resources': [\n {\n 'Id': 'string',\n 'Email': 'string',\n 'Name': 'string',\n 'Type': 'ROOM'|'EQUIPMENT',\n 'State': 'ENABLED'|'DISABLED'|'DELETED',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_users(OrganizationId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns summaries of the organization's users.\n See also: AWS API Documentation\n \n \n :example: response = client.list_users(\n OrganizationId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the users exist.\n \n\n :type NextToken: string\n :param NextToken: TBD\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a single call.\n\n :rtype: dict\n :return: {\n 'Users': [\n {\n 'Id': 'string',\n 'Email': 'string',\n 'Name': 'string',\n 'DisplayName': 'string',\n 'State': 'ENABLED'|'DISABLED'|'DELETED',\n 'UserRole': 'USER'|'RESOURCE'|'SYSTEM_USER',\n 'EnabledDate': datetime(2015, 1, 1),\n 'DisabledDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_mailbox_permissions(OrganizationId=None, EntityId=None, GranteeId=None, PermissionValues=None):\n \"\"\"\n Sets permissions for a user or group. 
This replaces any pre-existing permissions set for the entity.\n See also: AWS API Documentation\n \n \n :example: response = client.put_mailbox_permissions(\n OrganizationId='string',\n EntityId='string',\n GranteeId='string',\n PermissionValues=[\n 'FULL_ACCESS'|'SEND_AS'|'SEND_ON_BEHALF',\n ]\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier of the organization under which the entity (user or group) exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier of the entity (user or group) for which to update mailbox permissions.\n \n\n :type GranteeId: string\n :param GranteeId: [REQUIRED]\n The identifier of the entity (user or group) to which to grant the permissions.\n \n\n :type PermissionValues: list\n :param PermissionValues: [REQUIRED]\n The permissions granted to the grantee. SEND_AS allows the grantee to send email as the owner of the mailbox (the grantee is not mentioned on these emails). SEND_ON_BEHALF allows the grantee to send email on behalf of the owner of the mailbox (the grantee is not mentioned as the physical sender of these emails). FULL_ACCESS allows the grantee full access to the mailbox, irrespective of other folder-level permissions set on the mailbox.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef register_to_work_mail(OrganizationId=None, EntityId=None, Email=None):\n \"\"\"\n Registers an existing and disabled user, group, or resource/entity for Amazon WorkMail use by associating a mailbox and calendaring capabilities. It performs no change if the entity is enabled and fails if the entity is deleted. This operation results in the accumulation of costs. For more information, see Pricing . The equivalent console functionality for this operation is Enable . Users can either be created by calling the CreateUser API or they can be synchronized from your directory. For more information, see DeregisterFromWorkMail.\n See also: AWS API Documentation\n \n \n :example: response = client.register_to_work_mail(\n OrganizationId='string',\n EntityId='string',\n Email='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier for the organization under which the Amazon WorkMail entity exists.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The identifier for the entity to be updated.\n \n\n :type Email: string\n :param Email: [REQUIRED]\n The email for the entity to be updated.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef reset_password(OrganizationId=None, UserId=None, Password=None):\n \"\"\"\n Allows the administrator to reset the password for a user.\n See also: AWS API Documentation\n \n \n :example: response = client.reset_password(\n OrganizationId='string',\n UserId='string',\n Password='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier of the organization that contains the user for which the password is reset.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier of the user for whom the password is reset.\n \n\n :type Password: string\n :param Password: [REQUIRED]\n The new password for the user.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_primary_email_address(OrganizationId=None, EntityId=None, Email=None):\n \"\"\"\n Updates the primary email for an entity. 
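A hedged sketch of granting and then inspecting mailbox permissions with the two calls above; because PutMailboxPermissions replaces whatever was previously granted, pass every permission you want the grantee to keep. All IDs are placeholders:

import boto3

workmail = boto3.client('workmail')
ORG = 'm-0123456789abcdef'       # placeholder organization ID
OWNER = 'S-1-1-11-1111111111'    # placeholder mailbox owner (user or group)
GRANTEE = 'S-1-1-11-2222222222'  # placeholder grantee

workmail.put_mailbox_permissions(OrganizationId=ORG, EntityId=OWNER,
                                 GranteeId=GRANTEE,
                                 PermissionValues=['SEND_AS'])
resp = workmail.list_mailbox_permissions(OrganizationId=ORG, EntityId=OWNER)
for perm in resp.get('Permissions', []):
    print(perm['GranteeId'], perm['PermissionValues'])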
The current email is moved into the list of aliases (or swapped between an existing alias and the current primary email) and the email provided in the input is promoted as the primary.\n See also: AWS API Documentation\n \n \n :example: response = client.update_primary_email_address(\n OrganizationId='string',\n EntityId='string',\n Email='string'\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The organization that contains the entity to update.\n \n\n :type EntityId: string\n :param EntityId: [REQUIRED]\n The entity to update (user, group, or resource).\n \n\n :type Email: string\n :param Email: [REQUIRED]\n The value of the email to be updated as primary.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_resource(OrganizationId=None, ResourceId=None, Name=None, BookingOptions=None):\n \"\"\"\n Updates data for the resource. It must be preceded by a describe call in order to have the latest information. The dataset in the request should be the one expected when performing another describe call.\n See also: AWS API Documentation\n \n \n :example: response = client.update_resource(\n OrganizationId='string',\n ResourceId='string',\n Name='string',\n BookingOptions={\n 'AutoAcceptRequests': True|False,\n 'AutoDeclineRecurringRequests': True|False,\n 'AutoDeclineConflictingRequests': True|False\n }\n )\n \n \n :type OrganizationId: string\n :param OrganizationId: [REQUIRED]\n The identifier associated with the organization for which the resource is updated.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource to be updated.\n \n\n :type Name: string\n :param Name: The name of the resource to be updated.\n\n :type BookingOptions: dict\n :param BookingOptions: The resource's booking options to be updated.\n AutoAcceptRequests (boolean) --The resource's ability to automatically reply to requests. 
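Because update_resource must be preceded by a describe call (see its description above), a read-modify-write sketch is the natural usage; the organization and resource IDs are placeholders:

import boto3

workmail = boto3.client('workmail')
ORG = 'm-0123456789abcdef'       # placeholder
RESOURCE = 'r-0123456789abcdef'  # placeholder

# Fetch the current state first, then send the full dataset back.
current = workmail.describe_resource(OrganizationId=ORG, ResourceId=RESOURCE)
options = current['BookingOptions']
options['AutoDeclineConflictingRequests'] = True  # the single change we want
workmail.update_resource(OrganizationId=ORG, ResourceId=RESOURCE,
                         Name=current['Name'], BookingOptions=options)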
If disabled, delegates must be associated to the resource.\n AutoDeclineRecurringRequests (boolean) --The resource's ability to automatically decline any recurring requests.\n AutoDeclineConflictingRequests (boolean) --The resource's ability to automatically decline any conflicting requests.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5847769975662231, "alphanum_fraction": 0.5925582051277161, "avg_line_length": 38.427547454833984, "blob_id": "fd7797ea98fe72af55a664cf1f0575983c35abd1", "content_id": "04bd7f53afecd66bdb0152d2129cd65b579bef19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341207, "license_type": "permissive", "max_line_length": 681, "num_lines": 8654, "path": "/pyboto3/ssm.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_tags_to_resource(ResourceType=None, ResourceId=None, Tags=None):\n \"\"\"\n Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.\n Each resource can have a maximum of 50 tags.\n We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. 
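A minimal sketch of the tagging call described here, reusing the Owner/Stack keys from the example above; the mi- resource ID is a placeholder in the on-premises managed-instance format the docs require:

import boto3

ssm = boto3.client('ssm')
ssm.add_tags_to_resource(
    ResourceType='ManagedInstance',
    ResourceId='mi-1a2b3c4d5e6f',  # placeholder; on-prem instances use the mi- format
    Tags=[
        {'Key': 'Owner', 'Value': 'DbAdmin'},
        {'Key': 'Stack', 'Value': 'Production'},
    ])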
Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters.\n For more information about tags, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.add_tags_to_resource(\n ResourceType='Document'|'ManagedInstance'|'MaintenanceWindow'|'Parameter'|'PatchBaseline',\n ResourceId='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceType: string\n :param ResourceType: [REQUIRED]\n Specifies the type of resource you are tagging.\n Note\n The ManagedInstance type for this API action is for on-premises managed instances. You must specify the the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The resource ID you want to tag.\n Use the ID of the resource. Here are some examples:\n ManagedInstance: mi-012345abcde\n MaintenanceWindow: mw-012345abcde\n PatchBaseline: pb-012345abcde\n For the Document and Parameter values, use the name of the resource.\n Note\n The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.\n Warning\n Do not enter personally identifiable information in this field.\n (dict) --Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines.\n Key (string) -- [REQUIRED]The name of the tag.\n Value (string) -- [REQUIRED]The value of the tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_command(CommandId=None, InstanceIds=None):\n \"\"\"\n Attempts to cancel the command specified by the Command ID. There is no guarantee that the command will be terminated and the underlying process stopped.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_command(\n CommandId='string',\n InstanceIds=[\n 'string',\n ]\n )\n \n \n :type CommandId: string\n :param CommandId: [REQUIRED]\n The ID of the command you want to cancel.\n \n\n :type InstanceIds: list\n :param InstanceIds: (Optional) A list of instance IDs on which you want to cancel the command. 
If not provided, the command is canceled on every instance on which it was requested.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef cancel_maintenance_window_execution(WindowExecutionId=None):\n \"\"\"\n Stops a Maintenance Window execution that is already in progress and cancels any tasks in the window that have not already starting running. (Tasks already in progress will continue to completion.)\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_maintenance_window_execution(\n WindowExecutionId='string'\n )\n \n \n :type WindowExecutionId: string\n :param WindowExecutionId: [REQUIRED]\n The ID of the Maintenance Window execution to stop.\n \n\n :rtype: dict\n :return: {\n 'WindowExecutionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_activation(Description=None, DefaultInstanceName=None, IamRole=None, RegistrationLimit=None, ExpirationDate=None):\n \"\"\"\n Registers your on-premises server or virtual machine with Amazon EC2 so that you can manage these resources using Run Command. An on-premises server or virtual machine that has been registered with EC2 is called a managed instance. For more information about activations, see Setting Up Systems Manager in Hybrid Environments .\n See also: AWS API Documentation\n \n \n :example: response = client.create_activation(\n Description='string',\n DefaultInstanceName='string',\n IamRole='string',\n RegistrationLimit=123,\n ExpirationDate=datetime(2015, 1, 1)\n )\n \n \n :type Description: string\n :param Description: A user-defined description of the resource that you want to register with Amazon EC2.\n Warning\n Do not enter personally identifiable information in this field.\n \n\n :type DefaultInstanceName: string\n :param DefaultInstanceName: The name of the registered, managed instance as it will appear in the Amazon EC2 console or when you use the AWS command line tools to list EC2 resources.\n Warning\n Do not enter personally identifiable information in this field.\n \n\n :type IamRole: string\n :param IamRole: [REQUIRED]\n The Amazon Identity and Access Management (IAM) role that you want to assign to the managed instance.\n \n\n :type RegistrationLimit: integer\n :param RegistrationLimit: Specify the maximum number of managed instances you want to register. The default value is 1 instance.\n\n :type ExpirationDate: datetime\n :param ExpirationDate: The date by which this activation request should expire. 
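A sketch of registering on-premises capacity with create_activation as described above. The IAM role name is an assumption (any role the SSM service can assume will do), and the returned code/ID pair is what you enter on each machine when installing the agent:

import boto3

ssm = boto3.client('ssm')
resp = ssm.create_activation(
    Description='Registers on-premises web servers',
    DefaultInstanceName='on-prem-web',
    IamRole='SSMServiceRole',  # assumed pre-existing IAM role
    RegistrationLimit=10)      # allow up to ten machines on this activation
print(resp['ActivationId'], resp['ActivationCode'])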
The default value is 24 hours.\n\n :rtype: dict\n :return: {\n 'ActivationId': 'string',\n 'ActivationCode': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_association(Name=None, DocumentVersion=None, InstanceId=None, Parameters=None, Targets=None, ScheduleExpression=None, OutputLocation=None, AssociationName=None, MaxErrors=None, MaxConcurrency=None, ComplianceSeverity=None):\n \"\"\"\n Associates the specified Systems Manager document with the specified instances or targets.\n When you associate a document with one or more instances using instance IDs or tags, SSM Agent running on the instance processes the document and configures the instance as specified.\n If you associate a document with an instance that already has an associated document, the system returns the AssociationAlreadyExists exception.\n See also: AWS API Documentation\n \n \n :example: response = client.create_association(\n Name='string',\n DocumentVersion='string',\n InstanceId='string',\n Parameters={\n 'string': [\n 'string',\n ]\n },\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n ScheduleExpression='string',\n OutputLocation={\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n AssociationName='string',\n MaxErrors='string',\n MaxConcurrency='string',\n ComplianceSeverity='CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the Systems Manager document.\n \n\n :type DocumentVersion: string\n :param DocumentVersion: The document version you want to associate with the target(s). Can be a specific version or the default version.\n\n :type InstanceId: string\n :param InstanceId: The instance ID.\n\n :type Parameters: dict\n :param Parameters: The parameters for the documents runtime configuration.\n (string) --\n (list) --\n (string) --\n \n \n\n :type Targets: list\n :param Targets: The targets (either instances or tags) for the association.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type ScheduleExpression: string\n :param ScheduleExpression: A cron expression when the association will be applied to the target(s).\n\n :type OutputLocation: dict\n :param OutputLocation: An Amazon S3 bucket where you want to store the output details of the request.\n S3Location (dict) --An Amazon S3 bucket where you want to store the results of this request.\n OutputS3Region (string) --(Deprecated) You can no longer specify this parameter. The system ignores it. 
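A hedged sketch of create_association that targets instances by tag instead of instance ID and runs on a weekly cron schedule; AWS-UpdateSSMAgent is an AWS-managed document, and the tag key/value are placeholders:

import boto3

ssm = boto3.client('ssm')
resp = ssm.create_association(
    Name='AWS-UpdateSSMAgent',                 # AWS-managed document
    Targets=[{'Key': 'tag:Stack', 'Values': ['Production']}],
    ScheduleExpression='cron(0 2 ? * SUN *)',  # Sundays at 02:00
    AssociationName='weekly-agent-update',
    MaxConcurrency='10%',  # at most 10% of targets at once
    MaxErrors='1')         # stop scheduling new targets once errors exceed 1
print(resp['AssociationDescription']['AssociationId'])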
Instead, Systems Manager automatically determines the Amazon S3 bucket region.\n OutputS3BucketName (string) --The name of the Amazon S3 bucket.\n OutputS3KeyPrefix (string) --The Amazon S3 bucket subfolder.\n \n \n\n :type AssociationName: string\n :param AssociationName: Specify a descriptive name for the association.\n\n :type MaxErrors: string\n :param MaxErrors: The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.\n Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.\n \n\n :type MaxConcurrency: string\n :param MaxConcurrency: The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.\n If a new instance starts and attempts to execute an association while Systems Manager is executing MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.\n \n\n :type ComplianceSeverity: string\n :param ComplianceSeverity: The severity level to assign to the association.\n\n :rtype: dict\n :return: {\n 'AssociationDescription': {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'AssociationVersion': 'string',\n 'Date': datetime(2015, 1, 1),\n 'LastUpdateAssociationDate': datetime(2015, 1, 1),\n 'Status': {\n 'Date': datetime(2015, 1, 1),\n 'Name': 'Pending'|'Success'|'Failed',\n 'Message': 'string',\n 'AdditionalInfo': 'string'\n },\n 'Overview': {\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'AssociationStatusAggregatedCount': {\n 'string': 123\n }\n },\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'AssociationId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'LastSuccessfulExecutionDate': datetime(2015, 1, 1),\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n }\n }\n \n \n :returns: \n (string) --\n (integer) --\n \n \n \n \"\"\"\n pass\n\ndef create_association_batch(Entries=None):\n \"\"\"\n Associates the specified Systems Manager document with the specified instances or targets.\n When you associate a document with one or more instances using instance IDs or tags, SSM Agent running on the instance processes the document and configures the instance as specified.\n If you associate a 
document with an instance that already has an associated document, the system returns the AssociationAlreadyExists exception.\n See also: AWS API Documentation\n \n \n :example: response = client.create_association_batch(\n Entries=[\n {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'DocumentVersion': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n },\n ]\n )\n \n \n :type Entries: list\n :param Entries: [REQUIRED]\n One or more associations.\n (dict) --Describes the association of a Systems Manager SSM document and an instance.\n Name (string) -- [REQUIRED]The name of the configuration document.\n InstanceId (string) --The ID of the instance.\n Parameters (dict) --A description of the parameters for a document.\n (string) --\n (list) --\n (string) --\n \n DocumentVersion (string) --The document version.\n Targets (list) --The instances targeted by the request.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n ScheduleExpression (string) --A cron expression that specifies a schedule when the association runs.\n OutputLocation (dict) --An Amazon S3 bucket where you want to store the results of this request.\n S3Location (dict) --An Amazon S3 bucket where you want to store the results of this request.\n OutputS3Region (string) --(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.\n OutputS3BucketName (string) --The name of the Amazon S3 bucket.\n OutputS3KeyPrefix (string) --The Amazon S3 bucket subfolder.\n \n AssociationName (string) --Specify a descriptive name for the association.\n MaxErrors (string) --The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. 
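The MaxErrors/MaxConcurrency interaction explained here is easiest to see in code: with MaxConcurrency of 1 the executions proceed one at a time, so MaxErrors of 0 guarantees at most one failed run. A sketch of a two-entry batch (the document name and tags, as before, are assumptions):

import boto3

ssm = boto3.client('ssm')
resp = ssm.create_association_batch(Entries=[
    {'Name': 'AWS-UpdateSSMAgent',
     'Targets': [{'Key': 'tag:Stack', 'Values': ['Production']}],
     'ScheduleExpression': 'rate(7 days)',
     'MaxConcurrency': '1',  # one target at a time ...
     'MaxErrors': '0'},      # ... so the run halts on the first failure
    {'Name': 'AWS-UpdateSSMAgent',
     'Targets': [{'Key': 'tag:Stack', 'Values': ['Test']}],
     'ScheduleExpression': 'rate(1 day)'},
])
print(len(resp['Successful']), 'created,', len(resp['Failed']), 'failed')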
If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.\n Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.\n MaxConcurrency (string) --The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.\n If a new instance starts and attempts to execute an association while Systems Manager is executing MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.\n ComplianceSeverity (string) --The severity level to assign to the association.\n \n \n\n :rtype: dict\n :return: {\n 'Successful': [\n {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'AssociationVersion': 'string',\n 'Date': datetime(2015, 1, 1),\n 'LastUpdateAssociationDate': datetime(2015, 1, 1),\n 'Status': {\n 'Date': datetime(2015, 1, 1),\n 'Name': 'Pending'|'Success'|'Failed',\n 'Message': 'string',\n 'AdditionalInfo': 'string'\n },\n 'Overview': {\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'AssociationStatusAggregatedCount': {\n 'string': 123\n }\n },\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'AssociationId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'LastSuccessfulExecutionDate': datetime(2015, 1, 1),\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n },\n ],\n 'Failed': [\n {\n 'Entry': {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'DocumentVersion': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n },\n 'Message': 'string',\n 'Fault': 'Client'|'Server'|'Unknown'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_document(Content=None, Attachments=None, Name=None, VersionName=None, DocumentType=None, DocumentFormat=None, TargetType=None):\n \"\"\"\n Creates a Systems Manager document.\n After you create a document, you can use CreateAssociation to associate it with one or more running instances.\n See also: AWS API Documentation\n \n \n :example: response = client.create_document(\n Content='string',\n Attachments=[\n {\n 'Key': 'SourceUrl',\n 'Values': [\n 'string',\n ]\n },\n ],\n Name='string',\n VersionName='string',\n DocumentType='Command'|'Policy'|'Automation'|'Session'|'Package',\n 
DocumentFormat='YAML'|'JSON',\n TargetType='string'\n )\n \n \n :type Content: string\n :param Content: [REQUIRED]\n A valid JSON or YAML string.\n \n\n :type Attachments: list\n :param Attachments: A list of key and value pairs that describe attachments to a version of a document.\n (dict) --A key and value pair that identifies the location of an attachment to a document.\n Key (string) --The key of a key and value pair that identifies the location of an attachment to a document.\n Values (list) --The URL of the location of a document attachment, such as the URL of an Amazon S3 bucket.\n (string) --\n \n \n\n :type Name: string\n :param Name: [REQUIRED]\n A name for the Systems Manager document.\n Warning\n Do not use the following to begin the names of documents you create. They are reserved by AWS for use as document prefixes:\n aws\n amazon\n amzn\n \n\n :type VersionName: string\n :param VersionName: An optional field specifying the version of the artifact you are creating with the document. For example, 'Release 12, Update 6'. This value is unique across all versions of a document, and cannot be changed.\n\n :type DocumentType: string\n :param DocumentType: The type of document to create. Valid document types include: Command , Policy , Automation , Session , and Package .\n\n :type DocumentFormat: string\n :param DocumentFormat: Specify the document format for the request. The document format can be either JSON or YAML. JSON is the default format.\n\n :type TargetType: string\n :param TargetType: Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. 
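A minimal sketch of create_document with a schema-2.2 Command document; the document body and name are illustrative (note the reserved aws/amazon/amzn name prefixes called out above):

import json
import boto3

ssm = boto3.client('ssm')
content = {
    'schemaVersion': '2.2',
    'description': 'Echo a greeting on the target instance',
    'mainSteps': [{
        'action': 'aws:runShellScript',
        'name': 'sayHello',
        'inputs': {'runCommand': ['echo hello']},
    }],
}
resp = ssm.create_document(
    Content=json.dumps(content),
    Name='Example-EchoGreeting',       # illustrative name
    DocumentType='Command',
    DocumentFormat='JSON',
    TargetType='/AWS::EC2::Instance')  # restrict to EC2 instances
print(resp['DocumentDescription']['Status'])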
For a list of valid resource types, see AWS Resource Types Reference in the AWS CloudFormation User Guide .\n\n :rtype: dict\n :return: {\n 'DocumentDescription': {\n 'Sha1': 'string',\n 'Hash': 'string',\n 'HashType': 'Sha256'|'Sha1',\n 'Name': 'string',\n 'VersionName': 'string',\n 'Owner': 'string',\n 'CreatedDate': datetime(2015, 1, 1),\n 'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',\n 'StatusInformation': 'string',\n 'DocumentVersion': 'string',\n 'Description': 'string',\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList',\n 'Description': 'string',\n 'DefaultValue': 'string'\n },\n ],\n 'PlatformTypes': [\n 'Windows'|'Linux',\n ],\n 'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',\n 'SchemaVersion': 'string',\n 'LatestVersion': 'string',\n 'DefaultVersion': 'string',\n 'DocumentFormat': 'YAML'|'JSON',\n 'TargetType': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'AttachmentsInformation': [\n {\n 'Name': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_maintenance_window(Name=None, Description=None, StartDate=None, EndDate=None, Schedule=None, ScheduleTimezone=None, Duration=None, Cutoff=None, AllowUnassociatedTargets=None, ClientToken=None):\n \"\"\"\n Creates a new Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.create_maintenance_window(\n Name='string',\n Description='string',\n StartDate='string',\n EndDate='string',\n Schedule='string',\n ScheduleTimezone='string',\n Duration=123,\n Cutoff=123,\n AllowUnassociatedTargets=True|False,\n ClientToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the Maintenance Window.\n \n\n :type Description: string\n :param Description: An optional description for the Maintenance Window. We recommend specifying a description to help you organize your Maintenance Windows.\n\n :type StartDate: string\n :param StartDate: The date and time, in ISO-8601 Extended format, for when you want the Maintenance Window to become active. StartDate allows you to delay activation of the Maintenance Window until the specified future date.\n\n :type EndDate: string\n :param EndDate: The date and time, in ISO-8601 Extended format, for when you want the Maintenance Window to become inactive. EndDate allows you to set a date and time in the future when the Maintenance Window will no longer run.\n\n :type Schedule: string\n :param Schedule: [REQUIRED]\n The schedule of the Maintenance Window in the form of a cron or rate expression.\n \n\n :type ScheduleTimezone: string\n :param ScheduleTimezone: The time zone that the scheduled Maintenance Window executions are based on, in Internet Assigned Numbers Authority (IANA) format. For example: 'America/Los_Angeles', 'etc/UTC', or 'Asia/Seoul'. For more information, see the Time Zone Database on the IANA website.\n\n :type Duration: integer\n :param Duration: [REQUIRED]\n The duration of the Maintenance Window in hours.\n \n\n :type Cutoff: integer\n :param Cutoff: [REQUIRED]\n The number of hours before the end of the Maintenance Window that Systems Manager stops scheduling new tasks for execution.\n \n\n :type AllowUnassociatedTargets: boolean\n :param AllowUnassociatedTargets: [REQUIRED]\n Enables a Maintenance Window task to execute on managed instances, even if you have not registered those instances as targets. 
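Tying the scheduling fields above together, a sketch that opens a three-hour window every Saturday at 04:00 UTC and stops launching new tasks an hour before it closes:

import boto3

ssm = boto3.client('ssm')
resp = ssm.create_maintenance_window(
    Name='prod-patching',
    Schedule='cron(0 4 ? * SAT *)',  # Saturdays at 04:00
    ScheduleTimezone='Etc/UTC',      # IANA time zone name
    Duration=3,                      # window stays open for 3 hours
    Cutoff=1,                        # no new tasks in the final hour
    AllowUnassociatedTargets=False)
print(resp['WindowId'])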
If enabled, then you must specify the unregistered instances (by instance ID) when you register a task with the Maintenance Window\n If you don't enable this option, then you must specify previously-registered targets when you register a task with the Maintenance Window.\n \n\n :type ClientToken: string\n :param ClientToken: User-provided idempotency token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'WindowId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_patch_baseline(OperatingSystem=None, Name=None, GlobalFilters=None, ApprovalRules=None, ApprovedPatches=None, ApprovedPatchesComplianceLevel=None, ApprovedPatchesEnableNonSecurity=None, RejectedPatches=None, RejectedPatchesAction=None, Description=None, Sources=None, ClientToken=None):\n \"\"\"\n Creates a patch baseline.\n See also: AWS API Documentation\n \n \n :example: response = client.create_patch_baseline(\n OperatingSystem='WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS',\n Name='string',\n GlobalFilters={\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n ApprovalRules={\n 'PatchRules': [\n {\n 'PatchFilterGroup': {\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApproveAfterDays': 123,\n 'EnableNonSecurity': True|False\n },\n ]\n },\n ApprovedPatches=[\n 'string',\n ],\n ApprovedPatchesComplianceLevel='CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n ApprovedPatchesEnableNonSecurity=True|False,\n RejectedPatches=[\n 'string',\n ],\n RejectedPatchesAction='ALLOW_AS_DEPENDENCY'|'BLOCK',\n Description='string',\n Sources=[\n {\n 'Name': 'string',\n 'Products': [\n 'string',\n ],\n 'Configuration': 'string'\n },\n ],\n ClientToken='string'\n )\n \n \n :type OperatingSystem: string\n :param OperatingSystem: Defines the operating system the patch baseline applies to. The Default value is WINDOWS.\n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the patch baseline.\n \n\n :type GlobalFilters: dict\n :param GlobalFilters: A set of global filters used to exclude patches from the baseline.\n PatchFilters (list) -- [REQUIRED]The set of patch filters that make up the group.\n (dict) --Defines a patch filter.\n A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY , however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.\n Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.\n Windows Operating Systems\n The supported keys for Windows operating systems are PRODUCT , CLASSIFICATION , and MSRC_SEVERITY . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Windows7\n Windows8\n Windows8.1\n Windows8Embedded\n Windows10\n Windows10LTSB\n WindowsServer2008\n WindowsServer2008R2\n WindowsServer2012\n WindowsServer2012R2\n WindowsServer2016\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n CriticalUpdates\n DefinitionUpdates\n Drivers\n FeaturePacks\n SecurityUpdates\n ServicePacks\n Tools\n UpdateRollups\n Updates\n Upgrades\n Supported key: MSRC_SEVERITY\n Supported values:\n Critical\n Important\n Moderate\n Low\n Unspecified\n Ubuntu Operating Systems\n The supported keys for Ubuntu operating systems are PRODUCT , PRIORITY , and SECTION . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Ubuntu14.04\n Ubuntu16.04\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: PRIORITY\n Supported values:\n Required\n Important\n Standard\n Optional\n Extra\n Supported key: SECTION\n Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.\n Amazon Linux Operating Systems\n The supported keys for Amazon Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n AmazonLinux2012.03\n AmazonLinux2012.09\n AmazonLinux2013.03\n AmazonLinux2013.09\n AmazonLinux2014.03\n AmazonLinux2014.09\n AmazonLinux2015.03\n AmazonLinux2015.09\n AmazonLinux2016.03\n AmazonLinux2016.09\n AmazonLinux2017.03\n AmazonLinux2017.09\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n Amazon Linux 2 Operating Systems\n The supported keys for Amazon Linux 2 operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n AmazonLinux2\n AmazonLinux2.0\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n RedHat Enterprise Linux (RHEL) Operating Systems\n The supported keys for RedHat Enterprise Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n RedhatEnterpriseLinux6.5\n RedhatEnterpriseLinux6.6\n RedhatEnterpriseLinux6.7\n RedhatEnterpriseLinux6.8\n RedhatEnterpriseLinux6.9\n RedhatEnterpriseLinux7.0\n RedhatEnterpriseLinux7.1\n RedhatEnterpriseLinux7.2\n RedhatEnterpriseLinux7.3\n RedhatEnterpriseLinux7.4\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n SUSE Linux Enterprise Server (SLES) Operating Systems\n The supported keys for SLES operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Suse12.0\n Suse12.1\n Suse12.2\n Suse12.3\n Suse12.4\n Suse12.5\n Suse12.6\n Suse12.7\n Suse12.8\n Suse12.9\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Recommended\n Optional\n Feature\n Document\n Yast\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Moderate\n Low\n CentOS Operating Systems\n The supported keys for CentOS operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n CentOS6.5\n CentOS6.6\n CentOS6.7\n CentOS6.8\n CentOS6.9\n CentOS7.0\n CentOS7.1\n CentOS7.2\n CentOS7.3\n CentOS7.4\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n Key (string) -- [REQUIRED]The key for the filter.\n See PatchFilter for lists of valid keys for each operating system type.\n Values (list) -- [REQUIRED]The value for the filter key.\n See PatchFilter for lists of valid values for each key based on operating system type.\n (string) --\n \n \n\n :type ApprovalRules: dict\n :param ApprovalRules: A set of rules used to include patches in the baseline.\n PatchRules (list) -- [REQUIRED]The rules that make up the rule group.\n (dict) --Defines an approval rule for a patch baseline.\n PatchFilterGroup (dict) -- [REQUIRED]The patch filter group that defines the criteria for the rule.\n PatchFilters (list) -- [REQUIRED]The set of patch filters that make up the group.\n (dict) --Defines a patch filter.\n A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY , however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.\n Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.\n Windows Operating Systems\n The supported keys for Windows operating systems are PRODUCT , CLASSIFICATION , and MSRC_SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Windows7\n Windows8\n Windows8.1\n Windows8Embedded\n Windows10\n Windows10LTSB\n WindowsServer2008\n WindowsServer2008R2\n WindowsServer2012\n WindowsServer2012R2\n WindowsServer2016\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n CriticalUpdates\n DefinitionUpdates\n Drivers\n FeaturePacks\n SecurityUpdates\n ServicePacks\n Tools\n UpdateRollups\n Updates\n Upgrades\n Supported key: MSRC_SEVERITY\n Supported values:\n Critical\n Important\n Moderate\n Low\n Unspecified\n Ubuntu Operating Systems\n The supported keys for Ubuntu operating systems are PRODUCT , PRIORITY , and SECTION . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Ubuntu14.04\n Ubuntu16.04\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: PRIORITY\n Supported values:\n Required\n Important\n Standard\n Optional\n Extra\n Supported key: SECTION\n Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.\n Amazon Linux Operating Systems\n The supported keys for Amazon Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n AmazonLinux2012.03\n AmazonLinux2012.09\n AmazonLinux2013.03\n AmazonLinux2013.09\n AmazonLinux2014.03\n AmazonLinux2014.09\n AmazonLinux2015.03\n AmazonLinux2015.09\n AmazonLinux2016.03\n AmazonLinux2016.09\n AmazonLinux2017.03\n AmazonLinux2017.09\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n Amazon Linux 2 Operating Systems\n The supported keys for Amazon Linux 2 operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n AmazonLinux2\n AmazonLinux2.0\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n RedHat Enterprise Linux (RHEL) Operating Systems\n The supported keys for RedHat Enterprise Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n RedhatEnterpriseLinux6.5\n RedhatEnterpriseLinux6.6\n RedhatEnterpriseLinux6.7\n RedhatEnterpriseLinux6.8\n RedhatEnterpriseLinux6.9\n RedhatEnterpriseLinux7.0\n RedhatEnterpriseLinux7.1\n RedhatEnterpriseLinux7.2\n RedhatEnterpriseLinux7.3\n RedhatEnterpriseLinux7.4\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n SUSE Linux Enterprise Server (SLES) Operating Systems\n The supported keys for SLES operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Suse12.0\n Suse12.1\n Suse12.2\n Suse12.3\n Suse12.4\n Suse12.5\n Suse12.6\n Suse12.7\n Suse12.8\n Suse12.9\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Recommended\n Optional\n Feature\n Document\n Yast\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Moderate\n Low\n CentOS Operating Systems\n The supported keys for CentOS operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n CentOS6.5\n CentOS6.6\n CentOS6.7\n CentOS6.8\n CentOS6.9\n CentOS7.0\n CentOS7.1\n CentOS7.2\n CentOS7.3\n CentOS7.4\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n Key (string) -- [REQUIRED]The key for the filter.\n See PatchFilter for lists of valid keys for each operating system type.\n Values (list) -- [REQUIRED]The value for the filter key.\n See PatchFilter for lists of valid values for each key based on operating system type.\n (string) --\n \n \n ComplianceLevel (string) --A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.\n ApproveAfterDays (integer) -- [REQUIRED]The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released.\n EnableNonSecurity (boolean) --For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is 'false'. Applies to Linux instances only.\n \n \n\n :type ApprovedPatches: list\n :param ApprovedPatches: A list of explicitly approved patches for the baseline.\n For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide .\n (string) --\n \n\n :type ApprovedPatchesComplianceLevel: string\n :param ApprovedPatchesComplianceLevel: Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. The default value is UNSPECIFIED.\n\n :type ApprovedPatchesEnableNonSecurity: boolean\n :param ApprovedPatchesEnableNonSecurity: Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.\n\n :type RejectedPatches: list\n :param RejectedPatches: A list of explicitly rejected patches for the baseline.\n For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide .\n (string) --\n \n\n :type RejectedPatchesAction: string\n :param RejectedPatchesAction: The action for Patch Manager to take on patches included in the RejectedPackages list.\n ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther . This is the default action if no option is specified.\n BLOCK : Packages in the RejectedPatches list, and packages that include them as dependencies, are not installed under any circumstances. 
If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as InstalledRejected .\n \n\n :type Description: string\n :param Description: A description of the patch baseline.\n\n :type Sources: list\n :param Sources: Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.\n (dict) --Information about the patches to use to update the instances, including target operating systems and source repository. Applies to Linux instances only.\n Name (string) -- [REQUIRED]The name specified to identify the patch source.\n Products (list) -- [REQUIRED]The specific operating system versions a patch repository applies to, such as 'Ubuntu16.04', 'AmazonLinux2016.09', 'RedhatEnterpriseLinux7.2' or 'Suse12.7'. For lists of supported product values, see PatchFilter .\n (string) --\n Configuration (string) -- [REQUIRED]The value of the yum repo configuration. For example:\n cachedir=/var/cache/yum/$basesearch$releasever\n keepcache=0\n debuglevel=2\n \n \n\n :type ClientToken: string\n :param ClientToken: User-provided idempotency token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'BaselineId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_resource_data_sync(SyncName=None, S3Destination=None):\n \"\"\"\n Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync .\n By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Create a Resource Data Sync for Inventory in the AWS Systems Manager User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_resource_data_sync(\n SyncName='string',\n S3Destination={\n 'BucketName': 'string',\n 'Prefix': 'string',\n 'SyncFormat': 'JsonSerDe',\n 'Region': 'string',\n 'AWSKMSKeyARN': 'string'\n }\n )\n \n \n :type SyncName: string\n :param SyncName: [REQUIRED]\n A name for the configuration.\n \n\n :type S3Destination: dict\n :param S3Destination: [REQUIRED]\n Amazon S3 configuration details for the sync.\n BucketName (string) -- [REQUIRED]The name of the Amazon S3 bucket where the aggregated data is stored.\n Prefix (string) --An Amazon S3 prefix for the bucket.\n SyncFormat (string) -- [REQUIRED]A supported sync format. The following format is currently supported: JsonSerDe\n Region (string) -- [REQUIRED]The AWS Region with the Amazon S3 bucket targeted by the Resource Data Sync.\n AWSKMSKeyARN (string) --The ARN of an encryption key for a destination in Amazon S3. Must belong to the same region as the destination Amazon S3 bucket.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_activation(ActivationId=None):\n \"\"\"\n Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. 
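As a hedged usage sketch (the activation ID below is a placeholder, not a value from this documentation), a deletion call might look like:\n import boto3\n client = boto3.client('ssm') # assumes AWS credentials and region are already configured\n client.delete_activation(ActivationId='11111111-2222-3333-4444-555555555555') # hypothetical activation ID\n 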
Deleting an activation does not de-register managed instances. You must manually de-register managed instances.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_activation(\n ActivationId='string'\n )\n \n \n :type ActivationId: string\n :param ActivationId: [REQUIRED]\n The ID of the activation that you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_association(Name=None, InstanceId=None, AssociationId=None):\n \"\"\"\n Disassociates the specified Systems Manager document from the specified instance.\n When you disassociate a document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_association(\n Name='string',\n InstanceId='string',\n AssociationId='string'\n )\n \n \n :type Name: string\n :param Name: The name of the Systems Manager document.\n\n :type InstanceId: string\n :param InstanceId: The ID of the instance.\n\n :type AssociationId: string\n :param AssociationId: The association ID that you want to delete.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_document(Name=None):\n \"\"\"\n Deletes the Systems Manager document and all instance associations to the document.\n Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_document(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the document.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_inventory(TypeName=None, SchemaDeleteOption=None, DryRun=None, ClientToken=None):\n \"\"\"\n Delete a custom inventory type, or the data associated with a custom Inventory type. Deleting a custom inventory type is also referred to as deleting a custom inventory schema.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_inventory(\n TypeName='string',\n SchemaDeleteOption='DisableSchema'|'DeleteSchema',\n DryRun=True|False,\n ClientToken='string'\n )\n \n \n :type TypeName: string\n :param TypeName: [REQUIRED]\n The name of the custom inventory type for which you want to delete either all previously collected data, or the inventory type itself.\n \n\n :type SchemaDeleteOption: string\n :param SchemaDeleteOption: Use the SchemaDeleteOption to delete a custom inventory type (schema). If you don't choose this option, the system only deletes existing inventory data associated with the custom inventory type. Choose one of the following options:\n DisableSchema: If you choose this option, the system ignores all inventory data for the specified version, and any earlier versions. To enable this schema again, you must call the PutInventory action for a version greater than the disabled version.\n DeleteSchema: This option deletes the specified custom type from the Inventory service. You can recreate the schema later, if you want.\n \n\n :type DryRun: boolean\n :param DryRun: Use this option to view a summary of the deletion request without deleting any data or the data type. This option is useful when you only want to understand what will be deleted. 
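For instance, a preview-only call might look like this sketch (the custom type name is a hypothetical placeholder):\n response = client.delete_inventory(\n TypeName='Custom:RackInfo', # hypothetical custom inventory type\n SchemaDeleteOption='DeleteSchema',\n DryRun=True # summarize what would be deleted without deleting anything\n )\n 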
Once you validate that the data to be deleted is what you intend to delete, you can run the same command without specifying the DryRun option.\n\n :type ClientToken: string\n :param ClientToken: User-provided idempotency token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'DeletionId': 'string',\n 'TypeName': 'string',\n 'DeletionSummary': {\n 'TotalCount': 123,\n 'RemainingCount': 123,\n 'SummaryItems': [\n {\n 'Version': 'string',\n 'Count': 123,\n 'RemainingCount': 123\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_maintenance_window(WindowId=None):\n \"\"\"\n Deletes a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_maintenance_window(\n WindowId='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window to delete.\n \n\n :rtype: dict\n :return: {\n 'WindowId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_parameter(Name=None):\n \"\"\"\n Delete a parameter from the system.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_parameter(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the parameter to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_parameters(Names=None):\n \"\"\"\n Delete a list of parameters. This API is used to delete parameters by using the Amazon EC2 console.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_parameters(\n Names=[\n 'string',\n ]\n )\n \n \n :type Names: list\n :param Names: [REQUIRED]\n The names of the parameters to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DeletedParameters': [\n 'string',\n ],\n 'InvalidParameters': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_patch_baseline(BaselineId=None):\n \"\"\"\n Deletes a patch baseline.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_patch_baseline(\n BaselineId='string'\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline to delete.\n \n\n :rtype: dict\n :return: {\n 'BaselineId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_resource_data_sync(SyncName=None):\n \"\"\"\n Deletes a Resource Data Sync configuration. After the configuration is deleted, changes to inventory data on managed instances are no longer synced with the target Amazon S3 bucket. Deleting a sync configuration does not delete data in the target Amazon S3 bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_resource_data_sync(\n SyncName='string'\n )\n \n \n :type SyncName: string\n :param SyncName: [REQUIRED]\n The name of the configuration to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef deregister_managed_instance(InstanceId=None):\n \"\"\"\n Removes the server or virtual machine from the list of registered servers. You can reregister the instance again at any time. 
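A minimal sketch, assuming an SSM client named client and a placeholder hybrid-instance ID:\n client.deregister_managed_instance(InstanceId='mi-0123456789abcdef0') # hypothetical managed-instance ID\n 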
If you don't plan to use Run Command on the server, we suggest uninstalling SSM Agent first.\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_managed_instance(\n InstanceId='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID assigned to the managed instance when you registered it using the activation process.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef deregister_patch_baseline_for_patch_group(BaselineId=None, PatchGroup=None):\n \"\"\"\n Removes a patch group from a patch baseline.\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_patch_baseline_for_patch_group(\n BaselineId='string',\n PatchGroup='string'\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline to deregister the patch group from.\n \n\n :type PatchGroup: string\n :param PatchGroup: [REQUIRED]\n The name of the patch group that should be deregistered from the patch baseline.\n \n\n :rtype: dict\n :return: {\n 'BaselineId': 'string',\n 'PatchGroup': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef deregister_target_from_maintenance_window(WindowId=None, WindowTargetId=None, Safe=None):\n \"\"\"\n Removes a target from a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_target_from_maintenance_window(\n WindowId='string',\n WindowTargetId='string',\n Safe=True|False\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window the target should be removed from.\n \n\n :type WindowTargetId: string\n :param WindowTargetId: [REQUIRED]\n The ID of the target definition to remove.\n \n\n :type Safe: boolean\n :param Safe: The system checks if the target is being referenced by a task. 
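For example, a safety-checked removal might look like this sketch (both IDs are placeholders):\n client.deregister_target_from_maintenance_window(\n WindowId='mw-placeholder-id', # hypothetical Maintenance Window ID\n WindowTargetId='window-target-placeholder-id',\n Safe=True # fail instead of removing a target that a task still references\n )\n 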
If the target is being referenced, the system returns an error and does not deregister the target from the Maintenance Window.\n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'WindowTargetId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef deregister_task_from_maintenance_window(WindowId=None, WindowTaskId=None):\n \"\"\"\n Removes a task from a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.deregister_task_from_maintenance_window(\n WindowId='string',\n WindowTaskId='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window the task should be removed from.\n \n\n :type WindowTaskId: string\n :param WindowTaskId: [REQUIRED]\n The ID of the task to remove from the Maintenance Window.\n \n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'WindowTaskId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_activations(Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Details about the activation, including: the date and time the activation was created, the expiration date, the IAM role assigned to the instances in the activation, and the number of instances activated by this registration.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_activations(\n Filters=[\n {\n 'FilterKey': 'ActivationIds'|'DefaultInstanceName'|'IamRole',\n 'FilterValues': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: A filter to view information about your activations.\n (dict) --Filter for the DescribeActivation API.\n FilterKey (string) --The name of the filter.\n FilterValues (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ActivationList': [\n {\n 'ActivationId': 'string',\n 'Description': 'string',\n 'DefaultInstanceName': 'string',\n 'IamRole': 'string',\n 'RegistrationLimit': 123,\n 'RegistrationsCount': 123,\n 'ExpirationDate': datetime(2015, 1, 1),\n 'Expired': True|False,\n 'CreatedDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_association(Name=None, InstanceId=None, AssociationId=None, AssociationVersion=None):\n \"\"\"\n Describes the association for the specified target or instance. If you created the association by using the Targets parameter, then you must retrieve the association by using the association ID. If you created the association by specifying an instance ID and a Systems Manager document, then you retrieve the association by specifying the document name and the instance ID.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_association(\n Name='string',\n InstanceId='string',\n AssociationId='string',\n AssociationVersion='string'\n )\n \n \n :type Name: string\n :param Name: The name of the Systems Manager document.\n\n :type InstanceId: string\n :param InstanceId: The instance ID.\n\n :type AssociationId: string\n :param AssociationId: The association ID for which you want information.\n\n :type AssociationVersion: string\n :param AssociationVersion: Specify the association version to retrieve. 
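A short sketch (the association ID is a placeholder):\n client.describe_association(AssociationId='association-id-placeholder', AssociationVersion='$LATEST')\n 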
To view the latest version, either specify $LATEST for this parameter, or omit this parameter. To view a list of all associations for an instance, use ListInstanceAssociations. To get a list of versions for a specific association, use ListAssociationVersions.\n\n :rtype: dict\n :return: {\n 'AssociationDescription': {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'AssociationVersion': 'string',\n 'Date': datetime(2015, 1, 1),\n 'LastUpdateAssociationDate': datetime(2015, 1, 1),\n 'Status': {\n 'Date': datetime(2015, 1, 1),\n 'Name': 'Pending'|'Success'|'Failed',\n 'Message': 'string',\n 'AdditionalInfo': 'string'\n },\n 'Overview': {\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'AssociationStatusAggregatedCount': {\n 'string': 123\n }\n },\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'AssociationId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'LastSuccessfulExecutionDate': datetime(2015, 1, 1),\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n }\n }\n \n \n :returns: \n (string) --\n (integer) --\n \n \n \n \"\"\"\n pass\n\ndef describe_association_execution_targets(AssociationId=None, ExecutionId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Use this API action to view information about a specific execution of a specific association.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_association_execution_targets(\n AssociationId='string',\n ExecutionId='string',\n Filters=[\n {\n 'Key': 'Status'|'ResourceId'|'ResourceType',\n 'Value': 'string'\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AssociationId: string\n :param AssociationId: [REQUIRED]\n The association ID that includes the execution for which you want to view details.\n \n\n :type ExecutionId: string\n :param ExecutionId: [REQUIRED]\n The execution ID for which you want to view details.\n \n\n :type Filters: list\n :param Filters: Filters for the request. You can specify the following filters and values.\n Status (EQUAL)\n ResourceId (EQUAL)\n ResourceType (EQUAL)\n (dict) --Filters for the association execution.\n Key (string) -- [REQUIRED]The key value used in the request.\n Value (string) -- [REQUIRED]The value specified for the key.\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: A token to start the list. 
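Because results are paged, callers typically loop until no token is returned; a hedged sketch with placeholder IDs:\n kwargs = {'AssociationId': 'association-id-placeholder', 'ExecutionId': 'execution-id-placeholder'}\n while True:\n page = client.describe_association_execution_targets(**kwargs)\n # process page['AssociationExecutionTargets'] here\n token = page.get('NextToken')\n if not token:\n break\n kwargs['NextToken'] = token\n 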
Use this token to get the next set of results.\n\n :rtype: dict\n :return: {\n 'AssociationExecutionTargets': [\n {\n 'AssociationId': 'string',\n 'AssociationVersion': 'string',\n 'ExecutionId': 'string',\n 'ResourceId': 'string',\n 'ResourceType': 'string',\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'OutputSource': {\n 'OutputSourceId': 'string',\n 'OutputSourceType': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_association_executions(AssociationId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Use this API action to view all executions for a specific association ID.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_association_executions(\n AssociationId='string',\n Filters=[\n {\n 'Key': 'ExecutionId'|'Status'|'CreatedTime',\n 'Value': 'string',\n 'Type': 'EQUAL'|'LESS_THAN'|'GREATER_THAN'\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AssociationId: string\n :param AssociationId: [REQUIRED]\n The association ID for which you want to view execution history details.\n \n\n :type Filters: list\n :param Filters: Filters for the request. You can specify the following filters and values.\n ExecutionId (EQUAL)\n Status (EQUAL)\n CreatedTime (EQUAL, GREATER_THAN, LESS_THAN)\n (dict) --Filters used in the request.\n Key (string) -- [REQUIRED]The key value used in the request.\n Value (string) -- [REQUIRED]The value specified for the key.\n Type (string) -- [REQUIRED]The filter type specified in the request.\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :rtype: dict\n :return: {\n 'AssociationExecutions': [\n {\n 'AssociationId': 'string',\n 'AssociationVersion': 'string',\n 'ExecutionId': 'string',\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'ResourceCountByStatus': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_automation_executions(Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Provides details about all active and terminated Automation executions.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_automation_executions(\n Filters=[\n {\n 'Key': 'DocumentNamePrefix'|'ExecutionStatus'|'ExecutionId'|'ParentExecutionId'|'CurrentAction'|'StartTimeBefore'|'StartTimeAfter'|'AutomationType',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: Filters used to limit the scope of executions that are requested.\n (dict) --A filter used to match specific automation executions. This is used to limit the scope of Automation execution information returned.\n Key (string) -- [REQUIRED]One or more keys to limit the results. Valid filter keys include the following: DocumentNamePrefix, ExecutionStatus, ExecutionId, ParentExecutionId, CurrentAction, StartTimeBefore, StartTimeAfter.\n Values (list) -- [REQUIRED]The values used to limit the execution information associated with the filter's key.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. 
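A filtered sketch (ExecutionStatus is one of the valid filter keys listed above; 'Failed' is one of its statuses):\n response = client.describe_automation_executions(\n Filters=[{'Key': 'ExecutionStatus', 'Values': ['Failed']}],\n MaxResults=25\n )\n 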
The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'AutomationExecutionMetadataList': [\n {\n 'AutomationExecutionId': 'string',\n 'DocumentName': 'string',\n 'DocumentVersion': 'string',\n 'AutomationExecutionStatus': 'Pending'|'InProgress'|'Waiting'|'Success'|'TimedOut'|'Cancelling'|'Cancelled'|'Failed',\n 'ExecutionStartTime': datetime(2015, 1, 1),\n 'ExecutionEndTime': datetime(2015, 1, 1),\n 'ExecutedBy': 'string',\n 'LogFile': 'string',\n 'Outputs': {\n 'string': [\n 'string',\n ]\n },\n 'Mode': 'Auto'|'Interactive',\n 'ParentAutomationExecutionId': 'string',\n 'CurrentStepName': 'string',\n 'CurrentAction': 'string',\n 'FailureMessage': 'string',\n 'TargetParameterName': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TargetMaps': [\n {\n 'string': [\n 'string',\n ]\n },\n ],\n 'ResolvedTargets': {\n 'ParameterValues': [\n 'string',\n ],\n 'Truncated': True|False\n },\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'Target': 'string',\n 'AutomationType': 'CrossAccount'|'Local'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef describe_automation_step_executions(AutomationExecutionId=None, Filters=None, NextToken=None, MaxResults=None, ReverseOrder=None):\n \"\"\"\n Information about all active and terminated step executions in an Automation workflow.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_automation_step_executions(\n AutomationExecutionId='string',\n Filters=[\n {\n 'Key': 'StartTimeBefore'|'StartTimeAfter'|'StepExecutionStatus'|'StepExecutionId'|'StepName'|'Action',\n 'Values': [\n 'string',\n ]\n },\n ],\n NextToken='string',\n MaxResults=123,\n ReverseOrder=True|False\n )\n \n \n :type AutomationExecutionId: string\n :param AutomationExecutionId: [REQUIRED]\n The Automation execution ID for which you want step execution descriptions.\n \n\n :type Filters: list\n :param Filters: One or more filters to limit the number of step executions returned by the request.\n (dict) --A filter to limit the amount of step execution information returned by the call.\n Key (string) -- [REQUIRED]One or more keys to limit the results. Valid filter keys include the following: StepName, Action, StepExecutionId, StepExecutionStatus, StartTimeBefore, StartTimeAfter.\n Values (list) -- [REQUIRED]The values of the filter key.\n (string) --\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type ReverseOrder: boolean\n :param ReverseOrder: A boolean that indicates whether to list step executions in reverse order by start time. 
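A brief sketch (the execution ID is a placeholder):\n client.describe_automation_step_executions(AutomationExecutionId='automation-execution-id-placeholder', ReverseOrder=True) # newest steps first\n 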
The default value is false.\n\n :rtype: dict\n :return: {\n 'StepExecutions': [\n {\n 'StepName': 'string',\n 'Action': 'string',\n 'TimeoutSeconds': 123,\n 'OnFailure': 'string',\n 'MaxAttempts': 123,\n 'ExecutionStartTime': datetime(2015, 1, 1),\n 'ExecutionEndTime': datetime(2015, 1, 1),\n 'StepStatus': 'Pending'|'InProgress'|'Waiting'|'Success'|'TimedOut'|'Cancelling'|'Cancelled'|'Failed',\n 'ResponseCode': 'string',\n 'Inputs': {\n 'string': 'string'\n },\n 'Outputs': {\n 'string': [\n 'string',\n ]\n },\n 'Response': 'string',\n 'FailureMessage': 'string',\n 'FailureDetails': {\n 'FailureStage': 'string',\n 'FailureType': 'string',\n 'Details': {\n 'string': [\n 'string',\n ]\n }\n },\n 'StepExecutionId': 'string',\n 'OverriddenParameters': {\n 'string': [\n 'string',\n ]\n },\n 'IsEnd': True|False,\n 'NextStep': 'string',\n 'IsCritical': True|False,\n 'ValidNextSteps': [\n 'string',\n ],\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TargetLocation': {\n 'Accounts': [\n 'string',\n ],\n 'Regions': [\n 'string',\n ],\n 'TargetLocationMaxConcurrency': 'string',\n 'TargetLocationMaxErrors': 'string',\n 'ExecutionRoleName': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_available_patches(Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists all patches that could possibly be included in a patch baseline.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_available_patches(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: Filters used to scope down the returned patches.\n (dict) --Defines a filter used in Patch Manager APIs.\n Key (string) --The key for the filter.\n Values (list) --The value for the filter.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of patches to return (per page).\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Patches': [\n {\n 'Id': 'string',\n 'ReleaseDate': datetime(2015, 1, 1),\n 'Title': 'string',\n 'Description': 'string',\n 'ContentUrl': 'string',\n 'Vendor': 'string',\n 'ProductFamily': 'string',\n 'Product': 'string',\n 'Classification': 'string',\n 'MsrcSeverity': 'string',\n 'KbNumber': 'string',\n 'MsrcNumber': 'string',\n 'Language': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_document(Name=None, DocumentVersion=None, VersionName=None):\n \"\"\"\n Describes the specified Systems Manager document.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_document(\n Name='string',\n DocumentVersion='string',\n VersionName='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the Systems Manager document.\n \n\n :type DocumentVersion: string\n :param DocumentVersion: The document version for which you want information. Can be a specific version or the default version.\n\n :type VersionName: string\n :param VersionName: An optional field specifying the version of the artifact associated with the document. For example, 'Release 12, Update 6'. 
This value is unique across all versions of a document, and cannot be changed.\n\n :rtype: dict\n :return: {\n 'Document': {\n 'Sha1': 'string',\n 'Hash': 'string',\n 'HashType': 'Sha256'|'Sha1',\n 'Name': 'string',\n 'VersionName': 'string',\n 'Owner': 'string',\n 'CreatedDate': datetime(2015, 1, 1),\n 'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',\n 'StatusInformation': 'string',\n 'DocumentVersion': 'string',\n 'Description': 'string',\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList',\n 'Description': 'string',\n 'DefaultValue': 'string'\n },\n ],\n 'PlatformTypes': [\n 'Windows'|'Linux',\n ],\n 'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',\n 'SchemaVersion': 'string',\n 'LatestVersion': 'string',\n 'DefaultVersion': 'string',\n 'DocumentFormat': 'YAML'|'JSON',\n 'TargetType': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'AttachmentsInformation': [\n {\n 'Name': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_document_permission(Name=None, PermissionType=None):\n \"\"\"\n Describes the permissions for a Systems Manager document. If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user's AWS account ID) or publicly (All ).\n See also: AWS API Documentation\n \n \n :example: response = client.describe_document_permission(\n Name='string',\n PermissionType='Share'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the document for which you are the owner.\n \n\n :type PermissionType: string\n :param PermissionType: [REQUIRED]\n The permission type for the document. The permission type can be Share .\n \n\n :rtype: dict\n :return: {\n 'AccountIds': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_effective_instance_associations(InstanceId=None, MaxResults=None, NextToken=None):\n \"\"\"\n All associations for the instance(s).\n See also: AWS API Documentation\n \n \n :example: response = client.describe_effective_instance_associations(\n InstanceId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The instance ID for which you want to view all associations.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Associations': [\n {\n 'AssociationId': 'string',\n 'InstanceId': 'string',\n 'Content': 'string',\n 'AssociationVersion': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_effective_patches_for_patch_baseline(BaselineId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. 
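A hedged sketch (the baseline ID is a placeholder):\n response = client.describe_effective_patches_for_patch_baseline(BaselineId='pb-placeholder-id', MaxResults=50)\n 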
Note that this API applies only to Windows patch baselines.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_effective_patches_for_patch_baseline(\n BaselineId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline to retrieve the effective patches for.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of patches to return (per page).\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'EffectivePatches': [\n {\n 'Patch': {\n 'Id': 'string',\n 'ReleaseDate': datetime(2015, 1, 1),\n 'Title': 'string',\n 'Description': 'string',\n 'ContentUrl': 'string',\n 'Vendor': 'string',\n 'ProductFamily': 'string',\n 'Product': 'string',\n 'Classification': 'string',\n 'MsrcSeverity': 'string',\n 'KbNumber': 'string',\n 'MsrcNumber': 'string',\n 'Language': 'string'\n },\n 'PatchStatus': {\n 'DeploymentStatus': 'APPROVED'|'PENDING_APPROVAL'|'EXPLICIT_APPROVED'|'EXPLICIT_REJECTED',\n 'ComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApprovalDate': datetime(2015, 1, 1)\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_instance_associations_status(InstanceId=None, MaxResults=None, NextToken=None):\n \"\"\"\n The status of the associations for the instance(s).\n See also: AWS API Documentation\n \n \n :example: response = client.describe_instance_associations_status(\n InstanceId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The instance IDs for which you want association status information.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'InstanceAssociationStatusInfos': [\n {\n 'AssociationId': 'string',\n 'Name': 'string',\n 'DocumentVersion': 'string',\n 'AssociationVersion': 'string',\n 'InstanceId': 'string',\n 'ExecutionDate': datetime(2015, 1, 1),\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'ExecutionSummary': 'string',\n 'ErrorCode': 'string',\n 'OutputUrl': {\n 'S3OutputUrl': {\n 'OutputUrl': 'string'\n }\n },\n 'AssociationName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_instance_information(InstanceInformationFilterList=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Describes one or more of your instances. You can use this to get information about instances like the operating system platform, the SSM Agent version (Linux), status etc. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. 
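As a rough sketch, scoping the call to a single instance might look like the following (the instance ID is a placeholder):\n response = client.describe_instance_information(\n Filters=[{'Key': 'InstanceIds', 'Values': ['i-0123456789abcdef0']}] # hypothetical EC2 instance ID\n )\n for info in response['InstanceInformationList']:\n print(info['InstanceId'], info['PingStatus'], info['AgentVersion'])\n 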
If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_instance_information(\n InstanceInformationFilterList=[\n {\n 'key': 'InstanceIds'|'AgentVersion'|'PingStatus'|'PlatformTypes'|'ActivationIds'|'IamRole'|'ResourceType'|'AssociationStatus',\n 'valueSet': [\n 'string',\n ]\n },\n ],\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type InstanceInformationFilterList: list\n :param InstanceInformationFilterList: This is a legacy method. We recommend that you don't use this method. Instead, use the InstanceInformationFilter action. The InstanceInformationFilter action enables you to return instance information by using tags that are specified as a key-value mapping.\n If you do use this method, then you can't use the InstanceInformationFilter action. Using this method and the InstanceInformationFilter action causes an exception error.\n (dict) --Describes a filter for a specific list of instances. You can filter instances information by using tags. You specify tags by using a key-value mapping.\n Use this action instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The InstanceInformationFilterList method is a legacy method and does not support tags.\n key (string) -- [REQUIRED]The name of the filter.\n valueSet (list) -- [REQUIRED]The filter values.\n (string) --\n \n \n\n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of instances. You can filter on Amazon EC2 tag. Specify tags by using a key-value mapping.\n (dict) --The filters to describe or get information about your managed instances.\n Key (string) -- [REQUIRED]The filter key name to describe your instances. For example:\n 'InstanceIds'|'AgentVersion'|'PingStatus'|'PlatformTypes'|'ActivationIds'|'IamRole'|'ResourceType'|'AssociationStatus'|'Tag Key'\n Values (list) -- [REQUIRED]The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'InstanceInformationList': [\n {\n 'InstanceId': 'string',\n 'PingStatus': 'Online'|'ConnectionLost'|'Inactive',\n 'LastPingDateTime': datetime(2015, 1, 1),\n 'AgentVersion': 'string',\n 'IsLatestVersion': True|False,\n 'PlatformType': 'Windows'|'Linux',\n 'PlatformName': 'string',\n 'PlatformVersion': 'string',\n 'ActivationId': 'string',\n 'IamRole': 'string',\n 'RegistrationDate': datetime(2015, 1, 1),\n 'ResourceType': 'ManagedInstance'|'Document'|'EC2Instance',\n 'Name': 'string',\n 'IPAddress': 'string',\n 'ComputerName': 'string',\n 'AssociationStatus': 'string',\n 'LastAssociationExecutionDate': datetime(2015, 1, 1),\n 'LastSuccessfulAssociationExecutionDate': datetime(2015, 1, 1),\n 'AssociationOverview': {\n 'DetailedStatus': 'string',\n 'InstanceAssociationStatusAggregatedCount': {\n 'string': 123\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (integer) --\n \n \n \n \"\"\"\n pass\n\ndef describe_instance_patch_states(InstanceIds=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves the high-level patch state of one or more instances.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_instance_patch_states(\n InstanceIds=[\n 'string',\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: [REQUIRED]\n The ID of the instance whose patch state information should be retrieved.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of instances to return (per page).\n\n :rtype: dict\n :return: {\n 'InstancePatchStates': [\n {\n 'InstanceId': 'string',\n 'PatchGroup': 'string',\n 'BaselineId': 'string',\n 'SnapshotId': 'string',\n 'InstallOverrideList': 'string',\n 'OwnerInformation': 'string',\n 'InstalledCount': 123,\n 'InstalledOtherCount': 123,\n 'InstalledRejectedCount': 123,\n 'MissingCount': 123,\n 'FailedCount': 123,\n 'NotApplicableCount': 123,\n 'OperationStartTime': datetime(2015, 1, 1),\n 'OperationEndTime': datetime(2015, 1, 1),\n 'Operation': 'Scan'|'Install'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_instance_patch_states_for_patch_group(PatchGroup=None, Filters=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves the high-level patch state for the instances in the specified patch group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_instance_patch_states_for_patch_group(\n PatchGroup='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'Equal'|'NotEqual'|'LessThan'|'GreaterThan'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type PatchGroup: string\n :param PatchGroup: [REQUIRED]\n The name of the patch group for which the patch state information should be retrieved.\n \n\n :type Filters: list\n :param Filters: Each entry in the array is a structure containing:\n Key (string between 1 and 200 characters)\n Values (array containing a single string)\n Type (string 'Equal', 'NotEqual', 'LessThan', 'GreaterThan')\n (dict) --Defines a filter used in DescribeInstancePatchStatesForPatchGroup used to scope down the information returned by the API.\n Key (string) -- [REQUIRED]The key for the filter. 
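For instance, a filter matching instances with at least one failed patch might look like this sketch: {'Key': 'FailedCount', 'Values': ['0'], 'Type': 'GreaterThan'}.\n 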
Supported values are FailedCount, InstalledCount, InstalledOtherCount, MissingCount and NotApplicableCount.\n Values (list) -- [REQUIRED]The value for the filter, must be an integer greater than or equal to 0.\n (string) --\n Type (string) -- [REQUIRED]The type of comparison that should be performed for the value: Equal, NotEqual, LessThan or GreaterThan.\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of patches to return (per page).\n\n :rtype: dict\n :return: {\n 'InstancePatchStates': [\n {\n 'InstanceId': 'string',\n 'PatchGroup': 'string',\n 'BaselineId': 'string',\n 'SnapshotId': 'string',\n 'InstallOverrideList': 'string',\n 'OwnerInformation': 'string',\n 'InstalledCount': 123,\n 'InstalledOtherCount': 123,\n 'InstalledRejectedCount': 123,\n 'MissingCount': 123,\n 'FailedCount': 123,\n 'NotApplicableCount': 123,\n 'OperationStartTime': datetime(2015, 1, 1),\n 'OperationEndTime': datetime(2015, 1, 1),\n 'Operation': 'Scan'|'Install'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_instance_patches(InstanceId=None, Filters=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves information about the patches on the specified instance and their state relative to the patch baseline being used for the instance.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_instance_patches(\n InstanceId='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance whose patch state information should be retrieved.\n \n\n :type Filters: list\n :param Filters: Each entry in the array is a structure containing:\n Key (string, between 1 and 128 characters)\n Values (array of strings, each string between 1 and 256 characters)\n (dict) --Defines a filter used in Patch Manager APIs.\n Key (string) --The key for the filter.\n Values (list) --The value for the filter.\n (string) --\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of patches to return (per page).\n\n :rtype: dict\n :return: {\n 'Patches': [\n {\n 'Title': 'string',\n 'KBId': 'string',\n 'Classification': 'string',\n 'Severity': 'string',\n 'State': 'INSTALLED'|'INSTALLED_OTHER'|'INSTALLED_REJECTED'|'MISSING'|'NOT_APPLICABLE'|'FAILED',\n 'InstalledTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_inventory_deletions(DeletionId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Describes a specific delete inventory operation.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_inventory_deletions(\n DeletionId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type DeletionId: string\n :param DeletionId: Specify the delete inventory ID for which you want information. This ID was returned by the DeleteInventory action.\n\n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. 
The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'InventoryDeletions': [\n {\n 'DeletionId': 'string',\n 'TypeName': 'string',\n 'DeletionStartTime': datetime(2015, 1, 1),\n 'LastStatus': 'InProgress'|'Complete',\n 'LastStatusMessage': 'string',\n 'DeletionSummary': {\n 'TotalCount': 123,\n 'RemainingCount': 123,\n 'SummaryItems': [\n {\n 'Version': 'string',\n 'Count': 123,\n 'RemainingCount': 123\n },\n ]\n },\n 'LastStatusUpdateTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_maintenance_window_execution_task_invocations(WindowExecutionId=None, TaskId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves the individual task executions (one per target) for a particular task executed as part of a Maintenance Window execution.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_window_execution_task_invocations(\n WindowExecutionId='string',\n TaskId='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type WindowExecutionId: string\n :param WindowExecutionId: [REQUIRED]\n The ID of the Maintenance Window execution the task is part of.\n \n\n :type TaskId: string\n :param TaskId: [REQUIRED]\n The ID of the specific task in the Maintenance Window task that should be retrieved.\n \n\n :type Filters: list\n :param Filters: Optional filters used to scope down the returned task invocations. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.\n (dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n Key (string) --The name of the filter.\n Values (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'WindowExecutionTaskInvocationIdentities': [\n {\n 'WindowExecutionId': 'string',\n 'TaskExecutionId': 'string',\n 'InvocationId': 'string',\n 'ExecutionId': 'string',\n 'TaskType': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA',\n 'Parameters': 'string',\n 'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',\n 'StatusDetails': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'OwnerInformation': 'string',\n 'WindowTargetId': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_maintenance_window_execution_tasks(WindowExecutionId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n For a given Maintenance Window execution, lists the tasks that were executed.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_window_execution_tasks(\n WindowExecutionId='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type WindowExecutionId: string\n :param WindowExecutionId: [REQUIRED]\n The ID of the Maintenance Window execution whose task executions should be retrieved.\n \n\n :type Filters: list\n :param Filters: Optional filters used to scope down the returned tasks. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.\n (dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n Key (string) --The name of the filter.\n Values (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'WindowExecutionTaskIdentities': [\n {\n 'WindowExecutionId': 'string',\n 'TaskExecutionId': 'string',\n 'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',\n 'StatusDetails': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'TaskArn': 'string',\n 'TaskType': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_maintenance_window_executions(WindowId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the executions of a Maintenance Window. 
This includes information about when the Maintenance Window was scheduled to be active, and information about tasks registered and run with the Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_window_executions(\n WindowId='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window whose executions should be retrieved.\n \n\n :type Filters: list\n :param Filters: Each entry in the array is a structure containing:\n Key (string, between 1 and 128 characters)\n Values (array of strings, each string is between 1 and 256 characters)\n The supported Keys are ExecutedBefore and ExecutedAfter with the value being a date/time string such as 2016-11-04T05:00:00Z.\n (dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n Key (string) --The name of the filter.\n Values (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'WindowExecutions': [\n {\n 'WindowId': 'string',\n 'WindowExecutionId': 'string',\n 'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',\n 'StatusDetails': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_maintenance_window_schedule(WindowId=None, Targets=None, ResourceType=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves information about upcoming executions of a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_window_schedule(\n WindowId='string',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n ResourceType='INSTANCE',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type WindowId: string\n :param WindowId: The ID of the Maintenance Window to retrieve information about.\n\n :type Targets: list\n :param Targets: The instance ID or key/value pair to retrieve information about.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. 
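A sketch of that shape as it would appear in a request: Targets=[{'Key': 'tag:ServerRole', 'Values': ['WebServer']}].\n 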
For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type ResourceType: string\n :param ResourceType: The type of resource you want to retrieve information about. For example, 'INSTANCE'.\n\n :type Filters: list\n :param Filters: Filters used to limit the range of results. For example, you can limit Maintenance Window executions to only those scheduled before or after a certain date and time.\n (dict) --Defines a filter used in Patch Manager APIs.\n Key (string) --The key for the filter.\n Values (list) --The value for the filter.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'ScheduledWindowExecutions': [\n {\n 'WindowId': 'string',\n 'Name': 'string',\n 'ExecutionTime': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_maintenance_window_targets(WindowId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the targets registered with the Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_window_targets(\n WindowId='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window whose targets should be retrieved.\n \n\n :type Filters: list\n :param Filters: Optional filters that can be used to narrow down the scope of the returned window targets. The supported filter keys are Type, WindowTargetId and OwnerInformation.\n (dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n Key (string) --The name of the filter.\n Values (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Targets': [\n {\n 'WindowId': 'string',\n 'WindowTargetId': 'string',\n 'ResourceType': 'INSTANCE',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'OwnerInformation': 'string',\n 'Name': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_maintenance_window_tasks(WindowId=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the tasks in a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_window_tasks(\n WindowId='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window whose tasks should be retrieved.\n \n\n :type Filters: list\n :param Filters: Optional filters used to narrow down the scope of the returned tasks. The supported filter keys are WindowTaskId, TaskArn, Priority, and TaskType.\n (dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n Key (string) --The name of the filter.\n Values (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Tasks': [\n {\n 'WindowId': 'string',\n 'WindowTaskId': 'string',\n 'TaskArn': 'string',\n 'Type': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TaskParameters': {\n 'string': {\n 'Values': [\n 'string',\n ]\n }\n },\n 'Priority': 123,\n 'LoggingInfo': {\n 'S3BucketName': 'string',\n 'S3KeyPrefix': 'string',\n 'S3Region': 'string'\n },\n 'ServiceRoleArn': 'string',\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'Name': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_maintenance_windows(Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves the Maintenance Windows in an AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_windows(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: Optional filters used to narrow down the scope of the returned Maintenance Windows. Supported filter keys are Name and Enabled .\n (dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n Key (string) --The name of the filter.\n Values (list) --The filter values.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'WindowIdentities': [\n {\n 'WindowId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Enabled': True|False,\n 'Duration': 123,\n 'Cutoff': 123,\n 'Schedule': 'string',\n 'ScheduleTimezone': 'string',\n 'EndDate': 'string',\n 'StartDate': 'string',\n 'NextExecutionTime': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_maintenance_windows_for_target(Targets=None, ResourceType=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves information about the Maintenance Windows targets or tasks that an instance is associated with.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_maintenance_windows_for_target(\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n ResourceType='INSTANCE',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Targets: list\n :param Targets: [REQUIRED]\n The instance ID or key/value pair to retrieve information about.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type ResourceType: string\n :param ResourceType: [REQUIRED]\n The type of resource you want to retrieve information about. For example, 'INSTANCE'.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'WindowIdentities': [\n {\n 'WindowId': 'string',\n 'Name': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_parameters(Filters=None, ParameterFilters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Get information about a parameter.\n Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults . If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken . 
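As a sketch of the token handling described above (illustrative; it assumes a configured boto3 client and relies on the paginator that botocore exposes for this operation):\n import boto3\n ssm = boto3.client('ssm')\n paginator = ssm.get_paginator('describe_parameters')\n for page in paginator.paginate():\n for parameter in page['Parameters']:\n print(parameter['Name'], parameter['Type'])\n 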
You can specify the NextToken in a subsequent call to get the next set of results.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_parameters(\n Filters=[\n {\n 'Key': 'Name'|'Type'|'KeyId',\n 'Values': [\n 'string',\n ]\n },\n ],\n ParameterFilters=[\n {\n 'Key': 'string',\n 'Option': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of results.\n (dict) --This data type is deprecated. Instead, use ParameterStringFilter .\n Key (string) -- [REQUIRED]The name of the filter.\n Values (list) -- [REQUIRED]The filter values.\n (string) --\n \n \n\n :type ParameterFilters: list\n :param ParameterFilters: Filters to limit the request results.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Note\n The Name field can't be used with the GetParametersByPath API action.\n Key (string) -- [REQUIRED]The name of the filter.\n Option (string) --Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.\n Values (list) --The value you want to search for.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList'|'SecureString',\n 'KeyId': 'string',\n 'LastModifiedDate': datetime(2015, 1, 1),\n 'LastModifiedUser': 'string',\n 'Description': 'string',\n 'AllowedPattern': 'string',\n 'Version': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_patch_baselines(Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the patch baselines in your AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_patch_baselines(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: Each element in the array is a structure containing:\n Key: (string, 'NAME_PREFIX' or 'OWNER')\n Value: (array of strings, exactly 1 entry, between 1 and 255 characters)\n (dict) --Defines a filter used in Patch Manager APIs.\n Key (string) --The key for the filter.\n Values (list) --The value for the filter.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of patch baselines to return (per page).\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'BaselineIdentities': [\n {\n 'BaselineId': 'string',\n 'BaselineName': 'string',\n 'OperatingSystem': 'WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS',\n 'BaselineDescription': 'string',\n 'DefaultBaseline': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_patch_group_state(PatchGroup=None):\n \"\"\"\n Returns high-level aggregated patch compliance state for a patch group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_patch_group_state(\n PatchGroup='string'\n )\n \n \n :type PatchGroup: string\n :param PatchGroup: [REQUIRED]\n The name of the patch group whose patch snapshot should be retrieved.\n \n\n :rtype: dict\n :return: {\n 'Instances': 123,\n 'InstancesWithInstalledPatches': 123,\n 'InstancesWithInstalledOtherPatches': 123,\n 'InstancesWithInstalledRejectedPatches': 123,\n 'InstancesWithMissingPatches': 123,\n 'InstancesWithFailedPatches': 123,\n 'InstancesWithNotApplicablePatches': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_patch_groups(MaxResults=None, Filters=None, NextToken=None):\n \"\"\"\n Lists all patch groups that have been registered with patch baselines.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_patch_groups(\n MaxResults=123,\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of patch groups to return (per page).\n\n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of results.\n (dict) --Defines a filter used in Patch Manager APIs.\n Key (string) --The key for the filter.\n Values (list) --The value for the filter.\n (string) --\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Mappings': [\n {\n 'PatchGroup': 'string',\n 'BaselineIdentity': {\n 'BaselineId': 'string',\n 'BaselineName': 'string',\n 'OperatingSystem': 'WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS',\n 'BaselineDescription': 'string',\n 'DefaultBaseline': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_sessions(State=None, MaxResults=None, NextToken=None, Filters=None):\n \"\"\"\n Retrieves a list of all active sessions (both connected and disconnected) or terminated sessions from the past 30 days.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_sessions(\n State='Active'|'History',\n MaxResults=123,\n NextToken='string',\n Filters=[\n {\n 'key': 'InvokedAfter'|'InvokedBefore'|'Target'|'Owner'|'Status',\n 'value': 'string'\n },\n ]\n )\n \n \n :type State: string\n :param State: [REQUIRED]\n The session status to retrieve a list of sessions for. For example, 'Active'.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :type Filters: list\n :param Filters: One or more filters to limit the type of sessions returned by the request.\n (dict) --Describes a filter for Session Manager information.\n key (string) -- [REQUIRED]The name of the filter.\n value (string) -- [REQUIRED]The filter value. Valid values for each filter key are as follows:\n InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.\n InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.\n Target: Specify an instance to which session connections have been made.\n Owner: Specify an AWS user account to see a list of sessions started by that user.\n Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:\n Connected\n Connecting\n Disconnected\n Terminated\n Terminating\n Failed\n \n \n\n :rtype: dict\n :return: {\n 'Sessions': [\n {\n 'SessionId': 'string',\n 'Target': 'string',\n 'Status': 'Connected'|'Connecting'|'Disconnected'|'Terminated'|'Terminating'|'Failed',\n 'StartDate': datetime(2015, 1, 1),\n 'EndDate': datetime(2015, 1, 1),\n 'DocumentName': 'string',\n 'Owner': 'string',\n 'Details': 'string',\n 'OutputUrl': {\n 'S3OutputUrl': 'string',\n 'CloudWatchOutputUrl': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_automation_execution(AutomationExecutionId=None):\n \"\"\"\n Get detailed information about a particular Automation execution.\n See also: AWS API Documentation\n \n \n :example: response = client.get_automation_execution(\n AutomationExecutionId='string'\n )\n \n \n :type AutomationExecutionId: string\n :param AutomationExecutionId: [REQUIRED]\n The unique identifier for an existing automation execution to examine. 
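An illustrative sketch of obtaining and examining an execution ID (the document name and its parameters are placeholders; assumes a configured boto3 client):\n import boto3\n ssm = boto3.client('ssm')\n started = ssm.start_automation_execution(\n DocumentName='AWS-RestartEC2Instance',\n Parameters={'InstanceId': ['i-0123456789abcdef0']})\n detail = ssm.get_automation_execution(\n AutomationExecutionId=started['AutomationExecutionId'])\n print(detail['AutomationExecution']['AutomationExecutionStatus'])\n 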
The execution ID is returned by StartAutomationExecution when the execution of an Automation document is initiated.\n \n\n :rtype: dict\n :return: {\n 'AutomationExecution': {\n 'AutomationExecutionId': 'string',\n 'DocumentName': 'string',\n 'DocumentVersion': 'string',\n 'ExecutionStartTime': datetime(2015, 1, 1),\n 'ExecutionEndTime': datetime(2015, 1, 1),\n 'AutomationExecutionStatus': 'Pending'|'InProgress'|'Waiting'|'Success'|'TimedOut'|'Cancelling'|'Cancelled'|'Failed',\n 'StepExecutions': [\n {\n 'StepName': 'string',\n 'Action': 'string',\n 'TimeoutSeconds': 123,\n 'OnFailure': 'string',\n 'MaxAttempts': 123,\n 'ExecutionStartTime': datetime(2015, 1, 1),\n 'ExecutionEndTime': datetime(2015, 1, 1),\n 'StepStatus': 'Pending'|'InProgress'|'Waiting'|'Success'|'TimedOut'|'Cancelling'|'Cancelled'|'Failed',\n 'ResponseCode': 'string',\n 'Inputs': {\n 'string': 'string'\n },\n 'Outputs': {\n 'string': [\n 'string',\n ]\n },\n 'Response': 'string',\n 'FailureMessage': 'string',\n 'FailureDetails': {\n 'FailureStage': 'string',\n 'FailureType': 'string',\n 'Details': {\n 'string': [\n 'string',\n ]\n }\n },\n 'StepExecutionId': 'string',\n 'OverriddenParameters': {\n 'string': [\n 'string',\n ]\n },\n 'IsEnd': True|False,\n 'NextStep': 'string',\n 'IsCritical': True|False,\n 'ValidNextSteps': [\n 'string',\n ],\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TargetLocation': {\n 'Accounts': [\n 'string',\n ],\n 'Regions': [\n 'string',\n ],\n 'TargetLocationMaxConcurrency': 'string',\n 'TargetLocationMaxErrors': 'string',\n 'ExecutionRoleName': 'string'\n }\n },\n ],\n 'StepExecutionsTruncated': True|False,\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'Outputs': {\n 'string': [\n 'string',\n ]\n },\n 'FailureMessage': 'string',\n 'Mode': 'Auto'|'Interactive',\n 'ParentAutomationExecutionId': 'string',\n 'ExecutedBy': 'string',\n 'CurrentStepName': 'string',\n 'CurrentAction': 'string',\n 'TargetParameterName': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TargetMaps': [\n {\n 'string': [\n 'string',\n ]\n },\n ],\n 'ResolvedTargets': {\n 'ParameterValues': [\n 'string',\n ],\n 'Truncated': True|False\n },\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'Target': 'string',\n 'TargetLocations': [\n {\n 'Accounts': [\n 'string',\n ],\n 'Regions': [\n 'string',\n ],\n 'TargetLocationMaxConcurrency': 'string',\n 'TargetLocationMaxErrors': 'string',\n 'ExecutionRoleName': 'string'\n },\n ],\n 'ProgressCounters': {\n 'TotalSteps': 123,\n 'SuccessSteps': 123,\n 'FailedSteps': 123,\n 'CancelledSteps': 123,\n 'TimedOutSteps': 123\n }\n }\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef get_command_invocation(CommandId=None, InstanceId=None, PluginName=None):\n \"\"\"\n Returns detailed information about command execution for an invocation or plugin.\n See also: AWS API Documentation\n \n \n :example: response = client.get_command_invocation(\n CommandId='string',\n InstanceId='string',\n PluginName='string'\n )\n \n \n :type CommandId: string\n :param CommandId: [REQUIRED]\n (Required) The parent command ID of the invocation plugin.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n (Required) The ID of the managed instance targeted by the command. 
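A minimal polling sketch (illustrative; the instance ID is a placeholder, and production code should bound the loop and handle the invocation not being registered yet):\n import time\n import boto3\n ssm = boto3.client('ssm')\n sent = ssm.send_command(\n InstanceIds=['i-0123456789abcdef0'],\n DocumentName='AWS-RunShellScript',\n Parameters={'commands': ['uptime']})\n command_id = sent['Command']['CommandId']\n while True:\n inv = ssm.get_command_invocation(\n CommandId=command_id, InstanceId='i-0123456789abcdef0')\n if inv['Status'] not in ('Pending', 'InProgress', 'Delayed'):\n break\n time.sleep(2)\n print(inv['Status'], inv['StandardOutputContent'])\n 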
A managed instance can be an Amazon EC2 instance or an instance in your hybrid environment that is configured for Systems Manager.\n \n\n :type PluginName: string\n :param PluginName: (Optional) The name of the plugin for which you want detailed results. If the document contains only one plugin, the name can be omitted and the details will be returned.\n\n :rtype: dict\n :return: {\n 'CommandId': 'string',\n 'InstanceId': 'string',\n 'Comment': 'string',\n 'DocumentName': 'string',\n 'DocumentVersion': 'string',\n 'PluginName': 'string',\n 'ResponseCode': 123,\n 'ExecutionStartDateTime': 'string',\n 'ExecutionElapsedTime': 'string',\n 'ExecutionEndDateTime': 'string',\n 'Status': 'Pending'|'InProgress'|'Delayed'|'Success'|'Cancelled'|'TimedOut'|'Failed'|'Cancelling',\n 'StatusDetails': 'string',\n 'StandardOutputContent': 'string',\n 'StandardOutputUrl': 'string',\n 'StandardErrorContent': 'string',\n 'StandardErrorUrl': 'string',\n 'CloudWatchOutputConfig': {\n 'CloudWatchLogGroupName': 'string',\n 'CloudWatchOutputEnabled': True|False\n }\n }\n \n \n :returns: \n Pending: The command has not been sent to the instance.\n In Progress: The command has been sent to the instance but has not reached a terminal state.\n Delayed: The system attempted to send the command to the target, but the target was not available. The instance might not be available because of network issues, the instance was stopped, etc. The system will try to deliver the command again.\n Success: The command or plugin was executed successfully. This is a terminal state.\n Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.\n Execution Timed Out: The command started to execute on the instance, but the execution was not complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.\n Failed: The command wasn't executed successfully on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.\n Canceled: The command was terminated before it was completed. This is a terminal state.\n Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.\n Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. 
This is a terminal state.\n \n \"\"\"\n pass\n\ndef get_connection_status(Target=None):\n \"\"\"\n Retrieves the Session Manager connection status for an instance to determine whether it is connected and ready to receive Session Manager connections.\n See also: AWS API Documentation\n \n \n :example: response = client.get_connection_status(\n Target='string'\n )\n \n \n :type Target: string\n :param Target: [REQUIRED]\n The ID of the instance.\n \n\n :rtype: dict\n :return: {\n 'Target': 'string',\n 'Status': 'Connected'|'NotConnected'\n }\n \n \n \"\"\"\n pass\n\ndef get_default_patch_baseline(OperatingSystem=None):\n \"\"\"\n Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.\n If you do not specify an operating system value, the default patch baseline for Windows is returned.\n See also: AWS API Documentation\n \n \n :example: response = client.get_default_patch_baseline(\n OperatingSystem='WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS'\n )\n \n \n :type OperatingSystem: string\n :param OperatingSystem: Returns the default patch baseline for the specified operating system.\n\n :rtype: dict\n :return: {\n 'BaselineId': 'string',\n 'OperatingSystem': 'WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS'\n }\n \n \n \"\"\"\n pass\n\ndef get_deployable_patch_snapshot_for_instance(InstanceId=None, SnapshotId=None):\n \"\"\"\n Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document.\n See also: AWS API Documentation\n \n \n :example: response = client.get_deployable_patch_snapshot_for_instance(\n InstanceId='string',\n SnapshotId='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance for which the appropriate patch snapshot should be retrieved.\n \n\n :type SnapshotId: string\n :param SnapshotId: [REQUIRED]\n The user-defined snapshot ID.\n \n\n :rtype: dict\n :return: {\n 'InstanceId': 'string',\n 'SnapshotId': 'string',\n 'SnapshotDownloadUrl': 'string',\n 'Product': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_document(Name=None, VersionName=None, DocumentVersion=None, DocumentFormat=None):\n \"\"\"\n Gets the contents of the specified Systems Manager document.\n See also: AWS API Documentation\n \n \n :example: response = client.get_document(\n Name='string',\n VersionName='string',\n DocumentVersion='string',\n DocumentFormat='YAML'|'JSON'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the Systems Manager document.\n \n\n :type VersionName: string\n :param VersionName: An optional field specifying the version of the artifact associated with the document. For example, 'Release 12, Update 6'. This value is unique across all versions of a document, and cannot be changed.\n\n :type DocumentVersion: string\n :param DocumentVersion: The document version for which you want information.\n\n :type DocumentFormat: string\n :param DocumentFormat: Returns the document in the specified format. The document format can be either JSON or YAML. 
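A short usage sketch (illustrative; it assumes the AWS-owned AWS-RunShellScript document is available in the target region):\n import boto3\n ssm = boto3.client('ssm')\n doc = ssm.get_document(Name='AWS-RunShellScript', DocumentFormat='JSON')\n print(doc['DocumentType'], doc['DocumentVersion'])\n print(doc['Content'])\n 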
JSON is the default format.\n\n :rtype: dict\n :return: {\n 'Name': 'string',\n 'VersionName': 'string',\n 'DocumentVersion': 'string',\n 'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',\n 'StatusInformation': 'string',\n 'Content': 'string',\n 'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',\n 'DocumentFormat': 'YAML'|'JSON',\n 'AttachmentsContent': [\n {\n 'Name': 'string',\n 'Size': 123,\n 'Hash': 'string',\n 'HashType': 'Sha256',\n 'Url': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_inventory(Filters=None, Aggregators=None, ResultAttributes=None, NextToken=None, MaxResults=None):\n \"\"\"\n Query inventory information.\n See also: AWS API Documentation\n \n \n :example: response = client.get_inventory(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'|'Exists'\n },\n ],\n Aggregators=[\n {\n 'Expression': 'string',\n 'Aggregators': {'... recursive ...'},\n 'Groups': [\n {\n 'Name': 'string',\n 'Filters': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'|'Exists'\n },\n ]\n },\n ]\n },\n ],\n ResultAttributes=[\n {\n 'TypeName': 'string'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of results.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Key (string) -- [REQUIRED]The name of the filter key.\n Values (list) -- [REQUIRED]Inventory filter values. Example: inventory filter where instance IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal\n (string) --\n Type (string) --The type of filter. Valid values include the following: 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'\n \n \n\n :type Aggregators: list\n :param Aggregators: Returns counts of inventory types based on one or more expressions. For example, if you aggregate by using an expression that uses the AWS:InstanceInformation.PlatformType type, you can see a count of how many Windows and Linux instances exist in your inventoried fleet.\n (dict) --Specifies the inventory type and attribute for the aggregation execution.\n Expression (string) --The inventory type and attribute name for aggregation.\n Aggregators (list) --Nested aggregators to further refine aggregation for an inventory type.\n Groups (list) --A user-defined set of one or more filters on which to aggregate inventory data. Groups return a count of resources that match and don't match the specified criteria.\n (dict) --A user-defined set of one or more filters on which to aggregate inventory data. Groups return a count of resources that match and don't match the specified criteria.\n Name (string) -- [REQUIRED]The name of the group.\n Filters (list) -- [REQUIRED]Filters define the criteria for the group. The matchingCount field displays the number of resources that match the criteria. The notMatchingCount field displays the number of resources that don't match the criteria.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Key (string) -- [REQUIRED]The name of the filter key.\n Values (list) -- [REQUIRED]Inventory filter values. 
Example: inventory filter where instance IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal\n (string) --\n Type (string) --The type of filter. Valid values include the following: 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'\n \n \n \n \n\n :type ResultAttributes: list\n :param ResultAttributes: The list of inventory item types to return.\n (dict) --The inventory item result attribute.\n TypeName (string) -- [REQUIRED]Name of the inventory item type. Valid value: AWS:InstanceInformation. Default Value: AWS:InstanceInformation.\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Entities': [\n {\n 'Id': 'string',\n 'Data': {\n 'string': {\n 'TypeName': 'string',\n 'SchemaVersion': 'string',\n 'CaptureTime': 'string',\n 'ContentHash': 'string',\n 'Content': [\n {\n 'string': 'string'\n },\n ]\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) --\n (string) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef get_inventory_schema(TypeName=None, NextToken=None, MaxResults=None, Aggregator=None, SubType=None):\n \"\"\"\n Return a list of inventory type names for the account, or return a list of attribute names for a specific Inventory item type.\n See also: AWS API Documentation\n \n \n :example: response = client.get_inventory_schema(\n TypeName='string',\n NextToken='string',\n MaxResults=123,\n Aggregator=True|False,\n SubType=True|False\n )\n \n \n :type TypeName: string\n :param TypeName: The type of inventory item to return.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type Aggregator: boolean\n :param Aggregator: Returns inventory schemas that support aggregation. 
For example, this call returns the AWS:InstanceInformation type, because it supports aggregation based on the PlatformName , PlatformType , and PlatformVersion attributes.\n\n :type SubType: boolean\n :param SubType: Returns the sub-type schema for a specified inventory type.\n\n :rtype: dict\n :return: {\n 'Schemas': [\n {\n 'TypeName': 'string',\n 'Version': 'string',\n 'Attributes': [\n {\n 'Name': 'string',\n 'DataType': 'string'|'number'\n },\n ],\n 'DisplayName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_maintenance_window(WindowId=None):\n \"\"\"\n Retrieves a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.get_maintenance_window(\n WindowId='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the desired Maintenance Window.\n \n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'StartDate': 'string',\n 'EndDate': 'string',\n 'Schedule': 'string',\n 'ScheduleTimezone': 'string',\n 'NextExecutionTime': 'string',\n 'Duration': 123,\n 'Cutoff': 123,\n 'AllowUnassociatedTargets': True|False,\n 'Enabled': True|False,\n 'CreatedDate': datetime(2015, 1, 1),\n 'ModifiedDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef get_maintenance_window_execution(WindowExecutionId=None):\n \"\"\"\n Retrieves details about a specific task executed as part of a Maintenance Window execution.\n See also: AWS API Documentation\n \n \n :example: response = client.get_maintenance_window_execution(\n WindowExecutionId='string'\n )\n \n \n :type WindowExecutionId: string\n :param WindowExecutionId: [REQUIRED]\n The ID of the Maintenance Window execution that includes the task.\n \n\n :rtype: dict\n :return: {\n 'WindowExecutionId': 'string',\n 'TaskIds': [\n 'string',\n ],\n 'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',\n 'StatusDetails': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef get_maintenance_window_execution_task(WindowExecutionId=None, TaskId=None):\n \"\"\"\n Retrieves the details about a specific task executed as part of a Maintenance Window execution.\n See also: AWS API Documentation\n \n \n :example: response = client.get_maintenance_window_execution_task(\n WindowExecutionId='string',\n TaskId='string'\n )\n \n \n :type WindowExecutionId: string\n :param WindowExecutionId: [REQUIRED]\n The ID of the Maintenance Window execution that includes the task.\n \n\n :type TaskId: string\n :param TaskId: [REQUIRED]\n The ID of the specific task execution in the Maintenance Window task that should be retrieved.\n \n\n :rtype: dict\n :return: {\n 'WindowExecutionId': 'string',\n 'TaskExecutionId': 'string',\n 'TaskArn': 'string',\n 'ServiceRole': 'string',\n 'Type': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA',\n 'TaskParameters': [\n {\n 'string': {\n 'Values': [\n 'string',\n ]\n }\n },\n ],\n 'Priority': 123,\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',\n 'StatusDetails': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1)\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_maintenance_window_execution_task_invocation(WindowExecutionId=None, TaskId=None, InvocationId=None):\n \"\"\"\n Retrieves a task invocation. 
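An illustrative drill-down sketch (the execution ID is a placeholder, and the [0] indexing assumes at least one task and one invocation exist):\n import boto3\n ssm = boto3.client('ssm')\n execution_id = 'EXECUTION-ID'\n task_ids = ssm.get_maintenance_window_execution(\n WindowExecutionId=execution_id)['TaskIds']\n invs = ssm.describe_maintenance_window_execution_task_invocations(\n WindowExecutionId=execution_id, TaskId=task_ids[0])\n first = invs['WindowExecutionTaskInvocationIdentities'][0]\n detail = ssm.get_maintenance_window_execution_task_invocation(\n WindowExecutionId=execution_id, TaskId=task_ids[0],\n InvocationId=first['InvocationId'])\n print(detail['Status'], detail['StatusDetails'])\n 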
A task invocation is a specific task executing on a specific target. Maintenance Windows report status for all invocations.\n See also: AWS API Documentation\n \n \n :example: response = client.get_maintenance_window_execution_task_invocation(\n WindowExecutionId='string',\n TaskId='string',\n InvocationId='string'\n )\n \n \n :type WindowExecutionId: string\n :param WindowExecutionId: [REQUIRED]\n The ID of the Maintenance Window execution for which the task is a part.\n \n\n :type TaskId: string\n :param TaskId: [REQUIRED]\n The ID of the specific task in the Maintenance Window that should be retrieved.\n \n\n :type InvocationId: string\n :param InvocationId: [REQUIRED]\n The invocation ID to retrieve.\n \n\n :rtype: dict\n :return: {\n 'WindowExecutionId': 'string',\n 'TaskExecutionId': 'string',\n 'InvocationId': 'string',\n 'ExecutionId': 'string',\n 'TaskType': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA',\n 'Parameters': 'string',\n 'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',\n 'StatusDetails': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'OwnerInformation': 'string',\n 'WindowTargetId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_maintenance_window_task(WindowId=None, WindowTaskId=None):\n \"\"\"\n Retrieves the details of a specific task registered with a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.get_maintenance_window_task(\n WindowId='string',\n WindowTaskId='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The Maintenance Window ID that includes the task to retrieve.\n \n\n :type WindowTaskId: string\n :param WindowTaskId: [REQUIRED]\n The Maintenance Window task ID to retrieve.\n \n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'WindowTaskId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TaskArn': 'string',\n 'ServiceRoleArn': 'string',\n 'TaskType': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA',\n 'TaskParameters': {\n 'string': {\n 'Values': [\n 'string',\n ]\n }\n },\n 'TaskInvocationParameters': {\n 'RunCommand': {\n 'Comment': 'string',\n 'DocumentHash': 'string',\n 'DocumentHashType': 'Sha256'|'Sha1',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'ServiceRoleArn': 'string',\n 'TimeoutSeconds': 123\n },\n 'Automation': {\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n }\n },\n 'StepFunctions': {\n 'Input': 'string',\n 'Name': 'string'\n },\n 'Lambda': {\n 'ClientContext': 'string',\n 'Qualifier': 'string',\n 'Payload': b'bytes'\n }\n },\n 'Priority': 123,\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'LoggingInfo': {\n 'S3BucketName': 'string',\n 'S3KeyPrefix': 'string',\n 'S3Region': 'string'\n },\n 'Name': 'string',\n 'Description': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_parameter(Name=None, WithDecryption=None):\n \"\"\"\n Get information about a parameter by using the parameter name. Don't confuse this API action with the GetParameters API action.\n See also: AWS API Documentation\n \n \n :example: response = client.get_parameter(\n Name='string',\n WithDecryption=True|False\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the parameter you want to query.\n \n\n :type WithDecryption: boolean\n :param WithDecryption: Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.\n\n :rtype: dict\n :return: {\n 'Parameter': {\n 'Name': 'string',\n 'Type': 'String'|'StringList'|'SecureString',\n 'Value': 'string',\n 'Version': 123,\n 'Selector': 'string',\n 'SourceResult': 'string',\n 'LastModifiedDate': datetime(2015, 1, 1),\n 'ARN': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_parameter_history(Name=None, WithDecryption=None, MaxResults=None, NextToken=None):\n \"\"\"\n Query a list of all parameters used by the AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_parameter_history(\n Name='string',\n WithDecryption=True|False,\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of a parameter you want to query.\n \n\n :type WithDecryption: boolean\n :param WithDecryption: Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList'|'SecureString',\n 'KeyId': 'string',\n 'LastModifiedDate': datetime(2015, 1, 1),\n 'LastModifiedUser': 'string',\n 'Description': 'string',\n 'Value': 'string',\n 'AllowedPattern': 'string',\n 'Version': 123,\n 'Labels': [\n 'string',\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_parameters(Names=None, WithDecryption=None):\n \"\"\"\n Get details of a parameter. Don't confuse this API action with the GetParameter API action.\n See also: AWS API Documentation\n \n \n :example: response = client.get_parameters(\n Names=[\n 'string',\n ],\n WithDecryption=True|False\n )\n \n \n :type Names: list\n :param Names: [REQUIRED]\n Names of the parameters for which you want to query information.\n (string) --\n \n\n :type WithDecryption: boolean\n :param WithDecryption: Return decrypted values for secure string parameters. 
This flag is ignored for String and StringList parameter types.\n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList'|'SecureString',\n 'Value': 'string',\n 'Version': 123,\n 'Selector': 'string',\n 'SourceResult': 'string',\n 'LastModifiedDate': datetime(2015, 1, 1),\n 'ARN': 'string'\n },\n ],\n 'InvalidParameters': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_parameters_by_path(Path=None, Recursive=None, ParameterFilters=None, WithDecryption=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters in the AWS Systems Manager User Guide .\n Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults . If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken . You can specify the NextToken in a subsequent call to get the next set of results.\n See also: AWS API Documentation\n \n \n :example: response = client.get_parameters_by_path(\n Path='string',\n Recursive=True|False,\n ParameterFilters=[\n {\n 'Key': 'string',\n 'Option': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n WithDecryption=True|False,\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Path: string\n :param Path: [REQUIRED]\n The hierarchy for the parameter. Hierarchies start with a forward slash (/) and end with the parameter name. A parameter name hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33\n \n\n :type Recursive: boolean\n :param Recursive: Retrieve all parameters within a hierarchy.\n Warning\n If a user has access to a path, then the user can access all levels of that path. For example, if a user has permission to access path /a, then the user can also access /a/b. Even if a user has explicitly been denied access in IAM for parameter /a, they can still call the GetParametersByPath API action recursively and view /a/b.\n \n\n :type ParameterFilters: list\n :param ParameterFilters: Filters to limit the request results.\n Note\n You can't filter using the parameter name.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Note\n The Name field can't be used with the GetParametersByPath API action.\n Key (string) -- [REQUIRED]The name of the filter.\n Option (string) --Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.\n Values (list) --The value you want to search for.\n (string) --\n \n \n\n :type WithDecryption: boolean\n :param WithDecryption: Retrieve all parameters in a hierarchy with their value decrypted.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: A token to start the list. 
Use this token to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList'|'SecureString',\n 'Value': 'string',\n 'Version': 123,\n 'Selector': 'string',\n 'SourceResult': 'string',\n 'LastModifiedDate': datetime(2015, 1, 1),\n 'ARN': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_patch_baseline(BaselineId=None):\n \"\"\"\n Retrieves information about a patch baseline.\n See also: AWS API Documentation\n \n \n :example: response = client.get_patch_baseline(\n BaselineId='string'\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline to retrieve.\n \n\n :rtype: dict\n :return: {\n 'BaselineId': 'string',\n 'Name': 'string',\n 'OperatingSystem': 'WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS',\n 'GlobalFilters': {\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ApprovalRules': {\n 'PatchRules': [\n {\n 'PatchFilterGroup': {\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApproveAfterDays': 123,\n 'EnableNonSecurity': True|False\n },\n ]\n },\n 'ApprovedPatches': [\n 'string',\n ],\n 'ApprovedPatchesComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApprovedPatchesEnableNonSecurity': True|False,\n 'RejectedPatches': [\n 'string',\n ],\n 'RejectedPatchesAction': 'ALLOW_AS_DEPENDENCY'|'BLOCK',\n 'PatchGroups': [\n 'string',\n ],\n 'CreatedDate': datetime(2015, 1, 1),\n 'ModifiedDate': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Sources': [\n {\n 'Name': 'string',\n 'Products': [\n 'string',\n ],\n 'Configuration': 'string'\n },\n ]\n }\n \n \n :returns: \n CriticalUpdates\n DefinitionUpdates\n Drivers\n FeaturePacks\n SecurityUpdates\n ServicePacks\n Tools\n UpdateRollups\n Updates\n Upgrades\n \n \"\"\"\n pass\n\ndef get_patch_baseline_for_patch_group(PatchGroup=None, OperatingSystem=None):\n \"\"\"\n Retrieves the patch baseline that should be used for the specified patch group.\n See also: AWS API Documentation\n \n \n :example: response = client.get_patch_baseline_for_patch_group(\n PatchGroup='string',\n OperatingSystem='WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS'\n )\n \n \n :type PatchGroup: string\n :param PatchGroup: [REQUIRED]\n The name of the patch group whose patch baseline should be retrieved.\n \n\n :type OperatingSystem: string\n :param OperatingSystem: Returns the operating system rule specified for patch groups using the patch baseline.\n\n :rtype: dict\n :return: {\n 'BaselineId': 'string',\n 'PatchGroup': 'string',\n 'OperatingSystem': 'WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef label_parameter_version(Name=None, ParameterVersion=None, Labels=None):\n \"\"\"\n A parameter label is a user-defined alias to help you manage different versions of a parameter. When you modify a parameter, Systems Manager automatically saves a new version and increments the version number by one. A label can help you remember the purpose of a parameter when there are multiple versions.\n Parameter labels have the following requirements and restrictions.\n See also: AWS API Documentation\n \n \n :example: response = client.label_parameter_version(\n Name='string',\n ParameterVersion=123,\n Labels=[\n 'string',\n ]\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The parameter name on which you want to attach one or more labels.\n \n\n :type ParameterVersion: integer\n :param ParameterVersion: The specific version of the parameter on which you want to attach one or more labels. If no version is specified, the system attaches the label to the latest version.\n\n :type Labels: list\n :param Labels: [REQUIRED]\n One or more labels to attach to the specified parameter version.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'InvalidLabels': [\n 'string',\n ]\n }\n \n \n :returns: \n Name (string) -- [REQUIRED]\n The parameter name on which you want to attach one or more labels.\n \n ParameterVersion (integer) -- The specific version of the parameter on which you want to attach one or more labels. If no version is specified, the system attaches the label to the latest version.\n Labels (list) -- [REQUIRED]\n One or more labels to attach to the specified parameter version.\n \n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_association_versions(AssociationId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves all versions of an association for a specific association ID.\n See also: AWS API Documentation\n \n \n :example: response = client.list_association_versions(\n AssociationId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AssociationId: string\n :param AssociationId: [REQUIRED]\n The association ID for which you want to view all versions.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: A token to start the list. 
Use this token to get the next set of results.\n\n :rtype: dict\n :return: {\n 'AssociationVersions': [\n {\n 'AssociationId': 'string',\n 'AssociationVersion': 'string',\n 'CreatedDate': datetime(2015, 1, 1),\n 'Name': 'string',\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef list_associations(AssociationFilterList=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the associations for the specified Systems Manager document or instance.\n See also: AWS API Documentation\n \n \n :example: response = client.list_associations(\n AssociationFilterList=[\n {\n 'key': 'InstanceId'|'Name'|'AssociationId'|'AssociationStatusName'|'LastExecutedBefore'|'LastExecutedAfter'|'AssociationName',\n 'value': 'string'\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AssociationFilterList: list\n :param AssociationFilterList: One or more filters. Use a filter to return a more specific list of results.\n (dict) --Describes a filter.\n key (string) -- [REQUIRED]The name of the filter.\n value (string) -- [REQUIRED]The filter value.\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'Associations': [\n {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'AssociationId': 'string',\n 'AssociationVersion': 'string',\n 'DocumentVersion': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'Overview': {\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'AssociationStatusAggregatedCount': {\n 'string': 123\n }\n },\n 'ScheduleExpression': 'string',\n 'AssociationName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_command_invocations(CommandId=None, InstanceId=None, MaxResults=None, NextToken=None, Filters=None, Details=None):\n \"\"\"\n An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user executes SendCommand against three instances, then a command invocation is created for each requested instance ID. 
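An illustrative sketch (the command ID is a placeholder taken from an earlier send_command response):\n import boto3\n ssm = boto3.client('ssm')\n resp = ssm.list_command_invocations(CommandId='COMMAND-ID', Details=True)\n for inv in resp['CommandInvocations']:\n print(inv['InstanceId'], inv['Status'])\n for plugin in inv.get('CommandPlugins', []):\n print(' ', plugin['Name'], plugin['ResponseCode'])\n 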
ListCommandInvocations provides status about command execution.\n See also: AWS API Documentation\n \n \n :example: response = client.list_command_invocations(\n CommandId='string',\n InstanceId='string',\n MaxResults=123,\n NextToken='string',\n Filters=[\n {\n 'key': 'InvokedAfter'|'InvokedBefore'|'Status'|'ExecutionStage'|'DocumentName',\n 'value': 'string'\n },\n ],\n Details=True|False\n )\n \n \n :type CommandId: string\n :param CommandId: (Optional) The invocations for a specific command ID.\n\n :type InstanceId: string\n :param InstanceId: (Optional) The command execution details for a specific instance ID.\n\n :type MaxResults: integer\n :param MaxResults: (Optional) The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: (Optional) The token for the next set of items to return. (You received this token from a previous call.)\n\n :type Filters: list\n :param Filters: (Optional) One or more filters. Use a filter to return a more specific list of results.\n (dict) --Describes a command filter.\n key (string) -- [REQUIRED]The name of the filter.\n value (string) -- [REQUIRED]The filter value. Valid values for each filter key are as follows:\n InvokedAfter : Specify a timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see a list of command executions occurring July 7, 2018, and later.\n InvokedBefore : Specify a timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see a list of command executions from before July 7, 2018.\n Status : Specify a valid command status to see a list of all command executions with that status. Status values you can specify include:\n Pending\n InProgress\n Success\n Cancelled\n Failed\n TimedOut\n Cancelling\n DocumentName : Specify name of the SSM document for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.\n ExecutionStage : Specify one of the following values:\n Executing : Returns a list of command executions that are currently still running.\n Complete : Returns a list of command executions that have already completed.\n \n \n\n :type Details: boolean\n :param Details: (Optional) If set, this returns the response of the command executions and any command output. 
By default this is set to False.\n\n :rtype: dict\n :return: {\n 'CommandInvocations': [\n {\n 'CommandId': 'string',\n 'InstanceId': 'string',\n 'InstanceName': 'string',\n 'Comment': 'string',\n 'DocumentName': 'string',\n 'DocumentVersion': 'string',\n 'RequestedDateTime': datetime(2015, 1, 1),\n 'Status': 'Pending'|'InProgress'|'Delayed'|'Success'|'Cancelled'|'TimedOut'|'Failed'|'Cancelling',\n 'StatusDetails': 'string',\n 'TraceOutput': 'string',\n 'StandardOutputUrl': 'string',\n 'StandardErrorUrl': 'string',\n 'CommandPlugins': [\n {\n 'Name': 'string',\n 'Status': 'Pending'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n 'StatusDetails': 'string',\n 'ResponseCode': 123,\n 'ResponseStartDateTime': datetime(2015, 1, 1),\n 'ResponseFinishDateTime': datetime(2015, 1, 1),\n 'Output': 'string',\n 'StandardOutputUrl': 'string',\n 'StandardErrorUrl': 'string',\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n },\n ],\n 'ServiceRole': 'string',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'CloudWatchOutputConfig': {\n 'CloudWatchLogGroupName': 'string',\n 'CloudWatchOutputEnabled': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Pending: The command has not been sent to the instance.\n In Progress: The command has been sent to the instance but has not reached a terminal state.\n Success: The execution of the command or plugin was successfully completed. This is a terminal state.\n Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.\n Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.\n Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.\n Canceled: The command was terminated before it was completed. This is a terminal state.\n Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.\n Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. 
This is a terminal state.\n \n \"\"\"\n pass\n\ndef list_commands(CommandId=None, InstanceId=None, MaxResults=None, NextToken=None, Filters=None):\n \"\"\"\n Lists the commands requested by users of the AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_commands(\n CommandId='string',\n InstanceId='string',\n MaxResults=123,\n NextToken='string',\n Filters=[\n {\n 'key': 'InvokedAfter'|'InvokedBefore'|'Status'|'ExecutionStage'|'DocumentName',\n 'value': 'string'\n },\n ]\n )\n \n \n :type CommandId: string\n :param CommandId: (Optional) If provided, lists only the specified command.\n\n :type InstanceId: string\n :param InstanceId: (Optional) Lists commands issued against this instance ID.\n\n :type MaxResults: integer\n :param MaxResults: (Optional) The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: (Optional) The token for the next set of items to return. (You received this token from a previous call.)\n\n :type Filters: list\n :param Filters: (Optional) One or more filters. Use a filter to return a more specific list of results.\n (dict) --Describes a command filter.\n key (string) -- [REQUIRED]The name of the filter.\n value (string) -- [REQUIRED]The filter value. Valid values for each filter key are as follows:\n InvokedAfter : Specify a timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see a list of command executions occurring July 7, 2018, and later.\n InvokedBefore : Specify a timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see a list of command executions from before July 7, 2018.\n Status : Specify a valid command status to see a list of all command executions with that status. Status values you can specify include:\n Pending\n InProgress\n Success\n Cancelled\n Failed\n TimedOut\n Cancelling\n DocumentName : Specify name of the SSM document for which you want to see command execution results. 
For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.\n ExecutionStage : Specify one of the following values:\n Executing : Returns a list of command executions that are currently still running.\n Complete : Returns a list of command executions that have already completed.\n \n \n\n :rtype: dict\n :return: {\n 'Commands': [\n {\n 'CommandId': 'string',\n 'DocumentName': 'string',\n 'DocumentVersion': 'string',\n 'Comment': 'string',\n 'ExpiresAfter': datetime(2015, 1, 1),\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'InstanceIds': [\n 'string',\n ],\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'RequestedDateTime': datetime(2015, 1, 1),\n 'Status': 'Pending'|'InProgress'|'Success'|'Cancelled'|'Failed'|'TimedOut'|'Cancelling',\n 'StatusDetails': 'string',\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string',\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'TargetCount': 123,\n 'CompletedCount': 123,\n 'ErrorCount': 123,\n 'DeliveryTimedOutCount': 123,\n 'ServiceRole': 'string',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'CloudWatchOutputConfig': {\n 'CloudWatchLogGroupName': 'string',\n 'CloudWatchOutputEnabled': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef list_compliance_items(Filters=None, ResourceIds=None, ResourceTypes=None, NextToken=None, MaxResults=None):\n \"\"\"\n For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter.\n See also: AWS API Documentation\n \n \n :example: response = client.list_compliance_items(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'EQUAL'|'NOT_EQUAL'|'BEGIN_WITH'|'LESS_THAN'|'GREATER_THAN'\n },\n ],\n ResourceIds=[\n 'string',\n ],\n ResourceTypes=[\n 'string',\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: list\n :param Filters: One or more compliance filters. Use a filter to return a more specific list of results.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Key (string) --The name of the filter.\n Values (list) --The value for which to search.\n (string) --\n Type (string) --The type of comparison that should be performed for the value: Equal, NotEqual, BeginWith, LessThan, or GreaterThan.\n \n \n\n :type ResourceIds: list\n :param ResourceIds: The ID for the resources from which to get compliance information. Currently, you can only specify one resource ID.\n (string) --\n \n\n :type ResourceTypes: list\n :param ResourceTypes: The type of resource from which to get compliance information. Currently, the only supported resource type is ManagedInstance .\n (string) --\n \n\n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. 
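Taken together, a hedged sketch that pages through the non-compliant items for one managed instance (the instance ID is hypothetical, and the Status filter key is an assumption based on the COMPLIANT/NON_COMPLIANT values shown below):\n    import boto3\n    ssm = boto3.client('ssm')\n    items = []\n    kwargs = {\n        'ResourceIds': ['i-0123456789abcdef0'],  # hypothetical\n        'ResourceTypes': ['ManagedInstance'],\n        'Filters': [{'Key': 'Status', 'Values': ['NON_COMPLIANT'], 'Type': 'EQUAL'}]\n    }\n    while True:\n        page = ssm.list_compliance_items(**kwargs)\n        items.extend(page['ComplianceItems'])\n        if not page.get('NextToken'):\n            break\n        kwargs['NextToken'] = page['NextToken']  # resume where the previous call stopped\n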
The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ComplianceItems': [\n {\n 'ComplianceType': 'string',\n 'ResourceType': 'string',\n 'ResourceId': 'string',\n 'Id': 'string',\n 'Title': 'string',\n 'Status': 'COMPLIANT'|'NON_COMPLIANT',\n 'Severity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ExecutionSummary': {\n 'ExecutionTime': datetime(2015, 1, 1),\n 'ExecutionId': 'string',\n 'ExecutionType': 'string'\n },\n 'Details': {\n 'string': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_compliance_summaries(Filters=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns a summary count of compliant and non-compliant resources for a compliance type. For example, this call can return State Manager associations, patches, or custom compliance types according to the filter criteria that you specify.\n See also: AWS API Documentation\n \n \n :example: response = client.list_compliance_summaries(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'EQUAL'|'NOT_EQUAL'|'BEGIN_WITH'|'LESS_THAN'|'GREATER_THAN'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: list\n :param Filters: One or more compliance or inventory filters. Use a filter to return a more specific list of results.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Key (string) --The name of the filter.\n Values (list) --The value for which to search.\n (string) --\n Type (string) --The type of comparison that should be performed for the value: Equal, NotEqual, BeginWith, LessThan, or GreaterThan.\n \n \n\n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. Currently, you can specify null or 50. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ComplianceSummaryItems': [\n {\n 'ComplianceType': 'string',\n 'CompliantSummary': {\n 'CompliantCount': 123,\n 'SeveritySummary': {\n 'CriticalCount': 123,\n 'HighCount': 123,\n 'MediumCount': 123,\n 'LowCount': 123,\n 'InformationalCount': 123,\n 'UnspecifiedCount': 123\n }\n },\n 'NonCompliantSummary': {\n 'NonCompliantCount': 123,\n 'SeveritySummary': {\n 'CriticalCount': 123,\n 'HighCount': 123,\n 'MediumCount': 123,\n 'LowCount': 123,\n 'InformationalCount': 123,\n 'UnspecifiedCount': 123\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_document_versions(Name=None, MaxResults=None, NextToken=None):\n \"\"\"\n List all versions for a document.\n See also: AWS API Documentation\n \n \n :example: response = client.list_document_versions(\n Name='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the document about which you want version information.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'DocumentVersions': [\n {\n 'Name': 'string',\n 'DocumentVersion': 'string',\n 'VersionName': 'string',\n 'CreatedDate': datetime(2015, 1, 1),\n 'IsDefaultVersion': True|False,\n 'DocumentFormat': 'YAML'|'JSON',\n 'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',\n 'StatusInformation': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_documents(DocumentFilterList=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Describes one or more of your Systems Manager documents.\n See also: AWS API Documentation\n \n \n :example: response = client.list_documents(\n DocumentFilterList=[\n {\n 'key': 'Name'|'Owner'|'PlatformTypes'|'DocumentType',\n 'value': 'string'\n },\n ],\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type DocumentFilterList: list\n :param DocumentFilterList: One or more filters. Use a filter to return a more specific list of results.\n (dict) --Describes a filter.\n key (string) -- [REQUIRED]The name of the filter.\n value (string) -- [REQUIRED]The value of the filter.\n \n \n\n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of results.\n (dict) --One or more filters. Use a filter to return a more specific list of documents.\n For keys, you can specify one or more tags that have been applied to a document.\n Other valid values include Owner, Name, PlatformTypes, and DocumentType.\n Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self .\n If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the AWS CLI, to return a list of all documents that begin with Te , run the following command:\n aws ssm list-documents --filters Key=Name,Values=Te\n If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.\n To specify a custom key and value pair, use the format Key=tag:[tagName],Values=[valueName] .\n For example, if you created a Key called region and are using the AWS CLI to call the list-documents command:\n aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self\n Key (string) --The name of the filter key.\n Values (list) --The value for the filter key.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. 
(You received this token from a previous call.)\n\n :rtype: dict\n :return: {\n 'DocumentIdentifiers': [\n {\n 'Name': 'string',\n 'Owner': 'string',\n 'VersionName': 'string',\n 'PlatformTypes': [\n 'Windows'|'Linux',\n ],\n 'DocumentVersion': 'string',\n 'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',\n 'SchemaVersion': 'string',\n 'DocumentFormat': 'YAML'|'JSON',\n 'TargetType': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_inventory_entries(InstanceId=None, TypeName=None, Filters=None, NextToken=None, MaxResults=None):\n \"\"\"\n A list of inventory items returned by the request.\n See also: AWS API Documentation\n \n \n :example: response = client.list_inventory_entries(\n InstanceId='string',\n TypeName='string',\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'|'Exists'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The instance ID for which you want inventory information.\n \n\n :type TypeName: string\n :param TypeName: [REQUIRED]\n The type of inventory item for which you want information.\n \n\n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of results.\n (dict) --One or more filters. Use a filter to return a more specific list of results.\n Key (string) -- [REQUIRED]The name of the filter key.\n Values (list) -- [REQUIRED]Inventory filter values. Example: inventory filter where instance IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal\n (string) --\n Type (string) --The type of filter. Valid values include the following: 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of items to return. (You received this token from a previous call.)\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'TypeName': 'string',\n 'InstanceId': 'string',\n 'SchemaVersion': 'string',\n 'CaptureTime': 'string',\n 'Entries': [\n {\n 'string': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) --\n (string) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef list_resource_compliance_summaries(Filters=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns a resource-level summary count. The summary includes information about compliant and non-compliant statuses and detailed compliance-item severity counts, according to the filter criteria you specify.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resource_compliance_summaries(\n Filters=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ],\n 'Type': 'EQUAL'|'NOT_EQUAL'|'BEGIN_WITH'|'LESS_THAN'|'GREATER_THAN'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: list\n :param Filters: One or more filters. Use a filter to return a more specific list of results.\n (dict) --One or more filters. 
Use a filter to return a more specific list of results.\n Key (string) --The name of the filter.\n Values (list) --The value for which to search.\n (string) --\n Type (string) --The type of comparison that should be performed for the value: Equal, NotEqual, BeginWith, LessThan, or GreaterThan.\n \n \n\n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ResourceComplianceSummaryItems': [\n {\n 'ComplianceType': 'string',\n 'ResourceType': 'string',\n 'ResourceId': 'string',\n 'Status': 'COMPLIANT'|'NON_COMPLIANT',\n 'OverallSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ExecutionSummary': {\n 'ExecutionTime': datetime(2015, 1, 1),\n 'ExecutionId': 'string',\n 'ExecutionType': 'string'\n },\n 'CompliantSummary': {\n 'CompliantCount': 123,\n 'SeveritySummary': {\n 'CriticalCount': 123,\n 'HighCount': 123,\n 'MediumCount': 123,\n 'LowCount': 123,\n 'InformationalCount': 123,\n 'UnspecifiedCount': 123\n }\n },\n 'NonCompliantSummary': {\n 'NonCompliantCount': 123,\n 'SeveritySummary': {\n 'CriticalCount': 123,\n 'HighCount': 123,\n 'MediumCount': 123,\n 'LowCount': 123,\n 'InformationalCount': 123,\n 'UnspecifiedCount': 123\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_resource_data_sync(NextToken=None, MaxResults=None):\n \"\"\"\n Lists your resource data sync configurations. Includes information about the last time a sync attempted to start, the last sync status, and the last time a sync successfully completed.\n The number of sync configurations might be too large to return using a single call to ListResourceDataSync . You can limit the number of sync configurations returned by using the MaxResults parameter. To determine whether there are more sync configurations to list, check the value of NextToken in the output. If there are more sync configurations to list, you can request them by specifying the NextToken returned in the call to the parameter of a subsequent call.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resource_data_sync(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token to start the list. Use this token to get the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return for this call. 
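For instance, a hedged sketch that reports the last status of each sync returned by a single call (check NextToken as described above if you have more than one page of configurations):\n    import boto3\n    ssm = boto3.client('ssm')\n    response = ssm.list_resource_data_sync(MaxResults=50)\n    for sync in response['ResourceDataSyncItems']:\n        print(sync['SyncName'], sync['LastStatus'], sync.get('LastSyncStatusMessage', ''))\n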
The call also returns a token that you can specify in a subsequent call to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ResourceDataSyncItems': [\n {\n 'SyncName': 'string',\n 'S3Destination': {\n 'BucketName': 'string',\n 'Prefix': 'string',\n 'SyncFormat': 'JsonSerDe',\n 'Region': 'string',\n 'AWSKMSKeyARN': 'string'\n },\n 'LastSyncTime': datetime(2015, 1, 1),\n 'LastSuccessfulSyncTime': datetime(2015, 1, 1),\n 'LastStatus': 'Successful'|'Failed'|'InProgress',\n 'SyncCreatedTime': datetime(2015, 1, 1),\n 'LastSyncStatusMessage': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_resource(ResourceType=None, ResourceId=None):\n \"\"\"\n Returns a list of the tags assigned to the specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n ResourceType='Document'|'ManagedInstance'|'MaintenanceWindow'|'Parameter'|'PatchBaseline',\n ResourceId='string'\n )\n \n \n :type ResourceType: string\n :param ResourceType: [REQUIRED]\n Returns a list of tags for a specific resource type.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The resource ID for which you want to see a list of tags.\n \n\n :rtype: dict\n :return: {\n 'TagList': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef modify_document_permission(Name=None, PermissionType=None, AccountIdsToAdd=None, AccountIdsToRemove=None):\n \"\"\"\n Shares a Systems Manager document publicly or privately. If you share a document privately, you must specify the AWS user account IDs for those people who can use the document. If you share a document publicly, you must specify All as the account ID.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_document_permission(\n Name='string',\n PermissionType='Share',\n AccountIdsToAdd=[\n 'string',\n ],\n AccountIdsToRemove=[\n 'string',\n ]\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the document that you want to share.\n \n\n :type PermissionType: string\n :param PermissionType: [REQUIRED]\n The permission type for the document. The permission type can be Share .\n \n\n :type AccountIdsToAdd: list\n :param AccountIdsToAdd: The AWS user accounts that should have access to the document. The account IDs can either be a group of account IDs or All .\n (string) --\n \n\n :type AccountIdsToRemove: list\n :param AccountIdsToRemove: The AWS user accounts that should no longer have access to the document. The AWS user account can either be a group of account IDs or All . This action has a higher priority than AccountIdsToAdd . If you specify an account ID to add and the same ID to remove, the system removes access to the document.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_compliance_items(ResourceId=None, ResourceType=None, ComplianceType=None, ExecutionSummary=None, Items=None, ItemContentHash=None):\n \"\"\"\n Registers a compliance type and other compliance details on a designated resource. This action lets you register custom compliance details with a resource. 
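A hedged sketch that records one custom compliance item (the resource ID, compliance type, and execution ID are hypothetical):\n    import boto3\n    import datetime\n    ssm = boto3.client('ssm')\n    ssm.put_compliance_items(\n        ResourceId='i-0123456789abcdef0',\n        ResourceType='ManagedInstance',\n        ComplianceType='Custom:CorporateScan',  # custom types use the Custom: prefix\n        ExecutionSummary={\n            'ExecutionTime': datetime.datetime.utcnow(),\n            'ExecutionId': 'scan-0001',  # hypothetical\n            'ExecutionType': 'Command'\n        },\n        Items=[{\n            'Id': 'scan-rule-1',\n            'Title': 'Antivirus signatures up to date',\n            'Severity': 'HIGH',\n            'Status': 'COMPLIANT'\n        }]\n    )\n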
This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.\n ComplianceType can be one of the following:\n See also: AWS API Documentation\n \n \n :example: response = client.put_compliance_items(\n ResourceId='string',\n ResourceType='string',\n ComplianceType='string',\n ExecutionSummary={\n 'ExecutionTime': datetime(2015, 1, 1),\n 'ExecutionId': 'string',\n 'ExecutionType': 'string'\n },\n Items=[\n {\n 'Id': 'string',\n 'Title': 'string',\n 'Severity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'Status': 'COMPLIANT'|'NON_COMPLIANT',\n 'Details': {\n 'string': 'string'\n }\n },\n ],\n ItemContentHash='string'\n )\n \n \n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n Specify an ID for this resource. For a managed instance, this is the instance ID.\n \n\n :type ResourceType: string\n :param ResourceType: [REQUIRED]\n Specify the type of resource. ManagedInstance is currently the only supported resource type.\n \n\n :type ComplianceType: string\n :param ComplianceType: [REQUIRED]\n Specify the compliance type. For example, specify Association (for a State Manager association), Patch, or Custom:string .\n \n\n :type ExecutionSummary: dict\n :param ExecutionSummary: [REQUIRED]\n A summary of the call execution that includes an execution ID, the type of execution (for example, Command ), and the date/time of the execution using a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.\n ExecutionTime (datetime) -- [REQUIRED]The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.\n ExecutionId (string) --An ID created by the system when PutComplianceItems was called. For example, CommandID is a valid execution ID. You can use this ID in subsequent calls.\n ExecutionType (string) --The type of execution. For example, Command is a valid execution type.\n \n\n :type Items: list\n :param Items: [REQUIRED]\n Information about the compliance as defined by the resource type. For example, for a patch compliance type, Items includes information about the PatchSeverity, Classification, etc.\n (dict) --Information about a compliance item.\n Id (string) --The compliance item ID. For example, if the compliance item is a Windows patch, the ID could be the number of the KB article.\n Title (string) --The title of the compliance item. For example, if the compliance item is a Windows patch, the title could be the title of the KB article for the patch; for example: Security Update for Active Directory Federation Services.\n Severity (string) -- [REQUIRED]The severity of the compliance status. Severity can be one of the following: Critical, High, Medium, Low, Informational, Unspecified.\n Status (string) -- [REQUIRED]The status of the compliance item. An item is either COMPLIANT or NON_COMPLIANT.\n Details (dict) --A 'Key': 'Value' tag combination for the compliance item.\n (string) --\n (string) --\n \n \n\n :type ItemContentHash: string\n :param ItemContentHash: MD5 or SHA-256 content hash. The content hash is used to determine if existing information should be overwritten or ignored. If the content hashes match, the request to put compliance information is ignored.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n ResourceId (string) -- [REQUIRED]\n Specify an ID for this resource. 
For a managed instance, this is the instance ID.\n \n ResourceType (string) -- [REQUIRED]\n Specify the type of resource. ManagedInstance is currently the only supported resource type.\n \n ComplianceType (string) -- [REQUIRED]\n Specify the compliance type. For example, specify Association (for a State Manager association), Patch, or Custom:string .\n \n ExecutionSummary (dict) -- [REQUIRED]\n A summary of the call execution that includes an execution ID, the type of execution (for example, Command ), and the date/time of the execution using a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.\n \n ExecutionTime (datetime) -- [REQUIRED]The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.\n \n ExecutionId (string) --An ID created by the system when PutComplianceItems was called. For example, CommandID is a valid execution ID. You can use this ID in subsequent calls.\n \n ExecutionType (string) --The type of execution. For example, Command is a valid execution type.\n \n \n \n Items (list) -- [REQUIRED]\n Information about the compliance as defined by the resource type. For example, for a patch compliance type, Items includes information about the PatchSeverity, Classification, etc.\n \n (dict) --Information about a compliance item.\n \n Id (string) --The compliance item ID. For example, if the compliance item is a Windows patch, the ID could be the number of the KB article.\n \n Title (string) --The title of the compliance item. For example, if the compliance item is a Windows patch, the title could be the title of the KB article for the patch; for example: Security Update for Active Directory Federation Services.\n \n Severity (string) -- [REQUIRED]The severity of the compliance status. Severity can be one of the following: Critical, High, Medium, Low, Informational, Unspecified.\n \n Status (string) -- [REQUIRED]The status of the compliance item. An item is either COMPLIANT or NON_COMPLIANT.\n \n Details (dict) --A \"Key\": \"Value\" tag combination for the compliance item.\n \n (string) --\n (string) --\n \n \n \n \n \n \n \n \n ItemContentHash (string) -- MD5 or SHA-256 content hash. The content hash is used to determine if existing information should be overwritten or ignored. If the content hashes match, the request to put compliance information is ignored.\n \n \"\"\"\n pass\n\ndef put_inventory(InstanceId=None, Items=None):\n \"\"\"\n Bulk update custom inventory items on one more instance. The request adds an inventory item, if it doesn't already exist, or updates an inventory item, if it does exist.\n See also: AWS API Documentation\n \n \n :example: response = client.put_inventory(\n InstanceId='string',\n Items=[\n {\n 'TypeName': 'string',\n 'SchemaVersion': 'string',\n 'CaptureTime': 'string',\n 'ContentHash': 'string',\n 'Content': [\n {\n 'string': 'string'\n },\n ],\n 'Context': {\n 'string': 'string'\n }\n },\n ]\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n One or more instance IDs where you want to add or update inventory items.\n \n\n :type Items: list\n :param Items: [REQUIRED]\n The inventory items that you want to add or update on instances.\n (dict) --Information collected from managed instances based on your inventory policy document\n TypeName (string) -- [REQUIRED]The name of the inventory type. Default inventory item type names start with AWS. Custom inventory type names will start with Custom. 
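For example, a hedged sketch that registers a hypothetical Custom:RackLocation inventory type on one instance:\n    import boto3\n    import datetime\n    ssm = boto3.client('ssm')\n    ssm.put_inventory(\n        InstanceId='i-0123456789abcdef0',  # hypothetical\n        Items=[{\n            'TypeName': 'Custom:RackLocation',\n            'SchemaVersion': '1.0',\n            # CaptureTime is a string in ISO 8601 format.\n            'CaptureTime': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),\n            'Content': [{'RackLocation': 'Bay B/Row C/Rack D/Shelf E'}]\n        }]\n    )\n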
Default inventory item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, AWS:Network, and AWS:WindowsUpdate.\n SchemaVersion (string) -- [REQUIRED]The schema version for the inventory item.\n CaptureTime (string) -- [REQUIRED]The time the inventory information was collected.\n ContentHash (string) --MD5 hash of the inventory item type contents. The content hash is used to determine whether to update inventory information. The PutInventory API does not update the inventory item type contents if the MD5 hash has not changed since last update.\n Content (list) --The inventory data of the inventory type.\n (dict) --\n (string) --\n (string) --\n \n Context (dict) --A map of associated properties for a specified inventory type. For example, with this attribute, you can specify the ExecutionId , ExecutionType , ComplianceType properties of the AWS:ComplianceItem type.\n (string) --\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'Message': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_parameter(Name=None, Description=None, Value=None, Type=None, KeyId=None, Overwrite=None, AllowedPattern=None):\n \"\"\"\n Add a parameter to the system.\n See also: AWS API Documentation\n \n \n :example: response = client.put_parameter(\n Name='string',\n Description='string',\n Value='string',\n Type='String'|'StringList'|'SecureString',\n KeyId='string',\n Overwrite=True|False,\n AllowedPattern='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For example: /Dev/DBServer/MySQL/db-string13\n Naming Constraints:\n Parameter names are case sensitive.\n A parameter name must be unique within an AWS Region\n A parameter name can't be prefixed with 'aws' or 'ssm' (case-insensitive).\n Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-/\n A parameter name can't include spaces.\n Parameter hierarchies are limited to a maximum depth of fifteen levels.\n For additional information about valid values for parameter names, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide .\n Note\n The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for the fully qualified parameter name is 1011 characters.\n \n\n :type Description: string\n :param Description: Information about the parameter that you want to add to the system. Optional but recommended.\n Warning\n Do not enter personally identifiable information in this field.\n \n\n :type Value: string\n :param Value: [REQUIRED]\n The parameter value that you want to add to the system.\n \n\n :type Type: string\n :param Type: [REQUIRED]\n The type of parameter that you want to add to the system.\n Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.\n Note\n SecureString is not currently supported for AWS CloudFormation templates or in the China Regions.\n \n\n :type KeyId: string\n :param KeyId: The KMS Key ID that you want to use to encrypt a parameter. Either the default AWS Key Management Service (AWS KMS) key automatically assigned to your AWS account or a custom key. 
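For instance, a hedged sketch that stores an encrypted value under a custom key (the parameter name and key alias are hypothetical):\n    import boto3\n    ssm = boto3.client('ssm')\n    ssm.put_parameter(\n        Name='/Dev/DBServer/MySQL/db-password',\n        Value='correct-horse-battery-staple',\n        Type='SecureString',\n        KeyId='alias/dev-parameters',  # omit this argument to use the account's default key\n        Overwrite=True\n    )\n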
Required for parameters that use the SecureString data type.\n If you don't specify a key ID, the system uses the default key associated with your AWS account.\n To use your default AWS KMS key, choose the SecureString data type, and do not specify the Key ID when you create the parameter. The system automatically populates Key ID with your default KMS key.\n To use a custom KMS key, choose the SecureString data type with the Key ID parameter.\n \n\n :type Overwrite: boolean\n :param Overwrite: Overwrite an existing parameter. If not specified, will default to 'false'.\n\n :type AllowedPattern: string\n :param AllowedPattern: A regular expression used to validate the parameter value. For example, for String types with values restricted to numbers, you can specify the following: AllowedPattern=^d+$\n\n :rtype: dict\n :return: {\n 'Version': 123\n }\n \n \n \"\"\"\n pass\n\ndef register_default_patch_baseline(BaselineId=None):\n \"\"\"\n Defines the default patch baseline.\n See also: AWS API Documentation\n \n \n :example: response = client.register_default_patch_baseline(\n BaselineId='string'\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline that should be the default patch baseline.\n \n\n :rtype: dict\n :return: {\n 'BaselineId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef register_patch_baseline_for_patch_group(BaselineId=None, PatchGroup=None):\n \"\"\"\n Registers a patch baseline for a patch group.\n See also: AWS API Documentation\n \n \n :example: response = client.register_patch_baseline_for_patch_group(\n BaselineId='string',\n PatchGroup='string'\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline to register the patch group with.\n \n\n :type PatchGroup: string\n :param PatchGroup: [REQUIRED]\n The name of the patch group that should be registered with the patch baseline.\n \n\n :rtype: dict\n :return: {\n 'BaselineId': 'string',\n 'PatchGroup': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef register_target_with_maintenance_window(WindowId=None, ResourceType=None, Targets=None, OwnerInformation=None, Name=None, Description=None, ClientToken=None):\n \"\"\"\n Registers a target with a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.register_target_with_maintenance_window(\n WindowId='string',\n ResourceType='INSTANCE',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n OwnerInformation='string',\n Name='string',\n Description='string',\n ClientToken='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window the target should be registered with.\n \n\n :type ResourceType: string\n :param ResourceType: [REQUIRED]\n The type of target being registered with the Maintenance Window.\n \n\n :type Targets: list\n :param Targets: [REQUIRED]\n The targets (either instances or tags).\n Specify instances using the following format:\n Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>\n Specify tags using either of the following formats:\n Key=tag:<tag-key>,Values=<tag-value-1>,<tag-value-2>Key=tag-key,Values=<tag-key-1>,<tag-key-2>\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. 
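For example, a hedged sketch that registers every instance carrying a hypothetical Environment=Production tag as a window target:\n    import boto3\n    ssm = boto3.client('ssm')\n    response = ssm.register_target_with_maintenance_window(\n        WindowId='mw-0c50858d01example',  # hypothetical window ID\n        ResourceType='INSTANCE',\n        Targets=[{'Key': 'tag:Environment', 'Values': ['Production']}],\n        Name='ProductionFleet'\n    )\n    print(response['WindowTargetId'])\n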
For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type OwnerInformation: string\n :param OwnerInformation: User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.\n\n :type Name: string\n :param Name: An optional name for the target.\n\n :type Description: string\n :param Description: An optional description for the target.\n\n :type ClientToken: string\n :param ClientToken: User-provided idempotency token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'WindowTargetId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef register_task_with_maintenance_window(WindowId=None, Targets=None, TaskArn=None, ServiceRoleArn=None, TaskType=None, TaskParameters=None, TaskInvocationParameters=None, Priority=None, MaxConcurrency=None, MaxErrors=None, LoggingInfo=None, Name=None, Description=None, ClientToken=None):\n \"\"\"\n Adds a new task to a Maintenance Window.\n See also: AWS API Documentation\n \n \n :example: response = client.register_task_with_maintenance_window(\n WindowId='string',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n TaskArn='string',\n ServiceRoleArn='string',\n TaskType='RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA',\n TaskParameters={\n 'string': {\n 'Values': [\n 'string',\n ]\n }\n },\n TaskInvocationParameters={\n 'RunCommand': {\n 'Comment': 'string',\n 'DocumentHash': 'string',\n 'DocumentHashType': 'Sha256'|'Sha1',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'ServiceRoleArn': 'string',\n 'TimeoutSeconds': 123\n },\n 'Automation': {\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n }\n },\n 'StepFunctions': {\n 'Input': 'string',\n 'Name': 'string'\n },\n 'Lambda': {\n 'ClientContext': 'string',\n 'Qualifier': 'string',\n 'Payload': b'bytes'\n }\n },\n Priority=123,\n MaxConcurrency='string',\n MaxErrors='string',\n LoggingInfo={\n 'S3BucketName': 'string',\n 'S3KeyPrefix': 'string',\n 'S3Region': 'string'\n },\n Name='string',\n Description='string',\n ClientToken='string'\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window the task should be added to.\n \n\n :type Targets: list\n :param Targets: [REQUIRED]\n The targets (either instances or Maintenance Window targets).\n Specify instances using the following format:\n Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>\n Specify Maintenance Window targets using the following format:\n Key=<WindowTargetIds>,Values=<window-target-id-1>,<window-target-id-2>\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. 
Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type TaskArn: string\n :param TaskArn: [REQUIRED]\n The ARN of the task to execute\n \n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: The role to assume when running the Maintenance Window task.\n If you do not specify a service role ARN, Systems Manager will use your account's service-linked role for Systems Manager by default. If no service-linked role for Systems Manager exists in your account, it will be created when you run RegisterTaskWithMaintenanceWindow without specifying a service role ARN.\n For more information, see Service-Linked Role Permissions for Systems Manager and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance Window Tasks? in the AWS Systems Manager User Guide .\n \n\n :type TaskType: string\n :param TaskType: [REQUIRED]\n The type of task being registered.\n \n\n :type TaskParameters: dict\n :param TaskParameters: The parameters that should be passed to the task when it is executed.\n Note\n TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n (string) --\n (dict) --Defines the values for a task parameter.\n Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length.\n (string) --\n \n \n\n :type TaskInvocationParameters: dict\n :param TaskInvocationParameters: The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.\n RunCommand (dict) --The parameters for a RUN_COMMAND task type.\n Comment (string) --Information about the command(s) to execute.\n DocumentHash (string) --The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated.\n DocumentHashType (string) --SHA-256 or SHA-1. SHA-1 hashes have been deprecated.\n NotificationConfig (dict) --Configurations for sending notifications about command status changes on a per-instance basis.\n NotificationArn (string) --An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic.\n NotificationEvents (list) --The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. 
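Putting these fields together, a hedged sketch that registers a RUN_COMMAND task with per-invocation failure notifications (the window, target, and topic identifiers are hypothetical):\n    import boto3\n    ssm = boto3.client('ssm')\n    ssm.register_task_with_maintenance_window(\n        WindowId='mw-0c50858d01example',\n        Targets=[{'Key': 'WindowTargetIds', 'Values': ['e32eecb2-646c-4f4b-8ed1-example']}],\n        TaskArn='AWS-RunShellScript',  # for RUN_COMMAND tasks, the ARN is the document name\n        TaskType='RUN_COMMAND',\n        MaxConcurrency='2',\n        MaxErrors='1',\n        TaskInvocationParameters={\n            'RunCommand': {\n                'Parameters': {'commands': ['yum -y update']},\n                'NotificationConfig': {\n                    'NotificationArn': 'arn:aws:sns:us-east-1:111122223333:example-topic',\n                    'NotificationEvents': ['TimedOut', 'Failed'],\n                    'NotificationType': 'Invocation'\n                },\n                'TimeoutSeconds': 600\n            }\n        }\n    )\n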
To learn more about these events, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide .\n (string) --\n NotificationType (string) --Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.\n OutputS3BucketName (string) --The name of the Amazon S3 bucket.\n OutputS3KeyPrefix (string) --The Amazon S3 bucket subfolder.\n Parameters (dict) --The parameters for the RUN_COMMAND task execution.\n (string) --\n (list) --\n (string) --\n \n ServiceRoleArn (string) --The IAM service role to assume during task execution.\n TimeoutSeconds (integer) --If this time is reached and the command has not already started executing, it doesn't run.\n Automation (dict) --The parameters for an AUTOMATION task type.\n DocumentVersion (string) --The version of an Automation document to use during task execution.\n Parameters (dict) --The parameters for the AUTOMATION task.\n For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask .\n Note\n LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n For AUTOMATION task types, Systems Manager ignores any values specified for these parameters.\n (string) --\n (list) --\n (string) --\n \n \n StepFunctions (dict) --The parameters for a STEP_FUNCTION task type.\n Input (string) --The inputs for the STEP_FUNCTION task.\n Name (string) --The name of the STEP_FUNCTION task.\n Lambda (dict) --The parameters for a LAMBDA task type.\n ClientContext (string) --Pass client-specific information to the Lambda function that you are invoking. You can then process the client information in your Lambda function as you choose through the context variable.\n Qualifier (string) --(Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to invoke the Lambda function version to which the alias points.\n Payload (bytes) --JSON to provide to your Lambda function as input.\n \n \n\n :type Priority: integer\n :param Priority: The priority of the task in the Maintenance Window, the lower the number the higher the priority. 
Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel.\n\n :type MaxConcurrency: string\n :param MaxConcurrency: [REQUIRED]\n The maximum number of targets this task can be run for in parallel.\n \n\n :type MaxErrors: string\n :param MaxErrors: [REQUIRED]\n The maximum number of errors allowed before this task stops being scheduled.\n \n\n :type LoggingInfo: dict\n :param LoggingInfo: A structure containing information about an Amazon S3 bucket to write instance-level logs to.\n Note\n LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored .\n S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder.\n S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located.\n \n\n :type Name: string\n :param Name: An optional name for the task.\n\n :type Description: string\n :param Description: An optional description for the task.\n\n :type ClientToken: string\n :param ClientToken: User-provided idempotency token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'WindowTaskId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef remove_tags_from_resource(ResourceType=None, ResourceId=None, TagKeys=None):\n \"\"\"\n Removes all tags from the specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_tags_from_resource(\n ResourceType='Document'|'ManagedInstance'|'MaintenanceWindow'|'Parameter'|'PatchBaseline',\n ResourceId='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceType: string\n :param ResourceType: [REQUIRED]\n The type of resource of which you want to remove a tag.\n Note\n The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The resource ID for which you want to remove tags. Use the ID of the resource. Here are some examples:\n ManagedInstance: mi-012345abcde\n MaintenanceWindow: mw-012345abcde\n PatchBaseline: pb-012345abcde\n For the Document and Parameter values, use the name of the resource.\n Note\n The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n Tag keys that you want to remove from the specified resource.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef resume_session(SessionId=None):\n \"\"\"\n Reconnects a session to an instance after it has been disconnected. 
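A hedged sketch (the session ID is hypothetical; the returned stream URL and token are what a client such as the Session Manager plugin uses to reconnect):\n    import boto3\n    ssm = boto3.client('ssm')\n    response = ssm.resume_session(SessionId='Mary-Major-07a16060613example')\n    print(response['StreamUrl'], response['TokenValue'])\n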
Connections can be resumed for disconnected sessions, but not terminated sessions.\n See also: AWS API Documentation\n \n \n :example: response = client.resume_session(\n SessionId='string'\n )\n \n \n :type SessionId: string\n :param SessionId: [REQUIRED]\n The ID of the disconnected session to resume.\n \n\n :rtype: dict\n :return: {\n 'SessionId': 'string',\n 'TokenValue': 'string',\n 'StreamUrl': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef send_automation_signal(AutomationExecutionId=None, SignalType=None, Payload=None):\n \"\"\"\n Sends a signal to an Automation execution to change the current behavior or status of the execution.\n See also: AWS API Documentation\n \n \n :example: response = client.send_automation_signal(\n AutomationExecutionId='string',\n SignalType='Approve'|'Reject'|'StartStep'|'StopStep'|'Resume',\n Payload={\n 'string': [\n 'string',\n ]\n }\n )\n \n \n :type AutomationExecutionId: string\n :param AutomationExecutionId: [REQUIRED]\n The unique identifier for an existing Automation execution that you want to send the signal to.\n \n\n :type SignalType: string\n :param SignalType: [REQUIRED]\n The type of signal. Valid signal types include the following: Approve and Reject\n \n\n :type Payload: dict\n :param Payload: The data sent with the signal. The data schema depends on the type of signal used in the request.\n (string) --\n (list) --\n (string) --\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef send_command(InstanceIds=None, Targets=None, DocumentName=None, DocumentVersion=None, DocumentHash=None, DocumentHashType=None, TimeoutSeconds=None, Comment=None, Parameters=None, OutputS3Region=None, OutputS3BucketName=None, OutputS3KeyPrefix=None, MaxConcurrency=None, MaxErrors=None, ServiceRoleArn=None, NotificationConfig=None, CloudWatchOutputConfig=None):\n \"\"\"\n Executes commands on one or more managed instances.\n See also: AWS API Documentation\n \n \n :example: response = client.send_command(\n InstanceIds=[\n 'string',\n ],\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n DocumentName='string',\n DocumentVersion='string',\n DocumentHash='string',\n DocumentHashType='Sha256'|'Sha1',\n TimeoutSeconds=123,\n Comment='string',\n Parameters={\n 'string': [\n 'string',\n ]\n },\n OutputS3Region='string',\n OutputS3BucketName='string',\n OutputS3KeyPrefix='string',\n MaxConcurrency='string',\n MaxErrors='string',\n ServiceRoleArn='string',\n NotificationConfig={\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n CloudWatchOutputConfig={\n 'CloudWatchLogGroupName': 'string',\n 'CloudWatchOutputEnabled': True|False\n }\n )\n \n \n :type InstanceIds: list\n :param InstanceIds: The instance IDs where the command should execute. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. For more information about how to use targets, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n\n :type Targets: list\n :param Targets: (Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. 
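For example, a hedged sketch that runs a shell command across a tagged fleet with rate controls (the tag value is hypothetical; AWS-RunShellScript is a public Systems Manager document):\n    import boto3\n    ssm = boto3.client('ssm')\n    response = ssm.send_command(\n        Targets=[{'Key': 'tag:ServerRole', 'Values': ['WebServer']}],\n        DocumentName='AWS-RunShellScript',\n        Parameters={'commands': ['uptime']},\n        MaxConcurrency='10%',  # at most 10% of targets run the command at once\n        MaxErrors='1'\n    )\n    print(response['Command']['CommandId'])\n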
For more information about how to use targets, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type DocumentName: string\n :param DocumentName: [REQUIRED]\n Required. The name of the Systems Manager document to execute. This can be a public document or a custom document.\n \n\n :type DocumentVersion: string\n :param DocumentVersion: The SSM document version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you execute commands by using the AWS CLI, then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:\n --document-version '$DEFAULT'\n --document-version '$LATEST'\n --document-version '3'\n \n\n :type DocumentHash: string\n :param DocumentHash: The Sha256 or Sha1 hash created by the system when the document was created.\n Note\n Sha1 hashes have been deprecated.\n \n\n :type DocumentHashType: string\n :param DocumentHashType: Sha256 or Sha1.\n Note\n Sha1 hashes have been deprecated.\n \n\n :type TimeoutSeconds: integer\n :param TimeoutSeconds: If this time is reached and the command has not already started executing, it will not run.\n\n :type Comment: string\n :param Comment: User-specified information about the command, such as a brief description of what the command should do.\n\n :type Parameters: dict\n :param Parameters: The required and optional parameters specified in the document being executed.\n (string) --\n (list) --\n (string) --\n \n \n\n :type OutputS3Region: string\n :param OutputS3Region: (Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.\n\n :type OutputS3BucketName: string\n :param OutputS3BucketName: The name of the S3 bucket where command execution responses should be stored.\n\n :type OutputS3KeyPrefix: string\n :param OutputS3KeyPrefix: The directory structure within the S3 bucket where the responses should be stored.\n\n :type MaxConcurrency: string\n :param MaxConcurrency: (Optional) The maximum number of instances that are allowed to execute the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using Concurrency Controls in the AWS Systems Manager User Guide .\n\n :type MaxErrors: string\n :param MaxErrors: The maximum number of errors allowed without the command failing. 
When the command fails one more time beyond the value of MaxErrors, the system stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using Error Controls in the AWS Systems Manager User Guide .\n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: The IAM role that Systems Manager uses to send notifications.\n\n :type NotificationConfig: dict\n :param NotificationConfig: Configurations for sending notifications.\n NotificationArn (string) --An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic.\n NotificationEvents (list) --The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide .\n (string) --\n NotificationType (string) --Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.\n \n\n :type CloudWatchOutputConfig: dict\n :param CloudWatchOutputConfig: Enables Systems Manager to send Run Command output to Amazon CloudWatch Logs.\n CloudWatchLogGroupName (string) --The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName .\n CloudWatchOutputEnabled (boolean) --Enables Systems Manager to send command output to CloudWatch Logs.\n \n\n :rtype: dict\n :return: {\n 'Command': {\n 'CommandId': 'string',\n 'DocumentName': 'string',\n 'DocumentVersion': 'string',\n 'Comment': 'string',\n 'ExpiresAfter': datetime(2015, 1, 1),\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'InstanceIds': [\n 'string',\n ],\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'RequestedDateTime': datetime(2015, 1, 1),\n 'Status': 'Pending'|'InProgress'|'Success'|'Cancelled'|'Failed'|'TimedOut'|'Cancelling',\n 'StatusDetails': 'string',\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string',\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'TargetCount': 123,\n 'CompletedCount': 123,\n 'ErrorCount': 123,\n 'DeliveryTimedOutCount': 123,\n 'ServiceRole': 'string',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'CloudWatchOutputConfig': {\n 'CloudWatchLogGroupName': 'string',\n 'CloudWatchOutputEnabled': True|False\n }\n }\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef start_associations_once(AssociationIds=None):\n \"\"\"\n Use this API action to execute an association immediately and only one time. 
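As a minimal sketch (the association ID is illustrative): response = client.start_associations_once(AssociationIds=['8dfe3659-4309-493a-8755-0123456789ab']).\n 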
This action can be helpful when troubleshooting associations.\n See also: AWS API Documentation\n \n \n :example: response = client.start_associations_once(\n AssociationIds=[\n 'string',\n ]\n )\n \n \n :type AssociationIds: list\n :param AssociationIds: [REQUIRED]\n The association IDs that you want to execute immediately and only one time.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef start_automation_execution(DocumentName=None, DocumentVersion=None, Parameters=None, ClientToken=None, Mode=None, TargetParameterName=None, Targets=None, TargetMaps=None, MaxConcurrency=None, MaxErrors=None, TargetLocations=None):\n \"\"\"\n Initiates execution of an Automation document.\n See also: AWS API Documentation\n \n \n :example: response = client.start_automation_execution(\n DocumentName='string',\n DocumentVersion='string',\n Parameters={\n 'string': [\n 'string',\n ]\n },\n ClientToken='string',\n Mode='Auto'|'Interactive',\n TargetParameterName='string',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n TargetMaps=[\n {\n 'string': [\n 'string',\n ]\n },\n ],\n MaxConcurrency='string',\n MaxErrors='string',\n TargetLocations=[\n {\n 'Accounts': [\n 'string',\n ],\n 'Regions': [\n 'string',\n ],\n 'TargetLocationMaxConcurrency': 'string',\n 'TargetLocationMaxErrors': 'string',\n 'ExecutionRoleName': 'string'\n },\n ]\n )\n \n \n :type DocumentName: string\n :param DocumentName: [REQUIRED]\n The name of the Automation document to use for this execution.\n \n\n :type DocumentVersion: string\n :param DocumentVersion: The version of the Automation document to use for this execution.\n\n :type Parameters: dict\n :param Parameters: A key-value map of execution parameters, which match the declared parameters in the Automation document.\n (string) --\n (list) --\n (string) --\n \n \n\n :type ClientToken: string\n :param ClientToken: User-provided idempotency token. The token must be unique, is case insensitive, enforces the UUID format, and can't be reused.\n\n :type Mode: string\n :param Mode: The execution mode of the automation. Valid modes include the following: Auto and Interactive. The default mode is Auto.\n\n :type TargetParameterName: string\n :param TargetParameterName: The name of the parameter used as the target resource for the rate-controlled execution. Required if you specify targets.\n\n :type Targets: list\n :param Targets: A key-value mapping to target resources. Required if you specify TargetParameterName.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. 
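As a sketch (the parameter name and tag are assumptions about the Automation document in use), a rate-controlled execution might pair TargetParameterName='InstanceId' with Targets=[{'Key': 'tag:Environment', 'Values': ['Test']}].\n 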
For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type TargetMaps: list\n :param TargetMaps: A key-value mapping of document parameters to target resources. Both Targets and TargetMaps cannot be specified together.\n (dict) --\n (string) --\n (list) --\n (string) --\n \n \n\n :type MaxConcurrency: string\n :param MaxConcurrency: The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10.\n\n :type MaxErrors: string\n :param MaxErrors: The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received.\n Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time.\n \n\n :type TargetLocations: list\n :param TargetLocations: A location is a combination of AWS Regions and/or AWS accounts where you want to execute the Automation. Use this action to start an Automation in multiple Regions and multiple accounts. For more information, see Concurrently Executing Automations in Multiple AWS Regions and Accounts in the AWS Systems Manager User Guide .\n (dict) --The combination of AWS Regions and accounts targeted by the current Automation execution.\n Accounts (list) --The AWS accounts targeted by the current Automation execution.\n (string) --\n Regions (list) --The AWS Regions targeted by the current Automation execution.\n (string) --\n TargetLocationMaxConcurrency (string) --The maximum number of AWS accounts and AWS regions allowed to run the Automation concurrently.\n TargetLocationMaxErrors (string) --The maximum number of errors allowed before the system stops queueing additional Automation executions for the currently executing Automation.\n ExecutionRoleName (string) --The Automation execution role used by the currently executing Automation.\n \n \n\n :rtype: dict\n :return: {\n 'AutomationExecutionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_session(Target=None, DocumentName=None, Parameters=None):\n \"\"\"\n Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.\n See also: AWS API Documentation\n \n \n :example: response = client.start_session(\n Target='string',\n DocumentName='string',\n Parameters={\n 'string': [\n 'string',\n ]\n }\n )\n \n \n :type Target: string\n :param Target: [REQUIRED]\n The instance to connect to for the session.\n \n\n :type DocumentName: string\n :param DocumentName: The name of the SSM document to define the parameters and plugin settings for the session. 
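(As a minimal sketch with an illustrative instance ID, a default shell session can be requested with response = client.start_session(Target='i-0123456789abcdef0').)\n 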
For example, SSM-SessionManagerRunShell . If no document name is provided, a shell to the instance is launched by default.\n\n :type Parameters: dict\n :param Parameters: Reserved for future use.\n (string) --\n (list) --\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'SessionId': 'string',\n 'TokenValue': 'string',\n 'StreamUrl': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef stop_automation_execution(AutomationExecutionId=None, Type=None):\n \"\"\"\n Stop an Automation that is currently executing.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_automation_execution(\n AutomationExecutionId='string',\n Type='Complete'|'Cancel'\n )\n \n \n :type AutomationExecutionId: string\n :param AutomationExecutionId: [REQUIRED]\n The execution ID of the Automation to stop.\n \n\n :type Type: string\n :param Type: The stop request type. Valid types include the following: Cancel and Complete. The default type is Cancel.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef terminate_session(SessionId=None):\n \"\"\"\n Permanently ends a session and closes the data connection between the Session Manager client and SSM Agent on the instance. A terminated session cannot be resumed.\n See also: AWS API Documentation\n \n \n :example: response = client.terminate_session(\n SessionId='string'\n )\n \n \n :type SessionId: string\n :param SessionId: [REQUIRED]\n The ID of the session to terminate.\n \n\n :rtype: dict\n :return: {\n 'SessionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_association(AssociationId=None, Parameters=None, DocumentVersion=None, ScheduleExpression=None, OutputLocation=None, Name=None, Targets=None, AssociationName=None, AssociationVersion=None, MaxErrors=None, MaxConcurrency=None, ComplianceSeverity=None):\n \"\"\"\n Updates an association. You can update the association name and version, the document version, schedule, parameters, and Amazon S3 output.\n See also: AWS API Documentation\n \n \n :example: response = client.update_association(\n AssociationId='string',\n Parameters={\n 'string': [\n 'string',\n ]\n },\n DocumentVersion='string',\n ScheduleExpression='string',\n OutputLocation={\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n Name='string',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n AssociationName='string',\n AssociationVersion='string',\n MaxErrors='string',\n MaxConcurrency='string',\n ComplianceSeverity='CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n )\n \n \n :type AssociationId: string\n :param AssociationId: [REQUIRED]\n The ID of the association you want to update.\n \n\n :type Parameters: dict\n :param Parameters: The parameters you want to update for the association. If you create a parameter using Parameter Store, you can reference the parameter using {{ssm:parameter-name}}\n (string) --\n (list) --\n (string) --\n \n \n\n :type DocumentVersion: string\n :param DocumentVersion: The document version you want update for the association.\n\n :type ScheduleExpression: string\n :param ScheduleExpression: The cron expression used to schedule the association that you want to update.\n\n :type OutputLocation: dict\n :param OutputLocation: An Amazon S3 bucket where you want to store the results of this request.\n S3Location (dict) --An Amazon S3 bucket where you want to store the results of this request.\n OutputS3Region (string) --(Deprecated) You can no longer specify this parameter. 
The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.\n OutputS3BucketName (string) --The name of the Amazon S3 bucket.\n OutputS3KeyPrefix (string) --The Amazon S3 bucket subfolder.\n \n \n\n :type Name: string\n :param Name: The name of the association document.\n\n :type Targets: list\n :param Targets: The targets of the association.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type AssociationName: string\n :param AssociationName: The name of the association that you want to update.\n\n :type AssociationVersion: string\n :param AssociationVersion: This parameter is provided for concurrency control purposes. You must specify the latest association version in the service. If you want to ensure that this request succeeds, either specify $LATEST , or omit this parameter.\n\n :type MaxErrors: string\n :param MaxErrors: The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.\n Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.\n \n\n :type MaxConcurrency: string\n :param MaxConcurrency: The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.\n If a new instance starts and attempts to execute an association while Systems Manager is executing MaxConcurrency associations, the association is allowed to run. 
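(For example, with an illustrative MaxConcurrency='20%', only one fifth of the targets run the association at the same time.)\n 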
During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.\n \n\n :type ComplianceSeverity: string\n :param ComplianceSeverity: The severity level to assign to the association.\n\n :rtype: dict\n :return: {\n 'AssociationDescription': {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'AssociationVersion': 'string',\n 'Date': datetime(2015, 1, 1),\n 'LastUpdateAssociationDate': datetime(2015, 1, 1),\n 'Status': {\n 'Date': datetime(2015, 1, 1),\n 'Name': 'Pending'|'Success'|'Failed',\n 'Message': 'string',\n 'AdditionalInfo': 'string'\n },\n 'Overview': {\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'AssociationStatusAggregatedCount': {\n 'string': 123\n }\n },\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'AssociationId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'LastSuccessfulExecutionDate': datetime(2015, 1, 1),\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n }\n }\n \n \n :returns: \n (string) --\n (integer) --\n \n \n \n \"\"\"\n pass\n\ndef update_association_status(Name=None, InstanceId=None, AssociationStatus=None):\n \"\"\"\n Updates the status of the Systems Manager document associated with the specified instance.\n See also: AWS API Documentation\n \n \n :example: response = client.update_association_status(\n Name='string',\n InstanceId='string',\n AssociationStatus={\n 'Date': datetime(2015, 1, 1),\n 'Name': 'Pending'|'Success'|'Failed',\n 'Message': 'string',\n 'AdditionalInfo': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the Systems Manager document.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the instance.\n \n\n :type AssociationStatus: dict\n :param AssociationStatus: [REQUIRED]\n The association status.\n Date (datetime) -- [REQUIRED]The date when the status changed.\n Name (string) -- [REQUIRED]The status.\n Message (string) -- [REQUIRED]The reason for the status.\n AdditionalInfo (string) --A user-defined string.\n \n\n :rtype: dict\n :return: {\n 'AssociationDescription': {\n 'Name': 'string',\n 'InstanceId': 'string',\n 'AssociationVersion': 'string',\n 'Date': datetime(2015, 1, 1),\n 'LastUpdateAssociationDate': datetime(2015, 1, 1),\n 'Status': {\n 'Date': datetime(2015, 1, 1),\n 'Name': 'Pending'|'Success'|'Failed',\n 'Message': 'string',\n 'AdditionalInfo': 'string'\n },\n 'Overview': {\n 'Status': 'string',\n 'DetailedStatus': 'string',\n 'AssociationStatusAggregatedCount': {\n 'string': 123\n }\n },\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'AssociationId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ScheduleExpression': 'string',\n 'OutputLocation': {\n 'S3Location': {\n 'OutputS3Region': 'string',\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string'\n }\n },\n 'LastExecutionDate': datetime(2015, 1, 1),\n 'LastSuccessfulExecutionDate': datetime(2015, 1, 1),\n 'AssociationName': 'string',\n 'MaxErrors': 'string',\n 'MaxConcurrency': 'string',\n 'ComplianceSeverity': 
'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'\n }\n }\n \n \n :returns: \n (string) --\n (integer) --\n \n \n \n \"\"\"\n pass\n\ndef update_document(Content=None, Attachments=None, Name=None, VersionName=None, DocumentVersion=None, DocumentFormat=None, TargetType=None):\n \"\"\"\n Updates one or more values for a Systems Manager document.\n See also: AWS API Documentation\n \n \n :example: response = client.update_document(\n Content='string',\n Attachments=[\n {\n 'Key': 'SourceUrl',\n 'Values': [\n 'string',\n ]\n },\n ],\n Name='string',\n VersionName='string',\n DocumentVersion='string',\n DocumentFormat='YAML'|'JSON',\n TargetType='string'\n )\n \n \n :type Content: string\n :param Content: [REQUIRED]\n A valid JSON or YAML string.\n \n\n :type Attachments: list\n :param Attachments: A list of key and value pairs that describe attachments to a version of a document.\n (dict) --A key and value pair that identifies the location of an attachment to a document.\n Key (string) --The key of a key and value pair that identifies the location of an attachment to a document.\n Values (list) --The URL of the location of a document attachment, such as the URL of an Amazon S3 bucket.\n (string) --\n \n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the document that you want to update.\n \n\n :type VersionName: string\n :param VersionName: An optional field specifying the version of the artifact you are updating with the document. For example, 'Release 12, Update 6'. This value is unique across all versions of a document, and cannot be changed.\n\n :type DocumentVersion: string\n :param DocumentVersion: The version of the document that you want to update.\n\n :type DocumentFormat: string\n :param DocumentFormat: Specify the document format for the new document version. Systems Manager supports JSON and YAML documents. 
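For example (a sketch; the file name is an assumption), a YAML update might pass DocumentFormat='YAML' with Content=open('my-document.yaml').read().\n 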
JSON is the default format.\n\n :type TargetType: string\n :param TargetType: Specify a new target type for the document.\n\n :rtype: dict\n :return: {\n 'DocumentDescription': {\n 'Sha1': 'string',\n 'Hash': 'string',\n 'HashType': 'Sha256'|'Sha1',\n 'Name': 'string',\n 'VersionName': 'string',\n 'Owner': 'string',\n 'CreatedDate': datetime(2015, 1, 1),\n 'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',\n 'StatusInformation': 'string',\n 'DocumentVersion': 'string',\n 'Description': 'string',\n 'Parameters': [\n {\n 'Name': 'string',\n 'Type': 'String'|'StringList',\n 'Description': 'string',\n 'DefaultValue': 'string'\n },\n ],\n 'PlatformTypes': [\n 'Windows'|'Linux',\n ],\n 'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',\n 'SchemaVersion': 'string',\n 'LatestVersion': 'string',\n 'DefaultVersion': 'string',\n 'DocumentFormat': 'YAML'|'JSON',\n 'TargetType': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'AttachmentsInformation': [\n {\n 'Name': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_document_default_version(Name=None, DocumentVersion=None):\n \"\"\"\n Set the default version of a document.\n See also: AWS API Documentation\n \n \n :example: response = client.update_document_default_version(\n Name='string',\n DocumentVersion='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of a custom document that you want to set as the default version.\n \n\n :type DocumentVersion: string\n :param DocumentVersion: [REQUIRED]\n The version of a custom document that you want to set as the default version.\n \n\n :rtype: dict\n :return: {\n 'Description': {\n 'Name': 'string',\n 'DefaultVersion': 'string',\n 'DefaultVersionName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_maintenance_window(WindowId=None, Name=None, Description=None, StartDate=None, EndDate=None, Schedule=None, ScheduleTimezone=None, Duration=None, Cutoff=None, AllowUnassociatedTargets=None, Enabled=None, Replace=None):\n \"\"\"\n Updates an existing Maintenance Window. Only specified parameters are modified.\n See also: AWS API Documentation\n \n \n :example: response = client.update_maintenance_window(\n WindowId='string',\n Name='string',\n Description='string',\n StartDate='string',\n EndDate='string',\n Schedule='string',\n ScheduleTimezone='string',\n Duration=123,\n Cutoff=123,\n AllowUnassociatedTargets=True|False,\n Enabled=True|False,\n Replace=True|False\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The ID of the Maintenance Window to update.\n \n\n :type Name: string\n :param Name: The name of the Maintenance Window.\n\n :type Description: string\n :param Description: An optional description for the update request.\n\n :type StartDate: string\n :param StartDate: The date and time, in ISO-8601 Extended format, for when you want the Maintenance Window to become active. StartDate allows you to delay activation of the Maintenance Window until the specified future date.\n\n :type EndDate: string\n :param EndDate: The date and time, in ISO-8601 Extended format, for when you want the Maintenance Window to become inactive. 
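For example, an ISO-8601 Extended value might look like '2019-05-30T06:00:00-07:00' (an illustrative timestamp).\n 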
EndDate allows you to set a date and time in the future when the Maintenance Window will no longer run.\n\n :type Schedule: string\n :param Schedule: The schedule of the Maintenance Window in the form of a cron or rate expression.\n\n :type ScheduleTimezone: string\n :param ScheduleTimezone: The time zone that the scheduled Maintenance Window executions are based on, in Internet Assigned Numbers Authority (IANA) format. For example: 'America/Los_Angeles', 'etc/UTC', or 'Asia/Seoul'. For more information, see the Time Zone Database on the IANA website.\n\n :type Duration: integer\n :param Duration: The duration of the Maintenance Window in hours.\n\n :type Cutoff: integer\n :param Cutoff: The number of hours before the end of the Maintenance Window that Systems Manager stops scheduling new tasks for execution.\n\n :type AllowUnassociatedTargets: boolean\n :param AllowUnassociatedTargets: Whether targets must be registered with the Maintenance Window before tasks can be defined for those targets.\n\n :type Enabled: boolean\n :param Enabled: Whether the Maintenance Window is enabled.\n\n :type Replace: boolean\n :param Replace: If True, then all fields that are required by the CreateMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.\n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'StartDate': 'string',\n 'EndDate': 'string',\n 'Schedule': 'string',\n 'ScheduleTimezone': 'string',\n 'Duration': 123,\n 'Cutoff': 123,\n 'AllowUnassociatedTargets': True|False,\n 'Enabled': True|False\n }\n \n \n \"\"\"\n pass\n\ndef update_maintenance_window_target(WindowId=None, WindowTargetId=None, Targets=None, OwnerInformation=None, Name=None, Description=None, Replace=None):\n \"\"\"\n Modifies the target of an existing Maintenance Window. You can't change the target type, but you can change the following:\n The target from being an ID target to a Tag target, or a Tag target to an ID target.\n IDs for an ID target.\n Tags for a Tag target.\n Owner.\n Name.\n Description.\n If a parameter is null, then the corresponding field is not modified.\n See also: AWS API Documentation\n \n \n :example: response = client.update_maintenance_window_target(\n WindowId='string',\n WindowTargetId='string',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n OwnerInformation='string',\n Name='string',\n Description='string',\n Replace=True|False\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The Maintenance Window ID with which to modify the target.\n \n\n :type WindowTargetId: string\n :param WindowTargetId: [REQUIRED]\n The target ID to modify.\n \n\n :type Targets: list\n :param Targets: The targets to add or replace.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. 
For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type OwnerInformation: string\n :param OwnerInformation: User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.\n\n :type Name: string\n :param Name: A name for the update.\n\n :type Description: string\n :param Description: An optional description for the update.\n\n :type Replace: boolean\n :param Replace: If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.\n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'WindowTargetId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'OwnerInformation': 'string',\n 'Name': 'string',\n 'Description': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_maintenance_window_task(WindowId=None, WindowTaskId=None, Targets=None, TaskArn=None, ServiceRoleArn=None, TaskParameters=None, TaskInvocationParameters=None, Priority=None, MaxConcurrency=None, MaxErrors=None, LoggingInfo=None, Name=None, Description=None, Replace=None):\n \"\"\"\n Modifies a task assigned to a Maintenance Window. You can't change the task type, but you can change the following values:\n TaskArn. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript to AWS-RunShellScript.\n ServiceRoleArn.\n TaskInvocationParameters.\n Priority.\n MaxConcurrency.\n MaxErrors.\n If a parameter is null, then the corresponding field is not modified. Also, if you set Replace to true, then all fields required by the RegisterTaskWithMaintenanceWindow action are required for this request. Optional fields that aren't specified are set to null.\n See also: AWS API Documentation\n \n \n :example: response = client.update_maintenance_window_task(\n WindowId='string',\n WindowTaskId='string',\n Targets=[\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n TaskArn='string',\n ServiceRoleArn='string',\n TaskParameters={\n 'string': {\n 'Values': [\n 'string',\n ]\n }\n },\n TaskInvocationParameters={\n 'RunCommand': {\n 'Comment': 'string',\n 'DocumentHash': 'string',\n 'DocumentHashType': 'Sha256'|'Sha1',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'ServiceRoleArn': 'string',\n 'TimeoutSeconds': 123\n },\n 'Automation': {\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n }\n },\n 'StepFunctions': {\n 'Input': 'string',\n 'Name': 'string'\n },\n 'Lambda': {\n 'ClientContext': 'string',\n 'Qualifier': 'string',\n 'Payload': b'bytes'\n }\n },\n Priority=123,\n MaxConcurrency='string',\n MaxErrors='string',\n LoggingInfo={\n 'S3BucketName': 'string',\n 'S3KeyPrefix': 'string',\n 'S3Region': 'string'\n },\n Name='string',\n Description='string',\n Replace=True|False\n )\n \n \n :type WindowId: string\n :param WindowId: [REQUIRED]\n The Maintenance Window ID that contains the task to modify.\n \n\n :type WindowTaskId: string\n :param WindowTaskId: [REQUIRED]\n The task ID to modify.\n \n\n :type Targets: list\n :param Targets: The targets (either instances or tags) to modify. Instances are specified using Key=instanceids,Values=instanceID_1,instanceID_2. 
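For example (illustrative IDs): Targets=[{'Key': 'InstanceIds', 'Values': ['i-0123456789abcdef0', 'i-0abcdef1234567890']}].\n 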
Tags are specified using Key=tag_name,Values=tag_value.\n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n (string) --\n \n \n\n :type TaskArn: string\n :param TaskArn: The task ARN to modify.\n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: The IAM service role ARN to modify. The system assumes this role during task execution.\n If you do not specify a service role ARN, Systems Manager will use your account's service-linked role for Systems Manager by default. If no service-linked role for Systems Manager exists in your account, it will be created when you run RegisterTaskWithMaintenanceWindow without specifying a service role ARN.\n For more information, see Service-Linked Role Permissions for Systems Manager and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance Window Tasks? in the AWS Systems Manager User Guide .\n \n\n :type TaskParameters: dict\n :param TaskParameters: The parameters to modify.\n Note\n TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n The map has the following format:\n Key: string, between 1 and 255 characters\n Value: an array of strings, each string is between 1 and 255 characters\n (string) --\n (dict) --Defines the values for a task parameter.\n Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length.\n (string) --\n \n \n\n :type TaskInvocationParameters: dict\n :param TaskInvocationParameters: The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.\n RunCommand (dict) --The parameters for a RUN_COMMAND task type.\n Comment (string) --Information about the command(s) to execute.\n DocumentHash (string) --The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated.\n DocumentHashType (string) --SHA-256 or SHA-1. SHA-1 hashes have been deprecated.\n NotificationConfig (dict) --Configurations for sending notifications about command status changes on a per-instance basis.\n NotificationArn (string) --An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic.\n NotificationEvents (list) --The different events for which you can receive notifications. 
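(For example, an illustrative selection such as NotificationEvents=['TimedOut', 'Failed'] restricts notifications to those two events.)\n 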
These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide .\n (string) --\n NotificationType (string) --Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.\n OutputS3BucketName (string) --The name of the Amazon S3 bucket.\n OutputS3KeyPrefix (string) --The Amazon S3 bucket subfolder.\n Parameters (dict) --The parameters for the RUN_COMMAND task execution.\n (string) --\n (list) --\n (string) --\n \n ServiceRoleArn (string) --The IAM service role to assume during task execution.\n TimeoutSeconds (integer) --If this time is reached and the command has not already started executing, it doesn't run.\n Automation (dict) --The parameters for an AUTOMATION task type.\n DocumentVersion (string) --The version of an Automation document to use during task execution.\n Parameters (dict) --The parameters for the AUTOMATION task.\n For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask .\n Note\n LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n For AUTOMATION task types, Systems Manager ignores any values specified for these parameters.\n (string) --\n (list) --\n (string) --\n \n \n StepFunctions (dict) --The parameters for a STEP_FUNCTION task type.\n Input (string) --The inputs for the STEP_FUNCTION task.\n Name (string) --The name of the STEP_FUNCTION task.\n Lambda (dict) --The parameters for a LAMBDA task type.\n ClientContext (string) --Pass client-specific information to the Lambda function that you are invoking. You can then process the client information in your Lambda function as you choose through the context variable.\n Qualifier (string) --(Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to invoke the Lambda function version to which the alias points.\n Payload (bytes) --JSON to provide to your Lambda function as input.\n \n \n\n :type Priority: integer\n :param Priority: The new task priority to specify. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.\n\n :type MaxConcurrency: string\n :param MaxConcurrency: The new MaxConcurrency value you want to specify. MaxConcurrency is the number of targets that are allowed to run this task in parallel.\n\n :type MaxErrors: string\n :param MaxErrors: The new MaxErrors value to specify. 
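For example (illustrative values), MaxErrors='2' or MaxErrors='10%'.\n 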
MaxErrors is the maximum number of errors that are allowed before the task stops being scheduled.\n\n :type LoggingInfo: dict\n :param LoggingInfo: The new logging location in Amazon S3 to specify.\n Note\n LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored .\n S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder.\n S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located.\n \n\n :type Name: string\n :param Name: The new task name to specify.\n\n :type Description: string\n :param Description: The new task description to specify.\n\n :type Replace: boolean\n :param Replace: If True, then all fields that are required by the RegisterTaskWithMaintenanceWndow action are also required for this API request. Optional fields that are not specified are set to null.\n\n :rtype: dict\n :return: {\n 'WindowId': 'string',\n 'WindowTaskId': 'string',\n 'Targets': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'TaskArn': 'string',\n 'ServiceRoleArn': 'string',\n 'TaskParameters': {\n 'string': {\n 'Values': [\n 'string',\n ]\n }\n },\n 'TaskInvocationParameters': {\n 'RunCommand': {\n 'Comment': 'string',\n 'DocumentHash': 'string',\n 'DocumentHashType': 'Sha256'|'Sha1',\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': [\n 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',\n ],\n 'NotificationType': 'Command'|'Invocation'\n },\n 'OutputS3BucketName': 'string',\n 'OutputS3KeyPrefix': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n },\n 'ServiceRoleArn': 'string',\n 'TimeoutSeconds': 123\n },\n 'Automation': {\n 'DocumentVersion': 'string',\n 'Parameters': {\n 'string': [\n 'string',\n ]\n }\n },\n 'StepFunctions': {\n 'Input': 'string',\n 'Name': 'string'\n },\n 'Lambda': {\n 'ClientContext': 'string',\n 'Qualifier': 'string',\n 'Payload': b'bytes'\n }\n },\n 'Priority': 123,\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'LoggingInfo': {\n 'S3BucketName': 'string',\n 'S3KeyPrefix': 'string',\n 'S3Region': 'string'\n },\n 'Name': 'string',\n 'Description': 'string'\n }\n \n \n :returns: \n WindowId (string) -- [REQUIRED]\n The Maintenance Window ID that contains the task to modify.\n \n WindowTaskId (string) -- [REQUIRED]\n The task ID to modify.\n \n Targets (list) -- The targets (either instances or tags) to modify. Instances are specified using Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using Key=tag_name,Values=tag_value.\n \n (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.\n \n Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide .\n \n Values (list) --User-defined criteria that maps to Key. 
For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide .\n \n (string) --\n \n \n \n \n \n \n TaskArn (string) -- The task ARN to modify.\n ServiceRoleArn (string) -- The IAM service role ARN to modify. The system assumes this role during task execution.\n If you do not specify a service role ARN, Systems Manager will use your account's service-linked role for Systems Manager by default. If no service-linked role for Systems Manager exists in your account, it will be created when you run RegisterTaskWithMaintenanceWindow without specifying a service role ARN.\n For more information, see Service-Linked Role Permissions for Systems Manager and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance Window Tasks? in the AWS Systems Manager User Guide .\n \n TaskParameters (dict) -- The parameters to modify.\n \n Note\n TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n \n The map has the following format:\n Key: string, between 1 and 255 characters\n Value: an array of strings, each string is between 1 and 255 characters\n \n (string) --\n (dict) --Defines the values for a task parameter.\n \n Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length.\n \n (string) --\n \n \n \n \n \n \n \n \n TaskInvocationParameters (dict) -- The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.\n \n RunCommand (dict) --The parameters for a RUN_COMMAND task type.\n \n Comment (string) --Information about the command(s) to execute.\n \n DocumentHash (string) --The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated.\n \n DocumentHashType (string) --SHA-256 or SHA-1. SHA-1 hashes have been deprecated.\n \n NotificationConfig (dict) --Configurations for sending notifications about command status changes on a per-instance basis.\n \n NotificationArn (string) --An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic.\n \n NotificationEvents (list) --The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide .\n \n (string) --\n \n \n NotificationType (string) --Command: Receive notification when the status of a command changes. 
Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.\n \n \n \n OutputS3BucketName (string) --The name of the Amazon S3 bucket.\n \n OutputS3KeyPrefix (string) --The Amazon S3 bucket subfolder.\n \n Parameters (dict) --The parameters for the RUN_COMMAND task execution.\n \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \n ServiceRoleArn (string) --The IAM service role to assume during task execution.\n \n TimeoutSeconds (integer) --If this time is reached and the command has not already started executing, it doesn't run.\n \n \n \n Automation (dict) --The parameters for an AUTOMATION task type.\n \n DocumentVersion (string) --The version of an Automation document to use during task execution.\n \n Parameters (dict) --The parameters for the AUTOMATION task.\n For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask .\n \n Note\n \n LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n \n For AUTOMATION task types, Systems Manager ignores any values specified for these parameters.\n \n \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \n \n \n StepFunctions (dict) --The parameters for a STEP_FUNCTION task type.\n \n Input (string) --The inputs for the STEP_FUNCTION task.\n \n Name (string) --The name of the STEP_FUNCTION task.\n \n \n \n Lambda (dict) --The parameters for a LAMBDA task type.\n \n ClientContext (string) --Pass client-specific information to the Lambda function that you are invoking. You can then process the client information in your Lambda function as you choose through the context variable.\n \n Qualifier (string) --(Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to invoke the Lambda function version to which the alias points.\n \n Payload (bytes) --JSON to provide to your Lambda function as input.\n \n \n \n \n \n Priority (integer) -- The new task priority to specify. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.\n MaxConcurrency (string) -- The new MaxConcurrency value you want to specify. MaxConcurrency is the number of targets that are allowed to run this task in parallel.\n MaxErrors (string) -- The new MaxErrors value to specify. MaxErrors is the maximum number of errors that are allowed before the task stops being scheduled.\n LoggingInfo (dict) -- The new logging location in Amazon S3 to specify.\n \n Note\n LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. 
For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .\n \n \n S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored.\n \n S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder.\n \n S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located.\n \n \n \n Name (string) -- The new task name to specify.\n Description (string) -- The new task description to specify.\n Replace (boolean) -- If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.\n \n \"\"\"\n pass\n\ndef update_managed_instance_role(InstanceId=None, IamRole=None):\n \"\"\"\n Assigns or changes an AWS Identity and Access Management (IAM) role to the managed instance.\n See also: AWS API Documentation\n \n \n :example: response = client.update_managed_instance_role(\n InstanceId='string',\n IamRole='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The ID of the managed instance where you want to update the role.\n \n\n :type IamRole: string\n :param IamRole: [REQUIRED]\n The IAM role you want to assign or change.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_patch_baseline(BaselineId=None, Name=None, GlobalFilters=None, ApprovalRules=None, ApprovedPatches=None, ApprovedPatchesComplianceLevel=None, ApprovedPatchesEnableNonSecurity=None, RejectedPatches=None, RejectedPatchesAction=None, Description=None, Sources=None, Replace=None):\n \"\"\"\n Modifies an existing patch baseline. Fields not specified in the request are left unchanged.\n See also: AWS API Documentation\n \n \n :example: response = client.update_patch_baseline(\n BaselineId='string',\n Name='string',\n GlobalFilters={\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n ApprovalRules={\n 'PatchRules': [\n {\n 'PatchFilterGroup': {\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApproveAfterDays': 123,\n 'EnableNonSecurity': True|False\n },\n ]\n },\n ApprovedPatches=[\n 'string',\n ],\n ApprovedPatchesComplianceLevel='CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n ApprovedPatchesEnableNonSecurity=True|False,\n RejectedPatches=[\n 'string',\n ],\n RejectedPatchesAction='ALLOW_AS_DEPENDENCY'|'BLOCK',\n Description='string',\n Sources=[\n {\n 'Name': 'string',\n 'Products': [\n 'string',\n ],\n 'Configuration': 'string'\n },\n ],\n Replace=True|False\n )\n \n \n :type BaselineId: string\n :param BaselineId: [REQUIRED]\n The ID of the patch baseline to update.\n \n\n :type Name: string\n :param Name: The name of the patch baseline.\n\n :type GlobalFilters: dict\n :param GlobalFilters: A set of global filters used to exclude patches from the baseline.\n PatchFilters (list) -- [REQUIRED]The set of patch filters that make up the group.\n (dict) --Defines a patch filter.\n A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. 
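For example, a single filter might look like {'Key': 'PRODUCT', 'Values': ['WindowsServer2016']} (an illustrative pair).\n 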
The key MSRC_SEVERITY , however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.\n Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.\n Windows Operating Systems\n The supported keys for Windows operating systems are PRODUCT , CLASSIFICATION , and MSRC_SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Windows7\n Windows8\n Windows8.1\n Windows8Embedded\n Windows10\n Windows10LTSB\n WindowsServer2008\n WindowsServer2008R2\n WindowsServer2012\n WindowsServer2012R2\n WindowsServer2016\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n CriticalUpdates\n DefinitionUpdates\n Drivers\n FeaturePacks\n SecurityUpdates\n ServicePacks\n Tools\n UpdateRollups\n Updates\n Upgrades\n Supported key: MSRC_SEVERITY\n Supported values:\n Critical\n Important\n Moderate\n Low\n Unspecified\n Ubuntu Operating Systems\n The supported keys for Ubuntu operating systems are PRODUCT , PRIORITY , and SECTION . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n Ubuntu14.04\n Ubuntu16.04\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: PRIORITY\n Supported values:\n Required\n Important\n Standard\n Optional\n Extra\n Supported key: SECTION\n Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.\n Amazon Linux Operating Systems\n The supported keys for Amazon Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n AmazonLinux2012.03\n AmazonLinux2012.09\n AmazonLinux2013.03\n AmazonLinux2013.09\n AmazonLinux2014.03\n AmazonLinux2014.09\n AmazonLinux2015.03\n AmazonLinux2015.09\n AmazonLinux2016.03\n AmazonLinux2016.09\n AmazonLinux2017.03\n AmazonLinux2017.09\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n Amazon Linux 2 Operating Systems\n The supported keys for Amazon Linux 2 operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCT\n Supported values:\n AmazonLinux2\n AmazonLinux2.0\n Use a wildcard character (*) to target all supported operating system versions.\n Supported key: CLASSIFICATION\n Supported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITY\n Supported values:\n Critical\n Important\n Medium\n Low\n RedHat Enterprise Linux (RHEL) Operating Systems\n The supported keys for RedHat Enterprise Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n RedhatEnterpriseLinux6.5\n RedhatEnterpriseLinux6.6\n RedhatEnterpriseLinux6.7\n RedhatEnterpriseLinux6.8\n RedhatEnterpriseLinux6.9\n RedhatEnterpriseLinux7.0\n RedhatEnterpriseLinux7.1\n RedhatEnterpriseLinux7.2\n RedhatEnterpriseLinux7.3\n RedhatEnterpriseLinux7.4\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Medium\n Low\n SUSE Linux Enterprise Server (SLES) Operating Systems\n The supported keys for SLES operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n Suse12.0\n Suse12.1\n Suse12.2\n Suse12.3\n Suse12.4\n Suse12.5\n Suse12.6\n Suse12.7\n Suse12.8\n Suse12.9\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Recommended\n Optional\n Feature\n Document\n Yast\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Moderate\n Low\n CentOS Operating Systems\n The supported keys for CentOS operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n CentOS6.5\n CentOS6.6\n CentOS6.7\n CentOS6.8\n CentOS6.9\n CentOS7.0\n CentOS7.1\n CentOS7.2\n CentOS7.3\n CentOS7.4\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Medium\n Low\n Key (string) -- [REQUIRED]The key for the filter.\n See PatchFilter for lists of valid keys for each operating system type.\n Values (list) -- [REQUIRED]The value for the filter key.\n See PatchFilter for lists of valid values for each key based on operating system type.\n (string) --\n \n \n\n :type ApprovalRules: dict\n :param ApprovalRules: A set of rules used to include patches in the baseline.\n PatchRules (list) -- [REQUIRED]The rules that make up the rule group.\n (dict) --Defines an approval rule for a patch baseline.\n PatchFilterGroup (dict) -- [REQUIRED]The patch filter group that defines the criteria for the rule.\n PatchFilters (list) -- [REQUIRED]The set of patch filters that make up the group.\n (dict) --Defines a patch filter.\n A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY , however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.\n Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.\n Windows Operating Systems\n The supported keys for Windows operating systems are PRODUCT , CLASSIFICATION , and MSRC_SEVERITY . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n Windows7\n Windows8\n Windows8.1\n Windows8Embedded\n Windows10\n Windows10LTSB\n WindowsServer2008\n WindowsServer2008R2\n WindowsServer2012\n WindowsServer2012R2\n WindowsServer2016\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n CriticalUpdates\n DefinitionUpdates\n Drivers\n FeaturePacks\n SecurityUpdates\n ServicePacks\n Tools\n UpdateRollups\n Updates\n Upgrades\n Supported key: MSRC_SEVERITYSupported values:\n Critical\n Important\n Moderate\n Low\n Unspecified\n Ubuntu Operating Systems\n The supported keys for Ubuntu operating systems are PRODUCT , PRIORITY , and SECTION . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n Ubuntu14.04\n Ubuntu16.04\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: PRIORITYSupported values:\n Required\n Important\n Standard\n Optional\n Extra\n Supported key: SECTION\n Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.\n Amazon Linux Operating Systems\n The supported keys for Amazon Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n AmazonLinux2012.03\n AmazonLinux2012.09\n AmazonLinux2013.03\n AmazonLinux2013.09\n AmazonLinux2014.03\n AmazonLinux2014.09\n AmazonLinux2015.03\n AmazonLinux2015.09\n AmazonLinux2016.03\n AmazonLinux2016.09\n AmazonLinux2017.03\n AmazonLinux2017.09\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Medium\n Low\n Amazon Linux 2 Operating Systems\n The supported keys for Amazon Linux 2 operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n AmazonLinux2\n AmazonLinux2.0\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Medium\n Low\n RedHat Enterprise Linux (RHEL) Operating Systems\n The supported keys for RedHat Enterprise Linux operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n RedhatEnterpriseLinux6.5\n RedhatEnterpriseLinux6.6\n RedhatEnterpriseLinux6.7\n RedhatEnterpriseLinux6.8\n RedhatEnterpriseLinux6.9\n RedhatEnterpriseLinux7.0\n RedhatEnterpriseLinux7.1\n RedhatEnterpriseLinux7.2\n RedhatEnterpriseLinux7.3\n RedhatEnterpriseLinux7.4\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Medium\n Low\n SUSE Linux Enterprise Server (SLES) Operating Systems\n The supported keys for SLES operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . 
See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n Suse12.0\n Suse12.1\n Suse12.2\n Suse12.3\n Suse12.4\n Suse12.5\n Suse12.6\n Suse12.7\n Suse12.8\n Suse12.9\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Recommended\n Optional\n Feature\n Document\n Yast\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Moderate\n Low\n CentOS Operating Systems\n The supported keys for CentOS operating systems are PRODUCT , CLASSIFICATION , and SEVERITY . See the following lists for valid values for each of these keys.\n Supported key: PRODUCTSupported values:\n CentOS6.5\n CentOS6.6\n CentOS6.7\n CentOS6.8\n CentOS6.9\n CentOS7.0\n CentOS7.1\n CentOS7.2\n CentOS7.3\n CentOS7.4\n * Use a wildcard character () to target all supported operating system versions.*\n Supported key: CLASSIFICATIONSupported values:\n Security\n Bugfix\n Enhancement\n Recommended\n Newpackage\n Supported key: SEVERITYSupported values:\n Critical\n Important\n Medium\n Low\n Key (string) -- [REQUIRED]The key for the filter.\n See PatchFilter for lists of valid keys for each operating system type.\n Values (list) -- [REQUIRED]The value for the filter key.\n See PatchFilter for lists of valid values for each key based on operating system type.\n (string) --\n \n \n ComplianceLevel (string) --A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.\n ApproveAfterDays (integer) -- [REQUIRED]The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released.\n EnableNonSecurity (boolean) --For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is 'false'. Applies to Linux instances only.\n \n \n\n :type ApprovedPatches: list\n :param ApprovedPatches: A list of explicitly approved patches for the baseline.\n For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide .\n (string) --\n \n\n :type ApprovedPatchesComplianceLevel: string\n :param ApprovedPatchesComplianceLevel: Assigns a new compliance severity level to an existing patch baseline.\n\n :type ApprovedPatchesEnableNonSecurity: boolean\n :param ApprovedPatchesEnableNonSecurity: Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.\n\n :type RejectedPatches: list\n :param RejectedPatches: A list of explicitly rejected patches for the baseline.\n For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide .\n (string) --\n \n\n :type RejectedPatchesAction: string\n :param RejectedPatchesAction: The action for Patch Manager to take on patches included in the RejectedPackages list.\n ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. 
It is considered compliant with the patch baseline, and its status is reported as InstalledOther . This is the default action if no option is specified.\n BLOCK : Packages in the RejectedPatches list, and packages that include them as dependencies, are not installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as InstalledRejected .\n \n\n :type Description: string\n :param Description: A description of the patch baseline.\n\n :type Sources: list\n :param Sources: Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.\n (dict) --Information about the patches to use to update the instances, including target operating systems and source repository. Applies to Linux instances only.\n Name (string) -- [REQUIRED]The name specified to identify the patch source.\n Products (list) -- [REQUIRED]The specific operating system versions a patch repository applies to, such as 'Ubuntu16.04', 'AmazonLinux2016.09', 'RedhatEnterpriseLinux7.2' or 'Suse12.7'. For lists of supported product values, see PatchFilter .\n (string) --\n Configuration (string) -- [REQUIRED]The value of the yum repo configuration. For example:\n cachedir=/var/cache/yum/$basesearch$releasever\n keepcache=0\n debuglevel=2\n \n \n\n :type Replace: boolean\n :param Replace: If True, then all fields that are required by the CreatePatchBaseline action are also required for this API request. Optional fields that are not specified are set to null.\n\n :rtype: dict\n :return: {\n 'BaselineId': 'string',\n 'Name': 'string',\n 'OperatingSystem': 'WINDOWS'|'AMAZON_LINUX'|'AMAZON_LINUX_2'|'UBUNTU'|'REDHAT_ENTERPRISE_LINUX'|'SUSE'|'CENTOS',\n 'GlobalFilters': {\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ApprovalRules': {\n 'PatchRules': [\n {\n 'PatchFilterGroup': {\n 'PatchFilters': [\n {\n 'Key': 'PRODUCT'|'CLASSIFICATION'|'MSRC_SEVERITY'|'PATCH_ID'|'SECTION'|'PRIORITY'|'SEVERITY',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApproveAfterDays': 123,\n 'EnableNonSecurity': True|False\n },\n ]\n },\n 'ApprovedPatches': [\n 'string',\n ],\n 'ApprovedPatchesComplianceLevel': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'INFORMATIONAL'|'UNSPECIFIED',\n 'ApprovedPatchesEnableNonSecurity': True|False,\n 'RejectedPatches': [\n 'string',\n ],\n 'RejectedPatchesAction': 'ALLOW_AS_DEPENDENCY'|'BLOCK',\n 'CreatedDate': datetime(2015, 1, 1),\n 'ModifiedDate': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Sources': [\n {\n 'Name': 'string',\n 'Products': [\n 'string',\n ],\n 'Configuration': 'string'\n },\n ]\n }\n \n \n :returns: \n Windows7\n Windows8\n Windows8.1\n Windows8Embedded\n Windows10\n Windows10LTSB\n WindowsServer2008\n WindowsServer2008R2\n WindowsServer2012\n WindowsServer2012R2\n WindowsServer2016\n * Use a wildcard character () to target all supported operating system versions.*\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.572509765625, "alphanum_fraction": 0.5772140026092529, "avg_line_length": 33.23633575439453, "blob_id": "69aee37c6b325521f5f3728e0b02c7d292cebe0e", "content_id": "62e69005cd67f123fc4bb2c7f45c0c0d47c0d215", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
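The update_patch_baseline parameters documented above map directly onto a boto3 call. The following is a minimal sketch, not a definitive recipe: the baseline ID ('pb-0123456789abcdef0') is a hypothetical placeholder for an ID returned by create_patch_baseline, and the filter values are illustrative. It approves Critical/Important security updates seven days after release and lets rejected packages install only as dependencies:

import boto3

ssm = boto3.client('ssm')

# Hypothetical baseline ID; replace with the ID returned by create_patch_baseline.
response = ssm.update_patch_baseline(
    BaselineId='pb-0123456789abcdef0',
    ApprovalRules={
        'PatchRules': [
            {
                # Approve Critical/Important security updates.
                'PatchFilterGroup': {
                    'PatchFilters': [
                        {'Key': 'CLASSIFICATION', 'Values': ['Security']},
                        {'Key': 'SEVERITY', 'Values': ['Critical', 'Important']},
                    ]
                },
                'ComplianceLevel': 'CRITICAL',
                # Patches are marked approved seven days after their release date.
                'ApproveAfterDays': 7,
                'EnableNonSecurity': False,
            }
        ]
    },
    # Rejected packages are installed only when pulled in as a dependency.
    RejectedPatchesAction='ALLOW_AS_DEPENDENCY',
)
print(response['BaselineId'], response['ModifiedDate'])

Note that Replace is left unset here: per the Replace parameter description above, only the fields supplied in the request are updated, whereas Replace=True makes the CreatePatchBaseline required fields mandatory and nulls out unspecified optional fields.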
"language": "Python", "length_bytes": 270605, "license_type": "permissive", "max_line_length": 538, "num_lines": 7904, "path": "/pyboto3/iot.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef accept_certificate_transfer(certificateId=None, setAsActive=None):\n \"\"\"\n Accepts a pending certificate transfer. The default state of the certificate is INACTIVE.\n To check for pending certificate transfers, call ListCertificates to enumerate your certificates.\n See also: AWS API Documentation\n \n \n :example: response = client.accept_certificate_transfer(\n certificateId='string',\n setAsActive=True|False\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. 
(The last part of the certificate ARN contains the certificate ID.)\n \n\n :type setAsActive: boolean\n :param setAsActive: Specifies whether the certificate is active.\n\n \"\"\"\n pass\n\ndef add_thing_to_billing_group(billingGroupName=None, billingGroupArn=None, thingName=None, thingArn=None):\n \"\"\"\n Adds a thing to a billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.add_thing_to_billing_group(\n billingGroupName='string',\n billingGroupArn='string',\n thingName='string',\n thingArn='string'\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: The name of the billing group.\n\n :type billingGroupArn: string\n :param billingGroupArn: The ARN of the billing group.\n\n :type thingName: string\n :param thingName: The name of the thing to be added to the billing group.\n\n :type thingArn: string\n :param thingArn: The ARN of the thing to be added to the billing group.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef add_thing_to_thing_group(thingGroupName=None, thingGroupArn=None, thingName=None, thingArn=None, overrideDynamicGroups=None):\n \"\"\"\n Adds a thing to a thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.add_thing_to_thing_group(\n thingGroupName='string',\n thingGroupArn='string',\n thingName='string',\n thingArn='string',\n overrideDynamicGroups=True|False\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: The name of the group to which you are adding a thing.\n\n :type thingGroupArn: string\n :param thingGroupArn: The ARN of the group to which you are adding a thing.\n\n :type thingName: string\n :param thingName: The name of the thing to add to a group.\n\n :type thingArn: string\n :param thingArn: The ARN of the thing to add to a group.\n\n :type overrideDynamicGroups: boolean\n :param overrideDynamicGroups: Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_targets_with_job(targets=None, jobId=None, comment=None):\n \"\"\"\n Associates a group with a continuous job. 
The following criteria must be met:\n See also: AWS API Documentation\n \n \n :example: response = client.associate_targets_with_job(\n targets=[\n 'string',\n ],\n jobId='string',\n comment='string'\n )\n \n \n :type targets: list\n :param targets: [REQUIRED]\n A list of thing group ARNs that define the targets of the job.\n (string) --\n \n\n :type jobId: string\n :param jobId: [REQUIRED]\n The unique identifier you assigned to this job when it was created.\n \n\n :type comment: string\n :param comment: An optional comment string describing why the job was associated with the targets.\n\n :rtype: dict\n :return: {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'description': 'string'\n }\n \n \n :returns: \n targets (list) -- [REQUIRED]\n A list of thing group ARNs that define the targets of the job.\n \n (string) --\n \n \n jobId (string) -- [REQUIRED]\n The unique identifier you assigned to this job when it was created.\n \n comment (string) -- An optional comment string describing why the job was associated with the targets.\n \n \"\"\"\n pass\n\ndef attach_policy(policyName=None, target=None):\n \"\"\"\n Attaches a policy to the specified target.\n See also: AWS API Documentation\n \n \n :example: response = client.attach_policy(\n policyName='string',\n target='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The name of the policy to attach.\n \n\n :type target: string\n :param target: [REQUIRED]\n The identity to which the policy is attached.\n \n\n \"\"\"\n pass\n\ndef attach_principal_policy(policyName=None, principal=None):\n \"\"\"\n Attaches the specified policy to the specified principal (certificate or other credential).\n See also: AWS API Documentation\n \n \n :example: response = client.attach_principal_policy(\n policyName='string',\n principal='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The policy name.\n \n\n :type principal: string\n :param principal: [REQUIRED]\n The principal, which can be a certificate ARN (as returned from the CreateCertificate operation) or an Amazon Cognito ID.\n \n\n \"\"\"\n pass\n\ndef attach_security_profile(securityProfileName=None, securityProfileTargetArn=None):\n \"\"\"\n Associates a Device Defender security profile with a thing group or with this account. 
Each thing group or account can have up to five security profiles associated with it.\n See also: AWS API Documentation\n \n \n :example: response = client.attach_security_profile(\n securityProfileName='string',\n securityProfileTargetArn='string'\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The security profile that is attached.\n \n\n :type securityProfileTargetArn: string\n :param securityProfileTargetArn: [REQUIRED]\n The ARN of the target (thing group) to which the security profile is attached.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef attach_thing_principal(thingName=None, principal=None):\n \"\"\"\n Attaches the specified principal to the specified thing.\n See also: AWS API Documentation\n \n \n :example: response = client.attach_thing_principal(\n thingName='string',\n principal='string'\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing.\n \n\n :type principal: string\n :param principal: [REQUIRED]\n The principal, such as a certificate or other credential.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_audit_task(taskId=None):\n \"\"\"\n Cancels an audit that is in progress. The audit can be either scheduled or on-demand. If the audit is not in progress, an \"InvalidRequestException\" occurs.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_audit_task(\n taskId='string'\n )\n \n \n :type taskId: string\n :param taskId: [REQUIRED]\n The ID of the audit you want to cancel. You can only cancel an audit that is 'IN_PROGRESS'.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef cancel_certificate_transfer(certificateId=None):\n \"\"\"\n Cancels a pending transfer for the specified certificate.\n After a certificate transfer is cancelled, the status of the certificate changes from PENDING_TRANSFER to INACTIVE.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_certificate_transfer(\n certificateId='string'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)\n \n\n \"\"\"\n pass\n\ndef cancel_job(jobId=None, reasonCode=None, comment=None, force=None):\n \"\"\"\n Cancels a job.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_job(\n jobId='string',\n reasonCode='string',\n comment='string',\n force=True|False\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The unique identifier you assigned to this job when it was created.\n \n\n :type reasonCode: string\n :param reasonCode: (Optional)A reason code string that explains why the job was canceled.\n\n :type comment: string\n :param comment: An optional comment string describing why the job was canceled.\n\n :type force: boolean\n :param force: (Optional) If true job executions with status 'IN_PROGRESS' and 'QUEUED' are canceled, otherwise only job executions with status 'QUEUED' are canceled. 
The default is false .\n Canceling a job which is 'IN_PROGRESS', will cause a device which is executing the job to be unable to update the job execution status. Use caution and ensure that each device executing a job which is canceled is able to recover to a valid state.\n \n\n :rtype: dict\n :return: {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'description': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef cancel_job_execution(jobId=None, thingName=None, force=None, expectedVersion=None, statusDetails=None):\n \"\"\"\n Cancels the execution of a job for a given thing.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_job_execution(\n jobId='string',\n thingName='string',\n force=True|False,\n expectedVersion=123,\n statusDetails={\n 'string': 'string'\n }\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The ID of the job to be canceled.\n \n\n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing whose execution of the job will be canceled.\n \n\n :type force: boolean\n :param force: (Optional) If true the job execution will be canceled if it has status IN_PROGRESS or QUEUED, otherwise the job execution will be canceled only if it has status QUEUED. If you attempt to cancel a job execution that is IN_PROGRESS, and you do not set force to true , then an InvalidStateTransitionException will be thrown. The default is false .\n Canceling a job execution which is 'IN_PROGRESS', will cause the device to be unable to update the job execution status. Use caution and ensure that the device is able to recover to a valid state.\n \n\n :type expectedVersion: integer\n :param expectedVersion: (Optional) The expected current version of the job execution. Each time you update the job execution, its version is incremented. If the version of the job execution stored in Jobs does not match, the update is rejected with a VersionMismatch error, and an ErrorResponse that contains the current job execution status data is returned. (This makes it unnecessary to perform a separate DescribeJobExecution request in order to obtain the job execution status data.)\n\n :type statusDetails: dict\n :param statusDetails: A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged. 
You can specify at most 10 name/value pairs.\n (string) --\n (string) --\n \n\n \"\"\"\n pass\n\ndef clear_default_authorizer():\n \"\"\"\n Clears the default authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.clear_default_authorizer()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_authorizer(authorizerName=None, authorizerFunctionArn=None, tokenKeyName=None, tokenSigningPublicKeys=None, status=None):\n \"\"\"\n Creates an authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.create_authorizer(\n authorizerName='string',\n authorizerFunctionArn='string',\n tokenKeyName='string',\n tokenSigningPublicKeys={\n 'string': 'string'\n },\n status='ACTIVE'|'INACTIVE'\n )\n \n \n :type authorizerName: string\n :param authorizerName: [REQUIRED]\n The authorizer name.\n \n\n :type authorizerFunctionArn: string\n :param authorizerFunctionArn: [REQUIRED]\n The ARN of the authorizer's Lambda function.\n \n\n :type tokenKeyName: string\n :param tokenKeyName: [REQUIRED]\n The name of the token key used to extract the token from the HTTP headers.\n \n\n :type tokenSigningPublicKeys: dict\n :param tokenSigningPublicKeys: [REQUIRED]\n The public keys used to verify the digital signature returned by your custom authentication service.\n (string) --\n (string) --\n \n\n :type status: string\n :param status: The status of the create authorizer request.\n\n :rtype: dict\n :return: {\n 'authorizerName': 'string',\n 'authorizerArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_billing_group(billingGroupName=None, billingGroupProperties=None, tags=None):\n \"\"\"\n Creates a billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.create_billing_group(\n billingGroupName='string',\n billingGroupProperties={\n 'billingGroupDescription': 'string'\n },\n tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: [REQUIRED]\n The name you wish to give to the billing group.\n \n\n :type billingGroupProperties: dict\n :param billingGroupProperties: The properties of the billing group.\n billingGroupDescription (string) --The description of the billing group.\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the billing group.\n (dict) --A set of key/value pairs that are used to manage the resource.\n Key (string) --The tag's key.\n Value (string) --The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'billingGroupName': 'string',\n 'billingGroupArn': 'string',\n 'billingGroupId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_certificate_from_csr(certificateSigningRequest=None, setAsActive=None):\n \"\"\"\n Creates an X.509 certificate using the specified certificate signing request.\n You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. 
The following commands show how to create a batch of certificates given a batch of CSRs.\n Assuming a set of CSRs are located inside of the directory my-csr-directory:\n On Linux and OS X, the command is:\n $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}\n This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr AWS CLI command to create a certificate for the corresponding CSR.\n The aws iot create-certificate-from-csr part of the command can also be run in parallel to speed up the certificate creation process:\n $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}\n On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:\n > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}\n On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:\n > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"\n See also: AWS API Documentation\n \n \n :example: response = client.create_certificate_from_csr(\n certificateSigningRequest='string',\n setAsActive=True|False\n )\n \n \n :type certificateSigningRequest: string\n :param certificateSigningRequest: [REQUIRED]\n The certificate signing request (CSR).\n \n\n :type setAsActive: boolean\n :param setAsActive: Specifies whether the certificate is active.\n\n :rtype: dict\n :return: {\n 'certificateArn': 'string',\n 'certificateId': 'string',\n 'certificatePem': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_dynamic_thing_group(thingGroupName=None, thingGroupProperties=None, indexName=None, queryString=None, queryVersion=None, tags=None):\n \"\"\"\n Creates a dynamic thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.create_dynamic_thing_group(\n thingGroupName='string',\n thingGroupProperties={\n 'thingGroupDescription': 'string',\n 'attributePayload': {\n 'attributes': {\n 'string': 'string'\n },\n 'merge': True|False\n }\n },\n indexName='string',\n queryString='string',\n queryVersion='string',\n tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The dynamic thing group name to create.\n \n\n :type thingGroupProperties: dict\n :param thingGroupProperties: The dynamic thing group properties.\n thingGroupDescription (string) --The thing group description.\n attributePayload (dict) --The thing group attributes in JSON format.\n attributes (dict) --A JSON string containing up to three key-value pair in JSON format. 
For example:\n {\\'attributes\\':{\\'string1\\':\\'string2\\'}}\n (string) --\n (string) --\n \n merge (boolean) --Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.\n To remove an attribute, call UpdateThing with an empty attribute value.\n Note\n The merge attribute is only valid when calling UpdateThing .\n \n \n\n :type indexName: string\n :param indexName: The dynamic thing group index name.\n Note\n Currently one index is supported: 'AWS_Things'.\n \n\n :type queryString: string\n :param queryString: [REQUIRED]\n The dynamic thing group search query string.\n See Query Syntax for information about query string syntax.\n \n\n :type queryVersion: string\n :param queryVersion: The dynamic thing group query version.\n Note\n Currently one query version is supported: '2017-09-30'. If not specified, the query version defaults to this value.\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the dynamic thing group.\n (dict) --A set of key/value pairs that are used to manage the resource.\n Key (string) --The tag's key.\n Value (string) --The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'thingGroupName': 'string',\n 'thingGroupArn': 'string',\n 'thingGroupId': 'string',\n 'indexName': 'string',\n 'queryString': 'string',\n 'queryVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_job(jobId=None, targets=None, documentSource=None, document=None, description=None, presignedUrlConfig=None, targetSelection=None, jobExecutionsRolloutConfig=None, abortConfig=None, timeoutConfig=None, tags=None):\n \"\"\"\n Creates a job.\n See also: AWS API Documentation\n \n \n :example: response = client.create_job(\n jobId='string',\n targets=[\n 'string',\n ],\n documentSource='string',\n document='string',\n description='string',\n presignedUrlConfig={\n 'roleArn': 'string',\n 'expiresInSec': 123\n },\n targetSelection='CONTINUOUS'|'SNAPSHOT',\n jobExecutionsRolloutConfig={\n 'maximumPerMinute': 123,\n 'exponentialRate': {\n 'baseRatePerMinute': 123,\n 'incrementFactor': 123.0,\n 'rateIncreaseCriteria': {\n 'numberOfNotifiedThings': 123,\n 'numberOfSucceededThings': 123\n }\n }\n },\n abortConfig={\n 'criteriaList': [\n {\n 'failureType': 'FAILED'|'REJECTED'|'TIMED_OUT'|'ALL',\n 'action': 'CANCEL',\n 'thresholdPercentage': 123.0,\n 'minNumberOfExecutedThings': 123\n },\n ]\n },\n timeoutConfig={\n 'inProgressTimeoutInMinutes': 123\n },\n tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n A job identifier which must be unique for your AWS account. We recommend using a UUID. 
Alpha-numeric characters, '-' and '_' are valid for use here.\n \n\n :type targets: list\n :param targets: [REQUIRED]\n A list of things and thing groups to which the job should be sent.\n (string) --\n \n\n :type documentSource: string\n :param documentSource: An S3 link to the job document.\n\n :type document: string\n :param document: The job document.\n Note\n If the job document resides in an S3 bucket, you must use a placeholder link when specifying the document.\n The placeholder link is of the following form:\n ${aws:iot:s3-presigned-url:https://s3.amazonaws.com/*bucket* /*key* }\n where bucket is your bucket name and key is the object in the bucket to which you are linking.\n \n\n :type description: string\n :param description: A short text description of the job.\n\n :type presignedUrlConfig: dict\n :param presignedUrlConfig: Configuration information for pre-signed S3 URLs.\n roleArn (string) --The ARN of an IAM role that grants grants permission to download files from the S3 bucket where the job data/updates are stored. The role must also grant permission for IoT to download the files.\n expiresInSec (integer) --How long (in seconds) pre-signed URLs are valid. Valid values are 60 - 3600, the default value is 3600 seconds. Pre-signed URLs are generated when Jobs receives an MQTT request for the job document.\n \n\n :type targetSelection: string\n :param targetSelection: Specifies whether the job will continue to run (CONTINUOUS), or will be complete after all those things specified as targets have completed the job (SNAPSHOT). If continuous, the job may also be run on a thing when a change is detected in a target. For example, a job will run on a thing when the thing is added to a target group, even after the job was completed by all things originally in the group.\n\n :type jobExecutionsRolloutConfig: dict\n :param jobExecutionsRolloutConfig: Allows you to create a staged rollout of the job.\n maximumPerMinute (integer) --The maximum number of things that will be notified of a pending job, per minute. This parameter allows you to create a staged rollout.\n exponentialRate (dict) --The rate of increase for a job rollout. This parameter allows you to define an exponential rate for a job rollout.\n baseRatePerMinute (integer) -- [REQUIRED]The minimum number of things that will be notified of a pending job, per minute at the start of job rollout. 
This parameter allows you to define the initial rate of rollout.\n incrementFactor (float) -- [REQUIRED]The exponential factor to increase the rate of rollout for a job.\n rateIncreaseCriteria (dict) -- [REQUIRED]The criteria to initiate the increase in rate of rollout for a job.\n AWS IoT supports up to one digit after the decimal (for example, 1.5, but not 1.55).\n numberOfNotifiedThings (integer) --The threshold for number of notified things that will initiate the increase in rate of rollout.\n numberOfSucceededThings (integer) --The threshold for number of succeeded things that will initiate the increase in rate of rollout.\n \n \n\n :type abortConfig: dict\n :param abortConfig: Allows you to create criteria to abort a job.\n criteriaList (list) -- [REQUIRED]The list of abort criteria to define rules to abort the job.\n (dict) --Details of abort criteria to define rules to abort the job.\n failureType (string) -- [REQUIRED]The type of job execution failure to define a rule to initiate a job abort.\n action (string) -- [REQUIRED]The type of abort action to initiate a job abort.\n thresholdPercentage (float) -- [REQUIRED]The threshold as a percentage of the total number of executed things that will initiate a job abort.\n AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).\n minNumberOfExecutedThings (integer) -- [REQUIRED]Minimum number of executed things before evaluating an abort rule.\n \n \n\n :type timeoutConfig: dict\n :param timeoutConfig: Specifies the amount of time each device has to finish its execution of the job. The timer is started when the job execution status is set to IN_PROGRESS . If the job execution status is not set to another terminal state before the time expires, it will be automatically set to TIMED_OUT .\n inProgressTimeoutInMinutes (integer) --Specifies the amount of time, in minutes, this device has to finish execution of this job. The timeout interval can be anywhere between 1 minute and 7 days (1 to 10080 minutes). The in progress timer can't be updated and will apply to all job executions for the job. 
Whenever a job execution remains in the IN_PROGRESS status for longer than this interval, the job execution will fail and switch to the terminal TIMED_OUT status.\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the job.\n (dict) --A set of key/value pairs that are used to manage the resource.\n Key (string) --The tag's key.\n Value (string) --The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'description': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_keys_and_certificate(setAsActive=None):\n \"\"\"\n Creates a 2048-bit RSA key pair and issues an X.509 certificate using the issued public key.\n See also: AWS API Documentation\n \n \n :example: response = client.create_keys_and_certificate(\n setAsActive=True|False\n )\n \n \n :type setAsActive: boolean\n :param setAsActive: Specifies whether the certificate is active.\n\n :rtype: dict\n :return: {\n 'certificateArn': 'string',\n 'certificateId': 'string',\n 'certificatePem': 'string',\n 'keyPair': {\n 'PublicKey': 'string',\n 'PrivateKey': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_ota_update(otaUpdateId=None, description=None, targets=None, targetSelection=None, awsJobExecutionsRolloutConfig=None, files=None, roleArn=None, additionalParameters=None):\n \"\"\"\n Creates an AWS IoT OTAUpdate on a target group of things or groups.\n See also: AWS API Documentation\n \n \n :example: response = client.create_ota_update(\n otaUpdateId='string',\n description='string',\n targets=[\n 'string',\n ],\n targetSelection='CONTINUOUS'|'SNAPSHOT',\n awsJobExecutionsRolloutConfig={\n 'maximumPerMinute': 123\n },\n files=[\n {\n 'fileName': 'string',\n 'fileVersion': 'string',\n 'fileLocation': {\n 'stream': {\n 'streamId': 'string',\n 'fileId': 123\n },\n 's3Location': {\n 'bucket': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n 'codeSigning': {\n 'awsSignerJobId': 'string',\n 'startSigningJobParameter': {\n 'signingProfileParameter': {\n 'certificateArn': 'string',\n 'platform': 'string',\n 'certificatePathOnDevice': 'string'\n },\n 'signingProfileName': 'string',\n 'destination': {\n 's3Destination': {\n 'bucket': 'string',\n 'prefix': 'string'\n }\n }\n },\n 'customCodeSigning': {\n 'signature': {\n 'inlineDocument': b'bytes'\n },\n 'certificateChain': {\n 'certificateName': 'string',\n 'inlineDocument': 'string'\n },\n 'hashAlgorithm': 'string',\n 'signatureAlgorithm': 'string'\n }\n },\n 'attributes': {\n 'string': 'string'\n }\n },\n ],\n roleArn='string',\n additionalParameters={\n 'string': 'string'\n }\n )\n \n \n :type otaUpdateId: string\n :param otaUpdateId: [REQUIRED]\n The ID of the OTA update to be created.\n \n\n :type description: string\n :param description: The description of the OTA update.\n\n :type targets: list\n :param targets: [REQUIRED]\n The targeted devices to receive OTA updates.\n (string) --\n \n\n :type targetSelection: string\n :param targetSelection: Specifies whether the update will continue to run (CONTINUOUS), or will be complete after all the things specified as targets have completed the update (SNAPSHOT). If continuous, the update may also be run on a thing when a change is detected in a target. For example, an update will run on a thing when the thing is added to a target group, even after the update was completed by all things originally in the group. 
Valid values: CONTINUOUS | SNAPSHOT.\n\n :type awsJobExecutionsRolloutConfig: dict\n :param awsJobExecutionsRolloutConfig: Configuration for the rollout of OTA updates.\n maximumPerMinute (integer) --The maximum number of OTA update job executions started per minute.\n \n\n :type files: list\n :param files: [REQUIRED]\n The files to be streamed by the OTA update.\n (dict) --Describes a file to be associated with an OTA update.\n fileName (string) --The name of the file.\n fileVersion (string) --The file version.\n fileLocation (dict) --The location of the updated firmware.\n stream (dict) --The stream that contains the OTA update.\n streamId (string) --The stream ID.\n fileId (integer) --The ID of a file associated with a stream.\n s3Location (dict) --The location of the updated firmware in S3.\n bucket (string) --The S3 bucket.\n key (string) --The S3 key.\n version (string) --The S3 bucket version.\n \n codeSigning (dict) --The code signing method of the file.\n awsSignerJobId (string) --The ID of the AWSSignerJob which was created to sign the file.\n startSigningJobParameter (dict) --Describes the code-signing job.\n signingProfileParameter (dict) --Describes the code-signing profile.\n certificateArn (string) --Certificate ARN.\n platform (string) --The hardware platform of your device.\n certificatePathOnDevice (string) --The location of the code-signing certificate on your device.\n signingProfileName (string) --The code-signing profile name.\n destination (dict) --The location to write the code-signed file.\n s3Destination (dict) --Describes the location in S3 of the updated firmware.\n bucket (string) --The S3 bucket that contains the updated firmware.\n prefix (string) --The S3 prefix.\n \n customCodeSigning (dict) --A custom method for code signing a file.\n signature (dict) --The signature for the file.\n inlineDocument (bytes) --A base64 encoded binary representation of the code signing signature.\n certificateChain (dict) --The certificate chain.\n certificateName (string) --The name of the certificate.\n inlineDocument (string) --A base64 encoded binary representation of the code signing certificate chain.\n hashAlgorithm (string) --The hash algorithm used to code sign the file.\n signatureAlgorithm (string) --The signature algorithm used to code sign the file.\n \n attributes (dict) --A list of name/attribute pairs.\n (string) --\n (string) --\n \n \n\n :type roleArn: string\n :param roleArn: [REQUIRED]\n The IAM role that allows access to the AWS IoT Jobs service.\n \n\n :type additionalParameters: dict\n :param additionalParameters: A list of additional OTA update parameters which are name-value pairs.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'otaUpdateId': 'string',\n 'awsIotJobId': 'string',\n 'otaUpdateArn': 'string',\n 'awsIotJobArn': 'string',\n 'otaUpdateStatus': 'CREATE_PENDING'|'CREATE_IN_PROGRESS'|'CREATE_COMPLETE'|'CREATE_FAILED'\n }\n \n \n \"\"\"\n pass\n\ndef create_policy(policyName=None, policyDocument=None):\n \"\"\"\n Creates an AWS IoT policy.\n The created policy is the default version for the policy. 
This operation creates a policy version with a version identifier of 1 and sets 1 as the policy's default version.\n See also: AWS API Documentation\n \n \n :example: response = client.create_policy(\n policyName='string',\n policyDocument='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The policy name.\n \n\n :type policyDocument: string\n :param policyDocument: [REQUIRED]\n The JSON document that describes the policy. policyDocument must have a minimum length of 1, with a maximum length of 2048, excluding whitespace.\n \n\n :rtype: dict\n :return: {\n 'policyName': 'string',\n 'policyArn': 'string',\n 'policyDocument': 'string',\n 'policyVersionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_policy_version(policyName=None, policyDocument=None, setAsDefault=None):\n \"\"\"\n Creates a new version of the specified AWS IoT policy. To update a policy, create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must use DeletePolicyVersion to delete an existing version before you create a new one.\n Optionally, you can set the new version as the policy's default version. The default version is the operative version (that is, the version that is in effect for the certificates to which the policy is attached).\n See also: AWS API Documentation\n \n \n :example: response = client.create_policy_version(\n policyName='string',\n policyDocument='string',\n setAsDefault=True|False\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The policy name.\n \n\n :type policyDocument: string\n :param policyDocument: [REQUIRED]\n The JSON document that describes the policy. Minimum length of 1. Maximum length of 2048, excluding whitespace.\n \n\n :type setAsDefault: boolean\n :param setAsDefault: Specifies whether the policy version is set as the default. When this parameter is true, the new policy version becomes the operative version (that is, the version that is in effect for the certificates to which the policy is attached).\n\n :rtype: dict\n :return: {\n 'policyArn': 'string',\n 'policyDocument': 'string',\n 'policyVersionId': 'string',\n 'isDefaultVersion': True|False\n }\n \n \n \"\"\"\n pass\n\ndef create_role_alias(roleAlias=None, roleArn=None, credentialDurationSeconds=None):\n \"\"\"\n Creates a role alias.\n See also: AWS API Documentation\n \n \n :example: response = client.create_role_alias(\n roleAlias='string',\n roleArn='string',\n credentialDurationSeconds=123\n )\n \n \n :type roleAlias: string\n :param roleAlias: [REQUIRED]\n The role alias that points to a role ARN. 
This allows you to change the role without having to update the device.\n \n\n :type roleArn: string\n :param roleArn: [REQUIRED]\n The role ARN.\n \n\n :type credentialDurationSeconds: integer\n :param credentialDurationSeconds: How long (in seconds) the credentials will be valid.\n\n :rtype: dict\n :return: {\n 'roleAlias': 'string',\n 'roleAliasArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_scheduled_audit(frequency=None, dayOfMonth=None, dayOfWeek=None, targetCheckNames=None, scheduledAuditName=None):\n \"\"\"\n Creates a scheduled audit that is run at a specified time interval.\n See also: AWS API Documentation\n \n \n :example: response = client.create_scheduled_audit(\n frequency='DAILY'|'WEEKLY'|'BIWEEKLY'|'MONTHLY',\n dayOfMonth='string',\n dayOfWeek='SUN'|'MON'|'TUE'|'WED'|'THU'|'FRI'|'SAT',\n targetCheckNames=[\n 'string',\n ],\n scheduledAuditName='string'\n )\n \n \n :type frequency: string\n :param frequency: [REQUIRED]\n How often the scheduled audit takes place. Can be one of 'DAILY', 'WEEKLY', 'BIWEEKLY' or 'MONTHLY'. The actual start time of each audit is determined by the system.\n \n\n :type dayOfMonth: string\n :param dayOfMonth: The day of the month on which the scheduled audit takes place. Can be '1' through '31' or 'LAST'. This field is required if the 'frequency' parameter is set to 'MONTHLY'. If days 29-31 are specified, and the month does not have that many days, the audit takes place on the 'LAST' day of the month.\n\n :type dayOfWeek: string\n :param dayOfWeek: The day of the week on which the scheduled audit takes place. Can be one of 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI' or 'SAT'. This field is required if the 'frequency' parameter is set to 'WEEKLY' or 'BIWEEKLY'.\n\n :type targetCheckNames: list\n :param targetCheckNames: [REQUIRED]\n Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)\n (string) --An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)\n \n\n :type scheduledAuditName: string\n :param scheduledAuditName: [REQUIRED]\n The name you want to give to the scheduled audit. (Max. 
128 chars)\n \n\n :rtype: dict\n :return: {\n 'scheduledAuditArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_security_profile(securityProfileName=None, securityProfileDescription=None, behaviors=None, alertTargets=None, tags=None):\n \"\"\"\n Creates a Device Defender security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.create_security_profile(\n securityProfileName='string',\n securityProfileDescription='string',\n behaviors=[\n {\n 'name': 'string',\n 'metric': 'string',\n 'criteria': {\n 'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n 'value': {\n 'count': 123,\n 'cidrs': [\n 'string',\n ],\n 'ports': [\n 123,\n ]\n },\n 'durationSeconds': 123\n }\n },\n ],\n alertTargets={\n 'string': {\n 'alertTargetArn': 'string',\n 'roleArn': 'string'\n }\n },\n tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The name you are giving to the security profile.\n \n\n :type securityProfileDescription: string\n :param securityProfileDescription: A description of the security profile.\n\n :type behaviors: list\n :param behaviors: [REQUIRED]\n Specifies the behaviors that, when violated by a device (thing), cause an alert.\n (dict) --A Device Defender security profile behavior.\n name (string) -- [REQUIRED]The name you have given to the behavior.\n metric (string) --What is measured by the behavior.\n criteria (dict) --The criteria that determine if a device is behaving normally in regard to the metric .\n comparisonOperator (string) --The operator that relates the thing measured (metric ) to the criteria (value ).\n value (dict) --The value to be compared with the metric .\n count (integer) --If the comparisonOperator calls for a numeric value, use this to specify that numeric value to be compared with the metric .\n cidrs (list) --If the comparisonOperator calls for a set of CIDRs, use this to specify that set to be compared with the metric .\n (string) --\n ports (list) --If the comparisonOperator calls for a set of ports, use this to specify that set to be compared with the metric .\n (integer) --\n \n durationSeconds (integer) --Use this to specify the period of time over which the behavior is evaluated, for those criteria which have a time dimension (for example, NUM_MESSAGES_SENT ).\n \n \n\n :type alertTargets: dict\n :param alertTargets: Specifies the destinations to which alerts are sent. (Alerts are always sent to the console.) 
Alerts are generated when a device (thing) violates a behavior.\n (string) --The type of alert target: one of 'SNS'.\n (dict) --A structure containing the alert target ARN and the role ARN.\n alertTargetArn (string) -- [REQUIRED]The ARN of the notification target to which alerts are sent.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants permission to send alerts to the notification target.\n \n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the security profile.\n (dict) --A set of key/value pairs that are used to manage the resource.\n Key (string) --The tag's key.\n Value (string) --The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'securityProfileName': 'string',\n 'securityProfileArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_stream(streamId=None, description=None, files=None, roleArn=None):\n \"\"\"\n Creates a stream for delivering one or more large files in chunks over MQTT. A stream transports data bytes in chunks or blocks packaged as MQTT messages from a source like S3. You can have one or more files associated with a stream. The total size of a file associated with the stream cannot exceed more than 2 MB. The stream will be created with version 0. If a stream is created with the same streamID as a stream that existed and was deleted within last 90 days, we will resurrect that old stream by incrementing the version by 1.\n See also: AWS API Documentation\n \n \n :example: response = client.create_stream(\n streamId='string',\n description='string',\n files=[\n {\n 'fileId': 123,\n 's3Location': {\n 'bucket': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n ],\n roleArn='string'\n )\n \n \n :type streamId: string\n :param streamId: [REQUIRED]\n The stream ID.\n \n\n :type description: string\n :param description: A description of the stream.\n\n :type files: list\n :param files: [REQUIRED]\n The files to stream.\n (dict) --Represents a file to stream.\n fileId (integer) --The file ID.\n s3Location (dict) --The location of the file in S3.\n bucket (string) --The S3 bucket.\n key (string) --The S3 key.\n version (string) --The S3 bucket version.\n \n \n\n :type roleArn: string\n :param roleArn: [REQUIRED]\n An IAM role that allows the IoT service principal assumes to access your S3 files.\n \n\n :rtype: dict\n :return: {\n 'streamId': 'string',\n 'streamArn': 'string',\n 'description': 'string',\n 'streamVersion': 123\n }\n \n \n \"\"\"\n pass\n\ndef create_thing(thingName=None, thingTypeName=None, attributePayload=None, billingGroupName=None):\n \"\"\"\n Creates a thing record in the registry.\n See also: AWS API Documentation\n \n \n :example: response = client.create_thing(\n thingName='string',\n thingTypeName='string',\n attributePayload={\n 'attributes': {\n 'string': 'string'\n },\n 'merge': True|False\n },\n billingGroupName='string'\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing to create.\n \n\n :type thingTypeName: string\n :param thingTypeName: The name of the thing type associated with the new thing.\n\n :type attributePayload: dict\n :param attributePayload: The attribute payload, which consists of up to three name/value pairs in a JSON document. For example:\n {\\'attributes\\':{\\'string1\\':\\'string2\\'}}\n attributes (dict) --A JSON string containing up to three key-value pair in JSON format. 
For example:\n {\\'attributes\\':{\\'string1\\':\\'string2\\'}}\n (string) --\n (string) --\n \n merge (boolean) --Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.\n To remove an attribute, call UpdateThing with an empty attribute value.\n Note\n The merge attribute is only valid when calling UpdateThing .\n \n\n :type billingGroupName: string\n :param billingGroupName: The name of the billing group the thing will be added to.\n\n :rtype: dict\n :return: {\n 'thingName': 'string',\n 'thingArn': 'string',\n 'thingId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_thing_group(thingGroupName=None, parentGroupName=None, thingGroupProperties=None, tags=None):\n \"\"\"\n Create a thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.create_thing_group(\n thingGroupName='string',\n parentGroupName='string',\n thingGroupProperties={\n 'thingGroupDescription': 'string',\n 'attributePayload': {\n 'attributes': {\n 'string': 'string'\n },\n 'merge': True|False\n }\n },\n tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The thing group name to create.\n \n\n :type parentGroupName: string\n :param parentGroupName: The name of the parent thing group.\n\n :type thingGroupProperties: dict\n :param thingGroupProperties: The thing group properties.\n thingGroupDescription (string) --The thing group description.\n attributePayload (dict) --The thing group attributes in JSON format.\n attributes (dict) --A JSON string containing up to three key-value pair in JSON format. For example:\n {\\'attributes\\':{\\'string1\\':\\'string2\\'}}\n (string) --\n (string) --\n \n merge (boolean) --Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.\n To remove an attribute, call UpdateThing with an empty attribute value.\n Note\n The merge attribute is only valid when calling UpdateThing .\n \n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the thing group.\n (dict) --A set of key/value pairs that are used to manage the resource.\n Key (string) --The tag's key.\n Value (string) --The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'thingGroupName': 'string',\n 'thingGroupArn': 'string',\n 'thingGroupId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_thing_type(thingTypeName=None, thingTypeProperties=None, tags=None):\n \"\"\"\n Creates a new thing type.\n See also: AWS API Documentation\n \n \n :example: response = client.create_thing_type(\n thingTypeName='string',\n thingTypeProperties={\n 'thingTypeDescription': 'string',\n 'searchableAttributes': [\n 'string',\n ]\n },\n tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type thingTypeName: string\n :param thingTypeName: [REQUIRED]\n The name of the thing type.\n \n\n :type thingTypeProperties: dict\n :param thingTypeProperties: The ThingTypeProperties for the thing type to create. 
It contains information about the new thing type including a description, and a list of searchable thing attribute names.\n thingTypeDescription (string) --The description of the thing type.\n searchableAttributes (list) --A list of searchable thing attribute names.\n (string) --\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the thing type.\n (dict) --A set of key/value pairs that are used to manage the resource.\n Key (string) --The tag's key.\n Value (string) --The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'thingTypeName': 'string',\n 'thingTypeArn': 'string',\n 'thingTypeId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_topic_rule(ruleName=None, topicRulePayload=None):\n \"\"\"\n Creates a rule. Creating rules is an administrator-level action. Any user who has permission to create rules will be able to access data processed by the rule.\n See also: AWS API Documentation\n \n \n :example: response = client.create_topic_rule(\n ruleName='string',\n topicRulePayload={\n 'sql': 'string',\n 'description': 'string',\n 'actions': [\n {\n 'dynamoDB': {\n 'tableName': 'string',\n 'roleArn': 'string',\n 'operation': 'string',\n 'hashKeyField': 'string',\n 'hashKeyValue': 'string',\n 'hashKeyType': 'STRING'|'NUMBER',\n 'rangeKeyField': 'string',\n 'rangeKeyValue': 'string',\n 'rangeKeyType': 'STRING'|'NUMBER',\n 'payloadField': 'string'\n },\n 'dynamoDBv2': {\n 'roleArn': 'string',\n 'putItem': {\n 'tableName': 'string'\n }\n },\n 'lambda': {\n 'functionArn': 'string'\n },\n 'sns': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'messageFormat': 'RAW'|'JSON'\n },\n 'sqs': {\n 'roleArn': 'string',\n 'queueUrl': 'string',\n 'useBase64': True|False\n },\n 'kinesis': {\n 'roleArn': 'string',\n 'streamName': 'string',\n 'partitionKey': 'string'\n },\n 'republish': {\n 'roleArn': 'string',\n 'topic': 'string'\n },\n 's3': {\n 'roleArn': 'string',\n 'bucketName': 'string',\n 'key': 'string',\n 'cannedAcl': 'private'|'public-read'|'public-read-write'|'aws-exec-read'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'log-delivery-write'\n },\n 'firehose': {\n 'roleArn': 'string',\n 'deliveryStreamName': 'string',\n 'separator': 'string'\n },\n 'cloudwatchMetric': {\n 'roleArn': 'string',\n 'metricNamespace': 'string',\n 'metricName': 'string',\n 'metricValue': 'string',\n 'metricUnit': 'string',\n 'metricTimestamp': 'string'\n },\n 'cloudwatchAlarm': {\n 'roleArn': 'string',\n 'alarmName': 'string',\n 'stateReason': 'string',\n 'stateValue': 'string'\n },\n 'elasticsearch': {\n 'roleArn': 'string',\n 'endpoint': 'string',\n 'index': 'string',\n 'type': 'string',\n 'id': 'string'\n },\n 'salesforce': {\n 'token': 'string',\n 'url': 'string'\n },\n 'iotAnalytics': {\n 'channelArn': 'string',\n 'channelName': 'string',\n 'roleArn': 'string'\n },\n 'iotEvents': {\n 'inputName': 'string',\n 'messageId': 'string',\n 'roleArn': 'string'\n },\n 'stepFunctions': {\n 'executionNamePrefix': 'string',\n 'stateMachineName': 'string',\n 'roleArn': 'string'\n }\n },\n ],\n 'ruleDisabled': True|False,\n 'awsIotSqlVersion': 'string',\n 'errorAction': {\n 'dynamoDB': {\n 'tableName': 'string',\n 'roleArn': 'string',\n 'operation': 'string',\n 'hashKeyField': 'string',\n 'hashKeyValue': 'string',\n 'hashKeyType': 'STRING'|'NUMBER',\n 'rangeKeyField': 'string',\n 'rangeKeyValue': 'string',\n 'rangeKeyType': 'STRING'|'NUMBER',\n 'payloadField': 'string'\n },\n 'dynamoDBv2': {\n 'roleArn': 'string',\n 'putItem': {\n 'tableName': 'string'\n }\n },\n 'lambda': {\n 
'functionArn': 'string'\n },\n 'sns': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'messageFormat': 'RAW'|'JSON'\n },\n 'sqs': {\n 'roleArn': 'string',\n 'queueUrl': 'string',\n 'useBase64': True|False\n },\n 'kinesis': {\n 'roleArn': 'string',\n 'streamName': 'string',\n 'partitionKey': 'string'\n },\n 'republish': {\n 'roleArn': 'string',\n 'topic': 'string'\n },\n 's3': {\n 'roleArn': 'string',\n 'bucketName': 'string',\n 'key': 'string',\n 'cannedAcl': 'private'|'public-read'|'public-read-write'|'aws-exec-read'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'log-delivery-write'\n },\n 'firehose': {\n 'roleArn': 'string',\n 'deliveryStreamName': 'string',\n 'separator': 'string'\n },\n 'cloudwatchMetric': {\n 'roleArn': 'string',\n 'metricNamespace': 'string',\n 'metricName': 'string',\n 'metricValue': 'string',\n 'metricUnit': 'string',\n 'metricTimestamp': 'string'\n },\n 'cloudwatchAlarm': {\n 'roleArn': 'string',\n 'alarmName': 'string',\n 'stateReason': 'string',\n 'stateValue': 'string'\n },\n 'elasticsearch': {\n 'roleArn': 'string',\n 'endpoint': 'string',\n 'index': 'string',\n 'type': 'string',\n 'id': 'string'\n },\n 'salesforce': {\n 'token': 'string',\n 'url': 'string'\n },\n 'iotAnalytics': {\n 'channelArn': 'string',\n 'channelName': 'string',\n 'roleArn': 'string'\n },\n 'iotEvents': {\n 'inputName': 'string',\n 'messageId': 'string',\n 'roleArn': 'string'\n },\n 'stepFunctions': {\n 'executionNamePrefix': 'string',\n 'stateMachineName': 'string',\n 'roleArn': 'string'\n }\n }\n }\n )\n \n \n :type ruleName: string\n :param ruleName: [REQUIRED]\n The name of the rule.\n \n\n :type topicRulePayload: dict\n :param topicRulePayload: [REQUIRED]\n The rule payload.\n sql (string) -- [REQUIRED]The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide .\n description (string) --The description of the rule.\n actions (list) -- [REQUIRED]The actions associated with the rule.\n (dict) --Describes the actions associated with a rule.\n dynamoDB (dict) --Write to a DynamoDB table.\n tableName (string) -- [REQUIRED]The name of the DynamoDB table.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the DynamoDB table.\n operation (string) --The type of operation to be performed. This follows the substitution template, so it can be ${operation} , but the substitution must result in one of the following: INSERT , UPDATE , or DELETE .\n hashKeyField (string) -- [REQUIRED]The hash key name.\n hashKeyValue (string) -- [REQUIRED]The hash key value.\n hashKeyType (string) --The hash key type. Valid values are 'STRING' or 'NUMBER'\n rangeKeyField (string) --The range key name.\n rangeKeyValue (string) --The range key value.\n rangeKeyType (string) --The range key type. Valid values are 'STRING' or 'NUMBER'\n payloadField (string) --The action payload. This name can be customized.\n dynamoDBv2 (dict) --Write to a DynamoDB table. This is a new version of the DynamoDB action. It allows you to write each attribute in an MQTT message payload into a separate DynamoDB column.\n roleArn (string) --The ARN of the IAM role that grants access to the DynamoDB table.\n putItem (dict) --Specifies the DynamoDB table to which the message data will be written. 
For example:\n { 'dynamoDBv2': { 'roleArn': 'aws:iam:12341251:my-role' 'putItem': { 'tableName': 'my-table' } } }\n Each attribute in the message payload will be written to a separate column in the DynamoDB database.\n tableName (string) -- [REQUIRED]The table where the message data will be written\n \n lambda (dict) --Invoke a Lambda function.\n functionArn (string) -- [REQUIRED]The ARN of the Lambda function.\n sns (dict) --Publish to an Amazon SNS topic.\n targetArn (string) -- [REQUIRED]The ARN of the SNS topic.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n messageFormat (string) --(Optional) The message format of the message to publish. Accepted values are 'JSON' and 'RAW'. The default value of the attribute is 'RAW'. SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html in their official documentation.\n sqs (dict) --Publish to an Amazon SQS queue.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n queueUrl (string) -- [REQUIRED]The URL of the Amazon SQS queue.\n useBase64 (boolean) --Specifies whether to use Base64 encoding.\n kinesis (dict) --Write data to an Amazon Kinesis stream.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the Amazon Kinesis stream.\n streamName (string) -- [REQUIRED]The name of the Amazon Kinesis stream.\n partitionKey (string) --The partition key.\n republish (dict) --Publish to another MQTT topic.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n topic (string) -- [REQUIRED]The name of the MQTT topic.\n s3 (dict) --Write to an Amazon S3 bucket.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n bucketName (string) -- [REQUIRED]The Amazon S3 bucket.\n key (string) -- [REQUIRED]The object key.\n cannedAcl (string) --The Amazon S3 canned ACL that controls access to the object identified by the object key. For more information, see S3 canned ACLs .\n firehose (dict) --Write to an Amazon Kinesis Firehose stream.\n roleArn (string) -- [REQUIRED]The IAM role that grants access to the Amazon Kinesis Firehose stream.\n deliveryStreamName (string) -- [REQUIRED]The delivery stream name.\n separator (string) --A character separator that will be used to separate records written to the Firehose stream. Valid values are: 'n' (newline), 't' (tab), 'rn' (Windows newline), ',' (comma).\n cloudwatchMetric (dict) --Capture a CloudWatch metric.\n roleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch metric.\n metricNamespace (string) -- [REQUIRED]The CloudWatch metric namespace name.\n metricName (string) -- [REQUIRED]The CloudWatch metric name.\n metricValue (string) -- [REQUIRED]The CloudWatch metric value.\n metricUnit (string) -- [REQUIRED]The metric unit supported by CloudWatch.\n metricTimestamp (string) --An optional Unix timestamp .\n cloudwatchAlarm (dict) --Change the state of a CloudWatch alarm.\n roleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch alarm.\n alarmName (string) -- [REQUIRED]The CloudWatch alarm name.\n stateReason (string) -- [REQUIRED]The reason for the alarm change.\n stateValue (string) -- [REQUIRED]The value of the alarm state. 
Acceptable values are: OK, ALARM, INSUFFICIENT_DATA.\n elasticsearch (dict) --Write data to an Amazon Elasticsearch Service domain.\n roleArn (string) -- [REQUIRED]The IAM role ARN that has access to Elasticsearch.\n endpoint (string) -- [REQUIRED]The endpoint of your Elasticsearch domain.\n index (string) -- [REQUIRED]The Elasticsearch index where you want to store your data.\n type (string) -- [REQUIRED]The type of document you are storing.\n id (string) -- [REQUIRED]The unique identifier for the document you are storing.\n salesforce (dict) --Send a message to a Salesforce IoT Cloud Input Stream.\n token (string) -- [REQUIRED]The token used to authenticate access to the Salesforce IoT Cloud Input Stream. The token is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n url (string) -- [REQUIRED]The URL exposed by the Salesforce IoT Cloud Input Stream. The URL is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n iotAnalytics (dict) --Sends message data to an AWS IoT Analytics channel.\n channelArn (string) --(deprecated) The ARN of the IoT Analytics channel to which message data will be sent.\n channelName (string) --The name of the IoT Analytics channel to which message data will be sent.\n roleArn (string) --The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).\n iotEvents (dict) --Sends an input to an AWS IoT Events detector.\n inputName (string) -- [REQUIRED]The name of the AWS IoT Events input.\n messageId (string) --[Optional] Use this to ensure that only one input (message) with a given messageId will be processed by an AWS IoT Events detector.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants AWS IoT permission to send an input to an AWS IoT Events detector. ('Action':'iotevents:BatchPutMessage').\n stepFunctions (dict) --Starts execution of a Step Functions state machine.\n executionNamePrefix (string) --(Optional) A name will be given to the state machine execution consisting of this prefix followed by a UUID. Step Functions automatically creates a unique name for each state machine execution if one is not provided.\n stateMachineName (string) -- [REQUIRED]The name of the Step Functions state machine whose execution will be started.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants IoT permission to start execution of a state machine ('Action':'states:StartExecution').\n \n ruleDisabled (boolean) --Specifies whether the rule is disabled.\n awsIotSqlVersion (string) --The version of the SQL rules engine to use when evaluating the rule.\n errorAction (dict) --The action to take when an error occurs.\n dynamoDB (dict) --Write to a DynamoDB table.\n tableName (string) -- [REQUIRED]The name of the DynamoDB table.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the DynamoDB table.\n operation (string) --The type of operation to be performed. This follows the substitution template, so it can be ${operation} , but the substitution must result in one of the following: INSERT , UPDATE , or DELETE .\n hashKeyField (string) -- [REQUIRED]The hash key name.\n hashKeyValue (string) -- [REQUIRED]The hash key value.\n hashKeyType (string) --The hash key type. Valid values are 'STRING' or 'NUMBER'\n rangeKeyField (string) --The range key name.\n rangeKeyValue (string) --The range key value.\n rangeKeyType (string) --The range key type. 
Valid values are 'STRING' or 'NUMBER'\n payloadField (string) --The action payload. This name can be customized.\n dynamoDBv2 (dict) --Write to a DynamoDB table. This is a new version of the DynamoDB action. It allows you to write each attribute in an MQTT message payload into a separate DynamoDB column.\n roleArn (string) --The ARN of the IAM role that grants access to the DynamoDB table.\n putItem (dict) --Specifies the DynamoDB table to which the message data will be written. For example:\n { 'dynamoDBv2': { 'roleArn': 'aws:iam:12341251:my-role' 'putItem': { 'tableName': 'my-table' } } }\n Each attribute in the message payload will be written to a separate column in the DynamoDB database.\n tableName (string) -- [REQUIRED]The table where the message data will be written\n \n lambda (dict) --Invoke a Lambda function.\n functionArn (string) -- [REQUIRED]The ARN of the Lambda function.\n sns (dict) --Publish to an Amazon SNS topic.\n targetArn (string) -- [REQUIRED]The ARN of the SNS topic.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n messageFormat (string) --(Optional) The message format of the message to publish. Accepted values are 'JSON' and 'RAW'. The default value of the attribute is 'RAW'. SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html in their official documentation.\n sqs (dict) --Publish to an Amazon SQS queue.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n queueUrl (string) -- [REQUIRED]The URL of the Amazon SQS queue.\n useBase64 (boolean) --Specifies whether to use Base64 encoding.\n kinesis (dict) --Write data to an Amazon Kinesis stream.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the Amazon Kinesis stream.\n streamName (string) -- [REQUIRED]The name of the Amazon Kinesis stream.\n partitionKey (string) --The partition key.\n republish (dict) --Publish to another MQTT topic.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n topic (string) -- [REQUIRED]The name of the MQTT topic.\n s3 (dict) --Write to an Amazon S3 bucket.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n bucketName (string) -- [REQUIRED]The Amazon S3 bucket.\n key (string) -- [REQUIRED]The object key.\n cannedAcl (string) --The Amazon S3 canned ACL that controls access to the object identified by the object key. For more information, see S3 canned ACLs .\n firehose (dict) --Write to an Amazon Kinesis Firehose stream.\n roleArn (string) -- [REQUIRED]The IAM role that grants access to the Amazon Kinesis Firehose stream.\n deliveryStreamName (string) -- [REQUIRED]The delivery stream name.\n separator (string) --A character separator that will be used to separate records written to the Firehose stream. 
Valid values are: 'n' (newline), 't' (tab), 'rn' (Windows newline), ',' (comma).\n cloudwatchMetric (dict) --Capture a CloudWatch metric.\n roleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch metric.\n metricNamespace (string) -- [REQUIRED]The CloudWatch metric namespace name.\n metricName (string) -- [REQUIRED]The CloudWatch metric name.\n metricValue (string) -- [REQUIRED]The CloudWatch metric value.\n metricUnit (string) -- [REQUIRED]The metric unit supported by CloudWatch.\n metricTimestamp (string) --An optional Unix timestamp .\n cloudwatchAlarm (dict) --Change the state of a CloudWatch alarm.\n roleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch alarm.\n alarmName (string) -- [REQUIRED]The CloudWatch alarm name.\n stateReason (string) -- [REQUIRED]The reason for the alarm change.\n stateValue (string) -- [REQUIRED]The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA.\n elasticsearch (dict) --Write data to an Amazon Elasticsearch Service domain.\n roleArn (string) -- [REQUIRED]The IAM role ARN that has access to Elasticsearch.\n endpoint (string) -- [REQUIRED]The endpoint of your Elasticsearch domain.\n index (string) -- [REQUIRED]The Elasticsearch index where you want to store your data.\n type (string) -- [REQUIRED]The type of document you are storing.\n id (string) -- [REQUIRED]The unique identifier for the document you are storing.\n salesforce (dict) --Send a message to a Salesforce IoT Cloud Input Stream.\n token (string) -- [REQUIRED]The token used to authenticate access to the Salesforce IoT Cloud Input Stream. The token is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n url (string) -- [REQUIRED]The URL exposed by the Salesforce IoT Cloud Input Stream. The URL is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n iotAnalytics (dict) --Sends message data to an AWS IoT Analytics channel.\n channelArn (string) --(deprecated) The ARN of the IoT Analytics channel to which message data will be sent.\n channelName (string) --The name of the IoT Analytics channel to which message data will be sent.\n roleArn (string) --The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).\n iotEvents (dict) --Sends an input to an AWS IoT Events detector.\n inputName (string) -- [REQUIRED]The name of the AWS IoT Events input.\n messageId (string) --[Optional] Use this to ensure that only one input (message) with a given messageId will be processed by an AWS IoT Events detector.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants AWS IoT permission to send an input to an AWS IoT Events detector. ('Action':'iotevents:BatchPutMessage').\n stepFunctions (dict) --Starts execution of a Step Functions state machine.\n executionNamePrefix (string) --(Optional) A name will be given to the state machine execution consisting of this prefix followed by a UUID. 
Step Functions automatically creates a unique name for each state machine execution if one is not provided.\n stateMachineName (string) -- [REQUIRED]The name of the Step Functions state machine whose execution will be started.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants IoT permission to start execution of a state machine ('Action':'states:StartExecution').\n \n \n\n \"\"\"\n pass\n\ndef delete_account_audit_configuration(deleteScheduledAudits=None):\n \"\"\"\n Restores the default settings for Device Defender audits for this account. Any configuration data you entered is deleted and all audit checks are reset to disabled.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_account_audit_configuration(\n deleteScheduledAudits=True|False\n )\n \n \n :type deleteScheduledAudits: boolean\n :param deleteScheduledAudits: If true, all scheduled audits are deleted.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_authorizer(authorizerName=None):\n \"\"\"\n Deletes an authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_authorizer(\n authorizerName='string'\n )\n \n \n :type authorizerName: string\n :param authorizerName: [REQUIRED]\n The name of the authorizer to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_billing_group(billingGroupName=None, expectedVersion=None):\n \"\"\"\n Deletes the billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_billing_group(\n billingGroupName='string',\n expectedVersion=123\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: [REQUIRED]\n The name of the billing group.\n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the billing group. If the version of the billing group does not match the expected version specified in the request, the DeleteBillingGroup request is rejected with a VersionConflictException .\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_ca_certificate(certificateId=None):\n \"\"\"\n Deletes a registered CA certificate.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_ca_certificate(\n certificateId='string'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate to delete. (The last part of the certificate ARN contains the certificate ID.)\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_certificate(certificateId=None, forceDelete=None):\n \"\"\"\n Deletes the specified certificate.\n A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_certificate(\n certificateId='string',\n forceDelete=True|False\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. 
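(A hedged sketch of the detach, deactivate, delete sequence described above; 'MyPolicy' and the certificate ID 'abc123' are placeholders rather than values from this reference, and any other attached policies would need the same treatment:\n cert_id = 'abc123'\n cert_arn = client.describe_certificate(certificateId=cert_id)['certificateDescription']['certificateArn']\n client.detach_principal_policy(policyName='MyPolicy', principal=cert_arn)\n client.update_certificate(certificateId=cert_id, newStatus='INACTIVE')\n client.delete_certificate(certificateId=cert_id)\n )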
(The last part of the certificate ARN contains the certificate ID.)\n \n\n :type forceDelete: boolean\n :param forceDelete: Forces a certificate request to be deleted.\n\n \"\"\"\n pass\n\ndef delete_dynamic_thing_group(thingGroupName=None, expectedVersion=None):\n \"\"\"\n Deletes a dynamic thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_dynamic_thing_group(\n thingGroupName='string',\n expectedVersion=123\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The name of the dynamic thing group to delete.\n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the dynamic thing group to delete.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_job(jobId=None, force=None):\n \"\"\"\n Deletes a job and its related job executions.\n Deleting a job may take time, depending on the number of job executions created for the job and various other factors. While the job is being deleted, the status of the job will be shown as \"DELETION_IN_PROGRESS\". Attempting to delete or cancel a job whose status is already \"DELETION_IN_PROGRESS\" will result in an error.\n Only 10 jobs may have status \"DELETION_IN_PROGRESS\" at the same time, or a LimitExceededException will occur.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_job(\n jobId='string',\n force=True|False\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The ID of the job to be deleted.\n After a job deletion is completed, you may reuse this jobId when you create a new job. However, this is not recommended, and you must ensure that your devices are not using the jobId to refer to the deleted job.\n \n\n :type force: boolean\n :param force: (Optional) When true, you can delete a job which is 'IN_PROGRESS'. Otherwise, you can only delete a job which is in a terminal state ('COMPLETED' or 'CANCELED') or an exception will occur. The default is false.\n Note\n Deleting a job which is 'IN_PROGRESS', will cause a device which is executing the job to be unable to access job information or update the job execution status. Use caution and ensure that each device executing a job which is deleted is able to recover to a valid state.\n \n\n \"\"\"\n pass\n\ndef delete_job_execution(jobId=None, thingName=None, executionNumber=None, force=None):\n \"\"\"\n Deletes a job execution.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_job_execution(\n jobId='string',\n thingName='string',\n executionNumber=123,\n force=True|False\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The ID of the job whose execution on a particular device will be deleted.\n \n\n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing whose job execution will be deleted.\n \n\n :type executionNumber: integer\n :param executionNumber: [REQUIRED]\n The ID of the job execution to be deleted. The executionNumber refers to the execution of a particular job on a particular device.\n Note that once a job execution is deleted, the executionNumber may be reused by IoT, so be sure you get and use the correct value here.\n \n\n :type force: boolean\n :param force: (Optional) When true, you can delete a job execution which is 'IN_PROGRESS'. Otherwise, you can only delete a job execution which is in a terminal state ('SUCCEEDED', 'FAILED', 'REJECTED', 'REMOVED' or 'CANCELED') or an exception will occur. 
The default is false.\n Note\n Deleting a job execution which is 'IN_PROGRESS' will cause the device to be unable to access job information or update the job execution status. Use caution and ensure that the device is able to recover to a valid state.\n \n\n \"\"\"\n pass\n\ndef delete_ota_update(otaUpdateId=None, deleteStream=None, forceDeleteAWSJob=None):\n \"\"\"\n Deletes an OTA update.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_ota_update(\n otaUpdateId='string',\n deleteStream=True|False,\n forceDeleteAWSJob=True|False\n )\n \n \n :type otaUpdateId: string\n :param otaUpdateId: [REQUIRED]\n The OTA update ID to delete.\n \n\n :type deleteStream: boolean\n :param deleteStream: Specifies if the stream associated with an OTA update should be deleted when the OTA update is deleted.\n\n :type forceDeleteAWSJob: boolean\n :param forceDeleteAWSJob: Specifies if the AWS Job associated with the OTA update should be deleted when the OTA update is deleted.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_policy(policyName=None):\n \"\"\"\n Deletes the specified policy.\n A policy cannot be deleted if it has non-default versions or it is attached to any certificate.\n To delete a policy, use the DeletePolicyVersion API to delete all non-default versions of the policy; use the DetachPrincipalPolicy API to detach the policy from any certificate; and then use the DeletePolicy API to delete the policy.\n When a policy is deleted using DeletePolicy, its default version is deleted with it.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_policy(\n policyName='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The name of the policy to delete.\n \n\n \"\"\"\n pass\n\ndef delete_policy_version(policyName=None, policyVersionId=None):\n \"\"\"\n Deletes the specified version of the specified policy. You cannot delete the default version of a policy using this API. To delete the default version of a policy, use DeletePolicy . 
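For illustration, a hedged sketch of the teardown sequence described under DeletePolicy above; 'MyPolicy' is a placeholder, the client variable follows the convention of the other examples here, and the policy is assumed to be detached from all certificates already:\n versions = client.list_policy_versions(policyName='MyPolicy')['policyVersions']\n for v in versions:\n if not v['isDefaultVersion']:\n client.delete_policy_version(policyName='MyPolicy', policyVersionId=v['versionId'])\n client.delete_policy(policyName='MyPolicy') # removes the remaining default version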
To find out which version of a policy is marked as the default version, use ListPolicyVersions.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_policy_version(\n policyName='string',\n policyVersionId='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The name of the policy.\n \n\n :type policyVersionId: string\n :param policyVersionId: [REQUIRED]\n The policy version ID.\n \n\n \"\"\"\n pass\n\ndef delete_registration_code():\n \"\"\"\n Deletes a CA certificate registration code.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_registration_code()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_role_alias(roleAlias=None):\n \"\"\"\n Deletes a role alias\n See also: AWS API Documentation\n \n \n :example: response = client.delete_role_alias(\n roleAlias='string'\n )\n \n \n :type roleAlias: string\n :param roleAlias: [REQUIRED]\n The role alias to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_scheduled_audit(scheduledAuditName=None):\n \"\"\"\n Deletes a scheduled audit.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_scheduled_audit(\n scheduledAuditName='string'\n )\n \n \n :type scheduledAuditName: string\n :param scheduledAuditName: [REQUIRED]\n The name of the scheduled audit you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_security_profile(securityProfileName=None, expectedVersion=None):\n \"\"\"\n Deletes a Device Defender security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_security_profile(\n securityProfileName='string',\n expectedVersion=123\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The name of the security profile to be deleted.\n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different than the actual version, a VersionConflictException is thrown.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_stream(streamId=None):\n \"\"\"\n Deletes a stream.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_stream(\n streamId='string'\n )\n \n \n :type streamId: string\n :param streamId: [REQUIRED]\n The stream ID.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_thing(thingName=None, expectedVersion=None):\n \"\"\"\n Deletes the specified thing. Returns successfully with no error if the deletion is successful or you specify a thing that doesn't exist.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_thing(\n thingName='string',\n expectedVersion=123\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing to delete.\n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the thing record in the registry. 
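For illustration, a minimal optimistic-locking sketch (an assumption-laden example, with 'MyThing' as a placeholder thing name):\n current = client.describe_thing(thingName='MyThing')['version']\n client.delete_thing(thingName='MyThing', expectedVersion=current)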
If the version of the record in the registry does not match the expected version specified in the request, the DeleteThing request is rejected with a VersionConflictException .\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_thing_group(thingGroupName=None, expectedVersion=None):\n \"\"\"\n Deletes a thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_thing_group(\n thingGroupName='string',\n expectedVersion=123\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The name of the thing group to delete.\n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the thing group to delete.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_thing_type(thingTypeName=None):\n \"\"\"\n Deletes the specified thing type. You cannot delete a thing type if it has things associated with it. To delete a thing type, first mark it as deprecated by calling DeprecateThingType , then remove any associated things by calling UpdateThing to change the thing type on any associated thing, and finally use DeleteThingType to delete the thing type.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_thing_type(\n thingTypeName='string'\n )\n \n \n :type thingTypeName: string\n :param thingTypeName: [REQUIRED]\n The name of the thing type.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_topic_rule(ruleName=None):\n \"\"\"\n Deletes the rule.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_topic_rule(\n ruleName='string'\n )\n \n \n :type ruleName: string\n :param ruleName: [REQUIRED]\n The name of the rule.\n \n\n \"\"\"\n pass\n\ndef delete_v2_logging_level(targetType=None, targetName=None):\n \"\"\"\n Deletes a logging level.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_v2_logging_level(\n targetType='DEFAULT'|'THING_GROUP',\n targetName='string'\n )\n \n \n :type targetType: string\n :param targetType: [REQUIRED]\n The type of resource for which you are configuring logging. Must be THING_GROUP .\n \n\n :type targetName: string\n :param targetName: [REQUIRED]\n The name of the resource for which you are configuring logging.\n \n\n \"\"\"\n pass\n\ndef deprecate_thing_type(thingTypeName=None, undoDeprecate=None):\n \"\"\"\n Deprecates a thing type. You cannot associate new things with a deprecated thing type.\n See also: AWS API Documentation\n \n \n :example: response = client.deprecate_thing_type(\n thingTypeName='string',\n undoDeprecate=True|False\n )\n \n \n :type thingTypeName: string\n :param thingTypeName: [REQUIRED]\n The name of the thing type to deprecate.\n \n\n :type undoDeprecate: boolean\n :param undoDeprecate: Whether to undeprecate a deprecated thing type. If true , the thing type will not be deprecated anymore and you can associate it with things.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_account_audit_configuration():\n \"\"\"\n Gets information about the Device Defender audit settings for this account. 
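(As a quick orientation, a hedged sketch that prints which audit checks are currently enabled, assuming a configured boto3 IoT client named client:\n config = client.describe_account_audit_configuration()\n for check, cfg in config.get('auditCheckConfigurations', {}).items():\n print(check, cfg.get('enabled'))\n )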
Settings include how audit notifications are sent and which audit checks are enabled or disabled.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_audit_configuration()\n \n \n :rtype: dict\n :return: {\n 'roleArn': 'string',\n 'auditNotificationTargetConfigurations': {\n 'string': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'enabled': True|False\n }\n },\n 'auditCheckConfigurations': {\n 'string': {\n 'enabled': True|False\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_audit_task(taskId=None):\n \"\"\"\n Gets information about a Device Defender audit.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_audit_task(\n taskId='string'\n )\n \n \n :type taskId: string\n :param taskId: [REQUIRED]\n The ID of the audit whose information you want to get.\n \n\n :rtype: dict\n :return: {\n 'taskStatus': 'IN_PROGRESS'|'COMPLETED'|'FAILED'|'CANCELED',\n 'taskType': 'ON_DEMAND_AUDIT_TASK'|'SCHEDULED_AUDIT_TASK',\n 'taskStartTime': datetime(2015, 1, 1),\n 'taskStatistics': {\n 'totalChecks': 123,\n 'inProgressChecks': 123,\n 'waitingForDataCollectionChecks': 123,\n 'compliantChecks': 123,\n 'nonCompliantChecks': 123,\n 'failedChecks': 123,\n 'canceledChecks': 123\n },\n 'scheduledAuditName': 'string',\n 'auditDetails': {\n 'string': {\n 'checkRunStatus': 'IN_PROGRESS'|'WAITING_FOR_DATA_COLLECTION'|'CANCELED'|'COMPLETED_COMPLIANT'|'COMPLETED_NON_COMPLIANT'|'FAILED',\n 'checkCompliant': True|False,\n 'totalResourcesCount': 123,\n 'nonCompliantResourcesCount': 123,\n 'errorCode': 'string',\n 'message': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_authorizer(authorizerName=None):\n \"\"\"\n Describes an authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_authorizer(\n authorizerName='string'\n )\n \n \n :type authorizerName: string\n :param authorizerName: [REQUIRED]\n The name of the authorizer to describe.\n \n\n :rtype: dict\n :return: {\n 'authorizerDescription': {\n 'authorizerName': 'string',\n 'authorizerArn': 'string',\n 'authorizerFunctionArn': 'string',\n 'tokenKeyName': 'string',\n 'tokenSigningPublicKeys': {\n 'string': 'string'\n },\n 'status': 'ACTIVE'|'INACTIVE',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_billing_group(billingGroupName=None):\n \"\"\"\n Returns information about a billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_billing_group(\n billingGroupName='string'\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: [REQUIRED]\n The name of the billing group.\n \n\n :rtype: dict\n :return: {\n 'billingGroupName': 'string',\n 'billingGroupId': 'string',\n 'billingGroupArn': 'string',\n 'version': 123,\n 'billingGroupProperties': {\n 'billingGroupDescription': 'string'\n },\n 'billingGroupMetadata': {\n 'creationDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_ca_certificate(certificateId=None):\n \"\"\"\n Describes a registered CA certificate.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_ca_certificate(\n certificateId='string'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The CA certificate identifier.\n \n\n :rtype: dict\n :return: {\n 'certificateDescription': {\n 'certificateArn': 'string',\n 'certificateId': 'string',\n 'status': 'ACTIVE'|'INACTIVE',\n 'certificatePem': 'string',\n 'ownedBy': 'string',\n 
'creationDate': datetime(2015, 1, 1),\n 'autoRegistrationStatus': 'ENABLE'|'DISABLE',\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'customerVersion': 123,\n 'generationId': 'string',\n 'validity': {\n 'notBefore': datetime(2015, 1, 1),\n 'notAfter': datetime(2015, 1, 1)\n }\n },\n 'registrationConfig': {\n 'templateBody': 'string',\n 'roleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_certificate(certificateId=None):\n \"\"\"\n Gets information about the specified certificate.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_certificate(\n certificateId='string'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)\n \n\n :rtype: dict\n :return: {\n 'certificateDescription': {\n 'certificateArn': 'string',\n 'certificateId': 'string',\n 'caCertificateId': 'string',\n 'status': 'ACTIVE'|'INACTIVE'|'REVOKED'|'PENDING_TRANSFER'|'REGISTER_INACTIVE'|'PENDING_ACTIVATION',\n 'certificatePem': 'string',\n 'ownedBy': 'string',\n 'previousOwnedBy': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'customerVersion': 123,\n 'transferData': {\n 'transferMessage': 'string',\n 'rejectReason': 'string',\n 'transferDate': datetime(2015, 1, 1),\n 'acceptDate': datetime(2015, 1, 1),\n 'rejectDate': datetime(2015, 1, 1)\n },\n 'generationId': 'string',\n 'validity': {\n 'notBefore': datetime(2015, 1, 1),\n 'notAfter': datetime(2015, 1, 1)\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_default_authorizer():\n \"\"\"\n Describes the default authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_default_authorizer()\n \n \n :rtype: dict\n :return: {\n 'authorizerDescription': {\n 'authorizerName': 'string',\n 'authorizerArn': 'string',\n 'authorizerFunctionArn': 'string',\n 'tokenKeyName': 'string',\n 'tokenSigningPublicKeys': {\n 'string': 'string'\n },\n 'status': 'ACTIVE'|'INACTIVE',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_endpoint(endpointType=None):\n \"\"\"\n Returns a unique endpoint specific to the AWS account making the call.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_endpoint(\n endpointType='string'\n )\n \n \n :type endpointType: string\n :param endpointType: The endpoint type. 
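(For example, a minimal sketch that retrieves the ATS data endpoint, assuming credentials and region are already configured:\n endpoint = client.describe_endpoint(endpointType='iot:Data-ATS')['endpointAddress']\n )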
Valid endpoint types include:\n iot:Data - Returns a VeriSign signed data endpoint.\n iot:Data-ATS - Returns an ATS signed data endpoint.\n iot:CredentialProvider - Returns an AWS IoT credentials provider API endpoint.\n iot:Jobs - Returns an AWS IoT device management Jobs API endpoint.\n \n\n :rtype: dict\n :return: {\n 'endpointAddress': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_event_configurations():\n \"\"\"\n Describes event configurations.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_event_configurations()\n \n \n :rtype: dict\n :return: {\n 'eventConfigurations': {\n 'string': {\n 'Enabled': True|False\n }\n },\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_index(indexName=None):\n \"\"\"\n Describes a search index.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_index(\n indexName='string'\n )\n \n \n :type indexName: string\n :param indexName: [REQUIRED]\n The index name.\n \n\n :rtype: dict\n :return: {\n 'indexName': 'string',\n 'indexStatus': 'ACTIVE'|'BUILDING'|'REBUILDING',\n 'schema': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_job(jobId=None):\n \"\"\"\n Describes a job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_job(\n jobId='string'\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The unique identifier you assigned to this job when it was created.\n \n\n :rtype: dict\n :return: {\n 'documentSource': 'string',\n 'job': {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'targetSelection': 'CONTINUOUS'|'SNAPSHOT',\n 'status': 'IN_PROGRESS'|'CANCELED'|'COMPLETED'|'DELETION_IN_PROGRESS',\n 'forceCanceled': True|False,\n 'reasonCode': 'string',\n 'comment': 'string',\n 'targets': [\n 'string',\n ],\n 'description': 'string',\n 'presignedUrlConfig': {\n 'roleArn': 'string',\n 'expiresInSec': 123\n },\n 'jobExecutionsRolloutConfig': {\n 'maximumPerMinute': 123,\n 'exponentialRate': {\n 'baseRatePerMinute': 123,\n 'incrementFactor': 123.0,\n 'rateIncreaseCriteria': {\n 'numberOfNotifiedThings': 123,\n 'numberOfSucceededThings': 123\n }\n }\n },\n 'abortConfig': {\n 'criteriaList': [\n {\n 'failureType': 'FAILED'|'REJECTED'|'TIMED_OUT'|'ALL',\n 'action': 'CANCEL',\n 'thresholdPercentage': 123.0,\n 'minNumberOfExecutedThings': 123\n },\n ]\n },\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'completedAt': datetime(2015, 1, 1),\n 'jobProcessDetails': {\n 'processingTargets': [\n 'string',\n ],\n 'numberOfCanceledThings': 123,\n 'numberOfSucceededThings': 123,\n 'numberOfFailedThings': 123,\n 'numberOfRejectedThings': 123,\n 'numberOfQueuedThings': 123,\n 'numberOfInProgressThings': 123,\n 'numberOfRemovedThings': 123,\n 'numberOfTimedOutThings': 123\n },\n 'timeoutConfig': {\n 'inProgressTimeoutInMinutes': 123\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_job_execution(jobId=None, thingName=None, executionNumber=None):\n \"\"\"\n Describes a job execution.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_job_execution(\n jobId='string',\n thingName='string',\n executionNumber=123\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The unique identifier you assigned to this job when it was created.\n \n\n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing on which the job execution is running.\n \n\n :type executionNumber: integer\n :param 
executionNumber: A string (consisting of the digits '0' through '9') which is used to specify a particular job execution on a particular device.\n\n :rtype: dict\n :return: {\n 'execution': {\n 'jobId': 'string',\n 'status': 'QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',\n 'forceCanceled': True|False,\n 'statusDetails': {\n 'detailsMap': {\n 'string': 'string'\n }\n },\n 'thingArn': 'string',\n 'queuedAt': datetime(2015, 1, 1),\n 'startedAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'executionNumber': 123,\n 'versionNumber': 123,\n 'approximateSecondsBeforeTimedOut': 123\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_role_alias(roleAlias=None):\n \"\"\"\n Describes a role alias.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_role_alias(\n roleAlias='string'\n )\n \n \n :type roleAlias: string\n :param roleAlias: [REQUIRED]\n The role alias to describe.\n \n\n :rtype: dict\n :return: {\n 'roleAliasDescription': {\n 'roleAlias': 'string',\n 'roleAliasArn': 'string',\n 'roleArn': 'string',\n 'owner': 'string',\n 'credentialDurationSeconds': 123,\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_scheduled_audit(scheduledAuditName=None):\n \"\"\"\n Gets information about a scheduled audit.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_scheduled_audit(\n scheduledAuditName='string'\n )\n \n \n :type scheduledAuditName: string\n :param scheduledAuditName: [REQUIRED]\n The name of the scheduled audit whose information you want to get.\n \n\n :rtype: dict\n :return: {\n 'frequency': 'DAILY'|'WEEKLY'|'BIWEEKLY'|'MONTHLY',\n 'dayOfMonth': 'string',\n 'dayOfWeek': 'SUN'|'MON'|'TUE'|'WED'|'THU'|'FRI'|'SAT',\n 'targetCheckNames': [\n 'string',\n ],\n 'scheduledAuditName': 'string',\n 'scheduledAuditArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_security_profile(securityProfileName=None):\n \"\"\"\n Gets information about a Device Defender security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_security_profile(\n securityProfileName='string'\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The name of the security profile whose information you want to get.\n \n\n :rtype: dict\n :return: {\n 'securityProfileName': 'string',\n 'securityProfileArn': 'string',\n 'securityProfileDescription': 'string',\n 'behaviors': [\n {\n 'name': 'string',\n 'metric': 'string',\n 'criteria': {\n 'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n 'value': {\n 'count': 123,\n 'cidrs': [\n 'string',\n ],\n 'ports': [\n 123,\n ]\n },\n 'durationSeconds': 123\n }\n },\n ],\n 'alertTargets': {\n 'string': {\n 'alertTargetArn': 'string',\n 'roleArn': 'string'\n }\n },\n 'version': 123,\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1)\n }\n \n \n :returns: \n (integer) --\n \n \"\"\"\n pass\n\ndef describe_stream(streamId=None):\n \"\"\"\n Gets information about a stream.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_stream(\n streamId='string'\n )\n \n \n :type streamId: string\n :param streamId: [REQUIRED]\n The stream ID.\n \n\n :rtype: dict\n :return: {\n 'streamInfo': {\n 'streamId': 'string',\n 'streamArn': 
'string',\n 'streamVersion': 123,\n 'description': 'string',\n 'files': [\n {\n 'fileId': 123,\n 's3Location': {\n 'bucket': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n ],\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'roleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_thing(thingName=None):\n \"\"\"\n Gets information about the specified thing.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_thing(\n thingName='string'\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing.\n \n\n :rtype: dict\n :return: {\n 'defaultClientId': 'string',\n 'thingName': 'string',\n 'thingId': 'string',\n 'thingArn': 'string',\n 'thingTypeName': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'version': 123,\n 'billingGroupName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_thing_group(thingGroupName=None):\n \"\"\"\n Describe a thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_thing_group(\n thingGroupName='string'\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The name of the thing group.\n \n\n :rtype: dict\n :return: {\n 'thingGroupName': 'string',\n 'thingGroupId': 'string',\n 'thingGroupArn': 'string',\n 'version': 123,\n 'thingGroupProperties': {\n 'thingGroupDescription': 'string',\n 'attributePayload': {\n 'attributes': {\n 'string': 'string'\n },\n 'merge': True|False\n }\n },\n 'thingGroupMetadata': {\n 'parentGroupName': 'string',\n 'rootToParentThingGroups': [\n {\n 'groupName': 'string',\n 'groupArn': 'string'\n },\n ],\n 'creationDate': datetime(2015, 1, 1)\n },\n 'indexName': 'string',\n 'queryString': 'string',\n 'queryVersion': 'string',\n 'status': 'ACTIVE'|'BUILDING'|'REBUILDING'\n }\n \n \n \"\"\"\n pass\n\ndef describe_thing_registration_task(taskId=None):\n \"\"\"\n Describes a bulk thing provisioning task.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_thing_registration_task(\n taskId='string'\n )\n \n \n :type taskId: string\n :param taskId: [REQUIRED]\n The task ID.\n \n\n :rtype: dict\n :return: {\n 'taskId': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'templateBody': 'string',\n 'inputFileBucket': 'string',\n 'inputFileKey': 'string',\n 'roleArn': 'string',\n 'status': 'InProgress'|'Completed'|'Failed'|'Cancelled'|'Cancelling',\n 'message': 'string',\n 'successCount': 123,\n 'failureCount': 123,\n 'percentageProgress': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_thing_type(thingTypeName=None):\n \"\"\"\n Gets information about the specified thing type.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_thing_type(\n thingTypeName='string'\n )\n \n \n :type thingTypeName: string\n :param thingTypeName: [REQUIRED]\n The name of the thing type.\n \n\n :rtype: dict\n :return: {\n 'thingTypeName': 'string',\n 'thingTypeId': 'string',\n 'thingTypeArn': 'string',\n 'thingTypeProperties': {\n 'thingTypeDescription': 'string',\n 'searchableAttributes': [\n 'string',\n ]\n },\n 'thingTypeMetadata': {\n 'deprecated': True|False,\n 'deprecationDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef detach_policy(policyName=None, target=None):\n \"\"\"\n Detaches a policy from the specified target.\n See also: AWS API Documentation\n \n \n :example: response = client.detach_policy(\n policyName='string',\n 
target='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The policy to detach.\n \n\n :type target: string\n :param target: [REQUIRED]\n The target from which the policy will be detached.\n \n\n \"\"\"\n pass\n\ndef detach_principal_policy(policyName=None, principal=None):\n \"\"\"\n Removes the specified policy from the specified certificate.\n See also: AWS API Documentation\n \n \n :example: response = client.detach_principal_policy(\n policyName='string',\n principal='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The name of the policy to detach.\n \n\n :type principal: string\n :param principal: [REQUIRED]\n The principal.\n If the principal is a certificate, specify the certificate ARN. If the principal is an Amazon Cognito identity, specify the identity ID.\n \n\n \"\"\"\n pass\n\ndef detach_security_profile(securityProfileName=None, securityProfileTargetArn=None):\n \"\"\"\n Disassociates a Device Defender security profile from a thing group or from this account.\n See also: AWS API Documentation\n \n \n :example: response = client.detach_security_profile(\n securityProfileName='string',\n securityProfileTargetArn='string'\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The security profile that is detached.\n \n\n :type securityProfileTargetArn: string\n :param securityProfileTargetArn: [REQUIRED]\n The ARN of the thing group from which the security profile is detached.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef detach_thing_principal(thingName=None, principal=None):\n \"\"\"\n Detaches the specified principal from the specified thing.\n See also: AWS API Documentation\n \n \n :example: response = client.detach_thing_principal(\n thingName='string',\n principal='string'\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing.\n \n\n :type principal: string\n :param principal: [REQUIRED]\n If the principal is a certificate, this value must be ARN of the certificate. If the principal is an Amazon Cognito identity, this value must be the ID of the Amazon Cognito identity.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disable_topic_rule(ruleName=None):\n \"\"\"\n Disables the rule.\n See also: AWS API Documentation\n \n \n :example: response = client.disable_topic_rule(\n ruleName='string'\n )\n \n \n :type ruleName: string\n :param ruleName: [REQUIRED]\n The name of the rule to disable.\n \n\n \"\"\"\n pass\n\ndef enable_topic_rule(ruleName=None):\n \"\"\"\n Enables the rule.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_topic_rule(\n ruleName='string'\n )\n \n \n :type ruleName: string\n :param ruleName: [REQUIRED]\n The name of the topic rule to enable.\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_effective_policies(principal=None, cognitoIdentityPoolId=None, thingName=None):\n \"\"\"\n Gets a list of the policies that have an effect on the authorization behavior of the specified device when it connects to the AWS IoT device gateway.\n See also: AWS API Documentation\n \n \n :example: response = client.get_effective_policies(\n principal='string',\n cognitoIdentityPoolId='string',\n thingName='string'\n )\n \n \n :type principal: string\n :param principal: The principal.\n\n :type cognitoIdentityPoolId: string\n :param cognitoIdentityPoolId: The Cognito identity pool ID.\n\n :type thingName: string\n :param thingName: The thing name.\n\n :rtype: dict\n :return: {\n 'effectivePolicies': [\n {\n 'policyName': 'string',\n 'policyArn': 'string',\n 'policyDocument': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_indexing_configuration():\n \"\"\"\n Gets the search configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.get_indexing_configuration()\n \n \n :rtype: dict\n :return: {\n 'thingIndexingConfiguration': {\n 'thingIndexingMode': 'OFF'|'REGISTRY'|'REGISTRY_AND_SHADOW',\n 'thingConnectivityIndexingMode': 'OFF'|'STATUS'\n },\n 'thingGroupIndexingConfiguration': {\n 'thingGroupIndexingMode': 'OFF'|'ON'\n }\n }\n \n \n :returns: \n STATUS Your thing index will contain connectivity status. In order to enable thing connectivity indexing, thingIndexMode must not be set to OFF.\n OFF - Thing connectivity status indexing is disabled.\n \n \"\"\"\n pass\n\ndef get_job_document(jobId=None):\n \"\"\"\n Gets a job document.\n See also: AWS API Documentation\n \n \n :example: response = client.get_job_document(\n jobId='string'\n )\n \n \n :type jobId: string\n :param jobId: [REQUIRED]\n The unique identifier you assigned to this job when it was created.\n \n\n :rtype: dict\n :return: {\n 'document': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_logging_options():\n \"\"\"\n Gets the logging options.\n NOTE: use of this command is not recommended. 
Use GetV2LoggingOptions instead.\n See also: AWS API Documentation\n \n \n :example: response = client.get_logging_options()\n \n \n :rtype: dict\n :return: {\n 'roleArn': 'string',\n 'logLevel': 'DEBUG'|'INFO'|'ERROR'|'WARN'|'DISABLED'\n }\n \n \n \"\"\"\n pass\n\ndef get_ota_update(otaUpdateId=None):\n \"\"\"\n Gets an OTA update.\n See also: AWS API Documentation\n \n \n :example: response = client.get_ota_update(\n otaUpdateId='string'\n )\n \n \n :type otaUpdateId: string\n :param otaUpdateId: [REQUIRED]\n The OTA update ID.\n \n\n :rtype: dict\n :return: {\n 'otaUpdateInfo': {\n 'otaUpdateId': 'string',\n 'otaUpdateArn': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'description': 'string',\n 'targets': [\n 'string',\n ],\n 'awsJobExecutionsRolloutConfig': {\n 'maximumPerMinute': 123\n },\n 'targetSelection': 'CONTINUOUS'|'SNAPSHOT',\n 'otaUpdateFiles': [\n {\n 'fileName': 'string',\n 'fileVersion': 'string',\n 'fileLocation': {\n 'stream': {\n 'streamId': 'string',\n 'fileId': 123\n },\n 's3Location': {\n 'bucket': 'string',\n 'key': 'string',\n 'version': 'string'\n }\n },\n 'codeSigning': {\n 'awsSignerJobId': 'string',\n 'startSigningJobParameter': {\n 'signingProfileParameter': {\n 'certificateArn': 'string',\n 'platform': 'string',\n 'certificatePathOnDevice': 'string'\n },\n 'signingProfileName': 'string',\n 'destination': {\n 's3Destination': {\n 'bucket': 'string',\n 'prefix': 'string'\n }\n }\n },\n 'customCodeSigning': {\n 'signature': {\n 'inlineDocument': b'bytes'\n },\n 'certificateChain': {\n 'certificateName': 'string',\n 'inlineDocument': 'string'\n },\n 'hashAlgorithm': 'string',\n 'signatureAlgorithm': 'string'\n }\n },\n 'attributes': {\n 'string': 'string'\n }\n },\n ],\n 'otaUpdateStatus': 'CREATE_PENDING'|'CREATE_IN_PROGRESS'|'CREATE_COMPLETE'|'CREATE_FAILED',\n 'awsIotJobId': 'string',\n 'awsIotJobArn': 'string',\n 'errorInfo': {\n 'code': 'string',\n 'message': 'string'\n },\n 'additionalParameters': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_policy(policyName=None):\n \"\"\"\n Gets information about the specified policy with the policy document of the default version.\n See also: AWS API Documentation\n \n \n :example: response = client.get_policy(\n policyName='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The name of the policy.\n \n\n :rtype: dict\n :return: {\n 'policyName': 'string',\n 'policyArn': 'string',\n 'policyDocument': 'string',\n 'defaultVersionId': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'generationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_policy_version(policyName=None, policyVersionId=None):\n \"\"\"\n Gets information about the specified policy version.\n See also: AWS API Documentation\n \n \n :example: response = client.get_policy_version(\n policyName='string',\n policyVersionId='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The name of the policy.\n \n\n :type policyVersionId: string\n :param policyVersionId: [REQUIRED]\n The policy version ID.\n \n\n :rtype: dict\n :return: {\n 'policyArn': 'string',\n 'policyName': 'string',\n 'policyDocument': 'string',\n 'policyVersionId': 'string',\n 'isDefaultVersion': True|False,\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'generationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_registration_code():\n \"\"\"\n Gets a registration code used to register a CA certificate with AWS IoT.\n See also: AWS API Documentation\n \n \n :example: response = client.get_registration_code()\n \n \n :rtype: dict\n :return: {\n 'registrationCode': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_topic_rule(ruleName=None):\n \"\"\"\n Gets information about the rule.\n See also: AWS API Documentation\n \n \n :example: response = client.get_topic_rule(\n ruleName='string'\n )\n \n \n :type ruleName: string\n :param ruleName: [REQUIRED]\n The name of the rule.\n \n\n :rtype: dict\n :return: {\n 'ruleArn': 'string',\n 'rule': {\n 'ruleName': 'string',\n 'sql': 'string',\n 'description': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'actions': [\n {\n 'dynamoDB': {\n 'tableName': 'string',\n 'roleArn': 'string',\n 'operation': 'string',\n 'hashKeyField': 'string',\n 'hashKeyValue': 'string',\n 'hashKeyType': 'STRING'|'NUMBER',\n 'rangeKeyField': 'string',\n 'rangeKeyValue': 'string',\n 'rangeKeyType': 'STRING'|'NUMBER',\n 'payloadField': 'string'\n },\n 'dynamoDBv2': {\n 'roleArn': 'string',\n 'putItem': {\n 'tableName': 'string'\n }\n },\n 'lambda': {\n 'functionArn': 'string'\n },\n 'sns': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'messageFormat': 'RAW'|'JSON'\n },\n 'sqs': {\n 'roleArn': 'string',\n 'queueUrl': 'string',\n 'useBase64': True|False\n },\n 'kinesis': {\n 'roleArn': 'string',\n 'streamName': 'string',\n 'partitionKey': 'string'\n },\n 'republish': {\n 'roleArn': 'string',\n 'topic': 'string'\n },\n 's3': {\n 'roleArn': 'string',\n 'bucketName': 'string',\n 'key': 'string',\n 'cannedAcl': 'private'|'public-read'|'public-read-write'|'aws-exec-read'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'log-delivery-write'\n },\n 'firehose': {\n 'roleArn': 'string',\n 
'deliveryStreamName': 'string',\n 'separator': 'string'\n },\n 'cloudwatchMetric': {\n 'roleArn': 'string',\n 'metricNamespace': 'string',\n 'metricName': 'string',\n 'metricValue': 'string',\n 'metricUnit': 'string',\n 'metricTimestamp': 'string'\n },\n 'cloudwatchAlarm': {\n 'roleArn': 'string',\n 'alarmName': 'string',\n 'stateReason': 'string',\n 'stateValue': 'string'\n },\n 'elasticsearch': {\n 'roleArn': 'string',\n 'endpoint': 'string',\n 'index': 'string',\n 'type': 'string',\n 'id': 'string'\n },\n 'salesforce': {\n 'token': 'string',\n 'url': 'string'\n },\n 'iotAnalytics': {\n 'channelArn': 'string',\n 'channelName': 'string',\n 'roleArn': 'string'\n },\n 'iotEvents': {\n 'inputName': 'string',\n 'messageId': 'string',\n 'roleArn': 'string'\n },\n 'stepFunctions': {\n 'executionNamePrefix': 'string',\n 'stateMachineName': 'string',\n 'roleArn': 'string'\n }\n },\n ],\n 'ruleDisabled': True|False,\n 'awsIotSqlVersion': 'string',\n 'errorAction': {\n 'dynamoDB': {\n 'tableName': 'string',\n 'roleArn': 'string',\n 'operation': 'string',\n 'hashKeyField': 'string',\n 'hashKeyValue': 'string',\n 'hashKeyType': 'STRING'|'NUMBER',\n 'rangeKeyField': 'string',\n 'rangeKeyValue': 'string',\n 'rangeKeyType': 'STRING'|'NUMBER',\n 'payloadField': 'string'\n },\n 'dynamoDBv2': {\n 'roleArn': 'string',\n 'putItem': {\n 'tableName': 'string'\n }\n },\n 'lambda': {\n 'functionArn': 'string'\n },\n 'sns': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'messageFormat': 'RAW'|'JSON'\n },\n 'sqs': {\n 'roleArn': 'string',\n 'queueUrl': 'string',\n 'useBase64': True|False\n },\n 'kinesis': {\n 'roleArn': 'string',\n 'streamName': 'string',\n 'partitionKey': 'string'\n },\n 'republish': {\n 'roleArn': 'string',\n 'topic': 'string'\n },\n 's3': {\n 'roleArn': 'string',\n 'bucketName': 'string',\n 'key': 'string',\n 'cannedAcl': 'private'|'public-read'|'public-read-write'|'aws-exec-read'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'log-delivery-write'\n },\n 'firehose': {\n 'roleArn': 'string',\n 'deliveryStreamName': 'string',\n 'separator': 'string'\n },\n 'cloudwatchMetric': {\n 'roleArn': 'string',\n 'metricNamespace': 'string',\n 'metricName': 'string',\n 'metricValue': 'string',\n 'metricUnit': 'string',\n 'metricTimestamp': 'string'\n },\n 'cloudwatchAlarm': {\n 'roleArn': 'string',\n 'alarmName': 'string',\n 'stateReason': 'string',\n 'stateValue': 'string'\n },\n 'elasticsearch': {\n 'roleArn': 'string',\n 'endpoint': 'string',\n 'index': 'string',\n 'type': 'string',\n 'id': 'string'\n },\n 'salesforce': {\n 'token': 'string',\n 'url': 'string'\n },\n 'iotAnalytics': {\n 'channelArn': 'string',\n 'channelName': 'string',\n 'roleArn': 'string'\n },\n 'iotEvents': {\n 'inputName': 'string',\n 'messageId': 'string',\n 'roleArn': 'string'\n },\n 'stepFunctions': {\n 'executionNamePrefix': 'string',\n 'stateMachineName': 'string',\n 'roleArn': 'string'\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_v2_logging_options():\n \"\"\"\n Gets the fine grained logging options.\n See also: AWS API Documentation\n \n \n :example: response = client.get_v2_logging_options()\n \n \n :rtype: dict\n :return: {\n 'roleArn': 'string',\n 'defaultLogLevel': 'DEBUG'|'INFO'|'ERROR'|'WARN'|'DISABLED',\n 'disableAllLogs': True|False\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_active_violations(thingName=None, securityProfileName=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the active violations for a given Device Defender security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.list_active_violations(\n thingName='string',\n securityProfileName='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type thingName: string\n :param thingName: The name of the thing whose active violations are listed.\n\n :type securityProfileName: string\n :param securityProfileName: The name of the Device Defender security profile for which violations are listed.\n\n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :rtype: dict\n :return: {\n 'activeViolations': [\n {\n 'violationId': 'string',\n 'thingName': 'string',\n 'securityProfileName': 'string',\n 'behavior': {\n 'name': 'string',\n 'metric': 'string',\n 'criteria': {\n 'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n 'value': {\n 'count': 123,\n 'cidrs': [\n 'string',\n ],\n 'ports': [\n 123,\n ]\n },\n 'durationSeconds': 123\n }\n },\n 'lastViolationValue': {\n 'count': 123,\n 'cidrs': [\n 'string',\n ],\n 'ports': [\n 123,\n ]\n },\n 'lastViolationTime': datetime(2015, 1, 1),\n 'violationStartTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_attached_policies(target=None, recursive=None, marker=None, pageSize=None):\n \"\"\"\n Lists the policies attached to the specified thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.list_attached_policies(\n target='string',\n recursive=True|False,\n marker='string',\n pageSize=123\n )\n \n \n :type target: string\n :param target: [REQUIRED]\n The group for which the policies will be listed.\n \n\n :type recursive: boolean\n :param recursive: When true, recursively list attached policies.\n\n :type marker: string\n :param marker: The token to retrieve the next set of results.\n\n :type pageSize: integer\n :param pageSize: The maximum number of results to be returned per request.\n\n :rtype: dict\n :return: {\n 'policies': [\n {\n 'policyName': 'string',\n 'policyArn': 'string'\n },\n ],\n 'nextMarker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_audit_findings(taskId=None, checkName=None, resourceIdentifier=None, maxResults=None, nextToken=None, startTime=None, endTime=None):\n \"\"\"\n Lists the findings (results) of a Device Defender audit or of the audits performed during a specified time period. 
(Findings are retained for 180 days.)\n See also: AWS API Documentation\n \n \n :example: response = client.list_audit_findings(\n taskId='string',\n checkName='string',\n resourceIdentifier={\n 'deviceCertificateId': 'string',\n 'caCertificateId': 'string',\n 'cognitoIdentityPoolId': 'string',\n 'clientId': 'string',\n 'policyVersionIdentifier': {\n 'policyName': 'string',\n 'policyVersionId': 'string'\n },\n 'account': 'string'\n },\n maxResults=123,\n nextToken='string',\n startTime=datetime(2015, 1, 1),\n endTime=datetime(2015, 1, 1)\n )\n \n \n :type taskId: string\n :param taskId: A filter to limit results to the audit with the specified ID. You must specify either the taskId or the startTime and endTime, but not both.\n\n :type checkName: string\n :param checkName: A filter to limit results to the findings for the specified audit check.\n\n :type resourceIdentifier: dict\n :param resourceIdentifier: Information identifying the non-compliant resource.\n deviceCertificateId (string) --The ID of the certificate attached to the resource.\n caCertificateId (string) --The ID of the CA certificate used to authorize the certificate.\n cognitoIdentityPoolId (string) --The ID of the Cognito Identity Pool.\n clientId (string) --The client ID.\n policyVersionIdentifier (dict) --The version of the policy associated with the resource.\n policyName (string) --The name of the policy.\n policyVersionId (string) --The ID of the version of the policy associated with the resource.\n account (string) --The account with which the resource is associated.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time. The default is 25.\n\n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type startTime: datetime\n :param startTime: A filter to limit results to those found after the specified time. You must specify either the startTime and endTime or the taskId, but not both.\n\n :type endTime: datetime\n :param endTime: A filter to limit results to those found before the specified time. 
You must specify either the startTime and endTime or the taskId, but not both.\n\n    :rtype: dict\n    :return: {\n        'findings': [\n            {\n                'taskId': 'string',\n                'checkName': 'string',\n                'taskStartTime': datetime(2015, 1, 1),\n                'findingTime': datetime(2015, 1, 1),\n                'severity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW',\n                'nonCompliantResource': {\n                    'resourceType': 'DEVICE_CERTIFICATE'|'CA_CERTIFICATE'|'IOT_POLICY'|'COGNITO_IDENTITY_POOL'|'CLIENT_ID'|'ACCOUNT_SETTINGS',\n                    'resourceIdentifier': {\n                        'deviceCertificateId': 'string',\n                        'caCertificateId': 'string',\n                        'cognitoIdentityPoolId': 'string',\n                        'clientId': 'string',\n                        'policyVersionIdentifier': {\n                            'policyName': 'string',\n                            'policyVersionId': 'string'\n                        },\n                        'account': 'string'\n                    },\n                    'additionalInfo': {\n                        'string': 'string'\n                    }\n                },\n                'relatedResources': [\n                    {\n                        'resourceType': 'DEVICE_CERTIFICATE'|'CA_CERTIFICATE'|'IOT_POLICY'|'COGNITO_IDENTITY_POOL'|'CLIENT_ID'|'ACCOUNT_SETTINGS',\n                        'resourceIdentifier': {\n                            'deviceCertificateId': 'string',\n                            'caCertificateId': 'string',\n                            'cognitoIdentityPoolId': 'string',\n                            'clientId': 'string',\n                            'policyVersionIdentifier': {\n                                'policyName': 'string',\n                                'policyVersionId': 'string'\n                            },\n                            'account': 'string'\n                        },\n                        'additionalInfo': {\n                            'string': 'string'\n                        }\n                    },\n                ],\n                'reasonForNonCompliance': 'string',\n                'reasonForNonComplianceCode': 'string'\n            },\n        ],\n        'nextToken': 'string'\n    }\n    \n    \n    :returns: \n    (string) --\n    (string) --\n    \n    \n    \n    \"\"\"\n    pass\n\ndef list_audit_tasks(startTime=None, endTime=None, taskType=None, taskStatus=None, nextToken=None, maxResults=None):\n    \"\"\"\n    Lists the Device Defender audits that have been performed during a given time period.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_audit_tasks(\n        startTime=datetime(2015, 1, 1),\n        endTime=datetime(2015, 1, 1),\n        taskType='ON_DEMAND_AUDIT_TASK'|'SCHEDULED_AUDIT_TASK',\n        taskStatus='IN_PROGRESS'|'COMPLETED'|'FAILED'|'CANCELED',\n        nextToken='string',\n        maxResults=123\n    )\n    \n    \n    :type startTime: datetime\n    :param startTime: [REQUIRED]\n    The beginning of the time period. Note that audit information is retained for a limited time (180 days). Requesting a start time prior to what is retained results in an 'InvalidRequestException'.\n    \n\n    :type endTime: datetime\n    :param endTime: [REQUIRED]\n    The end of the time period.\n    \n\n    :type taskType: string\n    :param taskType: A filter to limit the output to the specified type of audit: can be one of 'ON_DEMAND_AUDIT_TASK' or 'SCHEDULED_AUDIT_TASK'.\n\n    :type taskStatus: string\n    :param taskStatus: A filter to limit the output to audits with the specified completion status: can be one of 'IN_PROGRESS', 'COMPLETED', 'FAILED' or 'CANCELED'.\n\n    :type nextToken: string\n    :param nextToken: The token for the next set of results.\n\n    :type maxResults: integer\n    :param maxResults: The maximum number of results to return at one time. 
The default is 25.\n\n :rtype: dict\n :return: {\n 'tasks': [\n {\n 'taskId': 'string',\n 'taskStatus': 'IN_PROGRESS'|'COMPLETED'|'FAILED'|'CANCELED',\n 'taskType': 'ON_DEMAND_AUDIT_TASK'|'SCHEDULED_AUDIT_TASK'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_authorizers(pageSize=None, marker=None, ascendingOrder=None, status=None):\n \"\"\"\n Lists the authorizers registered in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_authorizers(\n pageSize=123,\n marker='string',\n ascendingOrder=True|False,\n status='ACTIVE'|'INACTIVE'\n )\n \n \n :type pageSize: integer\n :param pageSize: The maximum number of results to return at one time.\n\n :type marker: string\n :param marker: A marker used to get the next set of results.\n\n :type ascendingOrder: boolean\n :param ascendingOrder: Return the list of authorizers in ascending alphabetical order.\n\n :type status: string\n :param status: The status of the list authorizers request.\n\n :rtype: dict\n :return: {\n 'authorizers': [\n {\n 'authorizerName': 'string',\n 'authorizerArn': 'string'\n },\n ],\n 'nextMarker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_billing_groups(nextToken=None, maxResults=None, namePrefixFilter=None):\n \"\"\"\n Lists the billing groups you have created.\n See also: AWS API Documentation\n \n \n :example: response = client.list_billing_groups(\n nextToken='string',\n maxResults=123,\n namePrefixFilter='string'\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return per request.\n\n :type namePrefixFilter: string\n :param namePrefixFilter: Limit the results to billing groups whose names have the given prefix.\n\n :rtype: dict\n :return: {\n 'billingGroups': [\n {\n 'groupName': 'string',\n 'groupArn': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_ca_certificates(pageSize=None, marker=None, ascendingOrder=None):\n \"\"\"\n Lists the CA certificates registered for your AWS account.\n The results are paginated with a default page size of 25. You can use the returned marker to retrieve additional results.\n See also: AWS API Documentation\n \n \n :example: response = client.list_ca_certificates(\n pageSize=123,\n marker='string',\n ascendingOrder=True|False\n )\n \n \n :type pageSize: integer\n :param pageSize: The result page size.\n\n :type marker: string\n :param marker: The marker for the next set of results.\n\n :type ascendingOrder: boolean\n :param ascendingOrder: Determines the order of the results.\n\n :rtype: dict\n :return: {\n 'certificates': [\n {\n 'certificateArn': 'string',\n 'certificateId': 'string',\n 'status': 'ACTIVE'|'INACTIVE',\n 'creationDate': datetime(2015, 1, 1)\n },\n ],\n 'nextMarker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_certificates(pageSize=None, marker=None, ascendingOrder=None):\n \"\"\"\n Lists the certificates registered in your AWS account.\n The results are paginated with a default page size of 25. 
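A minimal marker-loop sketch, assuming the documented contract that nextMarker is absent or empty once the last page has been returned:\n    \n        certificates = []\n        marker = None\n        while True:\n            kwargs = {'pageSize': 25}\n            if marker:\n                kwargs['marker'] = marker\n            response = client.list_certificates(**kwargs)\n            certificates.extend(response.get('certificates', []))\n            marker = response.get('nextMarker')\n            if not marker:\n                break\n    \n    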
You can use the returned marker to retrieve additional results.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_certificates(\n        pageSize=123,\n        marker='string',\n        ascendingOrder=True|False\n    )\n    \n    \n    :type pageSize: integer\n    :param pageSize: The result page size.\n\n    :type marker: string\n    :param marker: The marker for the next set of results.\n\n    :type ascendingOrder: boolean\n    :param ascendingOrder: Specifies the order for results. If True, the results are returned in ascending order, based on the creation date.\n\n    :rtype: dict\n    :return: {\n        'certificates': [\n            {\n                'certificateArn': 'string',\n                'certificateId': 'string',\n                'status': 'ACTIVE'|'INACTIVE'|'REVOKED'|'PENDING_TRANSFER'|'REGISTER_INACTIVE'|'PENDING_ACTIVATION',\n                'creationDate': datetime(2015, 1, 1)\n            },\n        ],\n        'nextMarker': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_certificates_by_ca(caCertificateId=None, pageSize=None, marker=None, ascendingOrder=None):\n    \"\"\"\n    List the device certificates signed by the specified CA certificate.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_certificates_by_ca(\n        caCertificateId='string',\n        pageSize=123,\n        marker='string',\n        ascendingOrder=True|False\n    )\n    \n    \n    :type caCertificateId: string\n    :param caCertificateId: [REQUIRED]\n    The ID of the CA certificate. This operation will list all registered device certificates that were signed by this CA certificate.\n    \n\n    :type pageSize: integer\n    :param pageSize: The result page size.\n\n    :type marker: string\n    :param marker: The marker for the next set of results.\n\n    :type ascendingOrder: boolean\n    :param ascendingOrder: Specifies the order for results. If True, the results are returned in ascending order, based on the creation date.\n\n    :rtype: dict\n    :return: {\n        'certificates': [\n            {\n                'certificateArn': 'string',\n                'certificateId': 'string',\n                'status': 'ACTIVE'|'INACTIVE'|'REVOKED'|'PENDING_TRANSFER'|'REGISTER_INACTIVE'|'PENDING_ACTIVATION',\n                'creationDate': datetime(2015, 1, 1)\n            },\n        ],\n        'nextMarker': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_indices(nextToken=None, maxResults=None):\n    \"\"\"\n    Lists the search indices.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_indices(\n        nextToken='string',\n        maxResults=123\n    )\n    \n    \n    :type nextToken: string\n    :param nextToken: The token used to get the next set of results, or null if there are no additional results.\n\n    :type maxResults: integer\n    :param maxResults: The maximum number of results to return at one time.\n\n    :rtype: dict\n    :return: {\n        'indexNames': [\n            'string',\n        ],\n        'nextToken': 'string'\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef list_job_executions_for_job(jobId=None, status=None, maxResults=None, nextToken=None):\n    \"\"\"\n    Lists the job executions for a job.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_job_executions_for_job(\n        jobId='string',\n        status='QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',\n        maxResults=123,\n        nextToken='string'\n    )\n    \n    \n    :type jobId: string\n    :param jobId: [REQUIRED]\n    The unique identifier you assigned to this job when it was created.\n    \n\n    :type status: string\n    :param status: The status of the job.\n\n    :type maxResults: integer\n    :param maxResults: The maximum number of results to be returned per request.\n\n    :type nextToken: string\n    :param nextToken: The token to retrieve the next set of results.\n\n    :rtype: dict\n    :return: {\n        'executionSummaries': [\n            
{\n 'thingArn': 'string',\n 'jobExecutionSummary': {\n 'status': 'QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',\n 'queuedAt': datetime(2015, 1, 1),\n 'startedAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'executionNumber': 123\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_job_executions_for_thing(thingName=None, status=None, maxResults=None, nextToken=None):\n \"\"\"\n Lists the job executions for the specified thing.\n See also: AWS API Documentation\n \n \n :example: response = client.list_job_executions_for_thing(\n thingName='string',\n status='QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',\n maxResults=123,\n nextToken='string'\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The thing name.\n \n\n :type status: string\n :param status: An optional filter that lets you search for jobs that have the specified status.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to be returned per request.\n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :rtype: dict\n :return: {\n 'executionSummaries': [\n {\n 'jobId': 'string',\n 'jobExecutionSummary': {\n 'status': 'QUEUED'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'REJECTED'|'REMOVED'|'CANCELED',\n 'queuedAt': datetime(2015, 1, 1),\n 'startedAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'executionNumber': 123\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_jobs(status=None, targetSelection=None, maxResults=None, nextToken=None, thingGroupName=None, thingGroupId=None):\n \"\"\"\n Lists jobs.\n See also: AWS API Documentation\n \n \n :example: response = client.list_jobs(\n status='IN_PROGRESS'|'CANCELED'|'COMPLETED'|'DELETION_IN_PROGRESS',\n targetSelection='CONTINUOUS'|'SNAPSHOT',\n maxResults=123,\n nextToken='string',\n thingGroupName='string',\n thingGroupId='string'\n )\n \n \n :type status: string\n :param status: An optional filter that lets you search for jobs that have the specified status.\n\n :type targetSelection: string\n :param targetSelection: Specifies whether the job will continue to run (CONTINUOUS), or will be complete after all those things specified as targets have completed the job (SNAPSHOT). If continuous, the job may also be run on a thing when a change is detected in a target. 
For example, a job will run on a thing when the thing is added to a target group, even after the job was completed by all things originally in the group.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return per request.\n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type thingGroupName: string\n :param thingGroupName: A filter that limits the returned jobs to those for the specified group.\n\n :type thingGroupId: string\n :param thingGroupId: A filter that limits the returned jobs to those for the specified group.\n\n :rtype: dict\n :return: {\n 'jobs': [\n {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'thingGroupId': 'string',\n 'targetSelection': 'CONTINUOUS'|'SNAPSHOT',\n 'status': 'IN_PROGRESS'|'CANCELED'|'COMPLETED'|'DELETION_IN_PROGRESS',\n 'createdAt': datetime(2015, 1, 1),\n 'lastUpdatedAt': datetime(2015, 1, 1),\n 'completedAt': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_ota_updates(maxResults=None, nextToken=None, otaUpdateStatus=None):\n \"\"\"\n Lists OTA updates.\n See also: AWS API Documentation\n \n \n :example: response = client.list_ota_updates(\n maxResults=123,\n nextToken='string',\n otaUpdateStatus='CREATE_PENDING'|'CREATE_IN_PROGRESS'|'CREATE_COMPLETE'|'CREATE_FAILED'\n )\n \n \n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :type nextToken: string\n :param nextToken: A token used to retrieve the next set of results.\n\n :type otaUpdateStatus: string\n :param otaUpdateStatus: The OTA update job status.\n\n :rtype: dict\n :return: {\n 'otaUpdates': [\n {\n 'otaUpdateId': 'string',\n 'otaUpdateArn': 'string',\n 'creationDate': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_outgoing_certificates(pageSize=None, marker=None, ascendingOrder=None):\n \"\"\"\n Lists certificates that are being transferred but not yet accepted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_outgoing_certificates(\n pageSize=123,\n marker='string',\n ascendingOrder=True|False\n )\n \n \n :type pageSize: integer\n :param pageSize: The result page size.\n\n :type marker: string\n :param marker: The marker for the next set of results.\n\n :type ascendingOrder: boolean\n :param ascendingOrder: Specifies the order for results. If True, the results are returned in ascending order, based on the creation date.\n\n :rtype: dict\n :return: {\n 'outgoingCertificates': [\n {\n 'certificateArn': 'string',\n 'certificateId': 'string',\n 'transferredTo': 'string',\n 'transferDate': datetime(2015, 1, 1),\n 'transferMessage': 'string',\n 'creationDate': datetime(2015, 1, 1)\n },\n ],\n 'nextMarker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_policies(marker=None, pageSize=None, ascendingOrder=None):\n \"\"\"\n Lists your policies.\n See also: AWS API Documentation\n \n \n :example: response = client.list_policies(\n marker='string',\n pageSize=123,\n ascendingOrder=True|False\n )\n \n \n :type marker: string\n :param marker: The marker for the next set of results.\n\n :type pageSize: integer\n :param pageSize: The result page size.\n\n :type ascendingOrder: boolean\n :param ascendingOrder: Specifies the order for results. 
If true, the results are returned in ascending creation order.\n\n    :rtype: dict\n    :return: {\n        'policies': [\n            {\n                'policyName': 'string',\n                'policyArn': 'string'\n            },\n        ],\n        'nextMarker': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_policy_principals(policyName=None, marker=None, pageSize=None, ascendingOrder=None):\n    \"\"\"\n    Lists the principals associated with the specified policy.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_policy_principals(\n        policyName='string',\n        marker='string',\n        pageSize=123,\n        ascendingOrder=True|False\n    )\n    \n    \n    :type policyName: string\n    :param policyName: [REQUIRED]\n    The policy name.\n    \n\n    :type marker: string\n    :param marker: The marker for the next set of results.\n\n    :type pageSize: integer\n    :param pageSize: The result page size.\n\n    :type ascendingOrder: boolean\n    :param ascendingOrder: Specifies the order for results. If true, the results are returned in ascending creation order.\n\n    :rtype: dict\n    :return: {\n        'principals': [\n            'string',\n        ],\n        'nextMarker': 'string'\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef list_policy_versions(policyName=None):\n    \"\"\"\n    Lists the versions of the specified policy and identifies the default version.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_policy_versions(\n        policyName='string'\n    )\n    \n    \n    :type policyName: string\n    :param policyName: [REQUIRED]\n    The policy name.\n    \n\n    :rtype: dict\n    :return: {\n        'policyVersions': [\n            {\n                'versionId': 'string',\n                'isDefaultVersion': True|False,\n                'createDate': datetime(2015, 1, 1)\n            },\n        ]\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_principal_policies(principal=None, marker=None, pageSize=None, ascendingOrder=None):\n    \"\"\"\n    Lists the policies attached to the specified principal. If you use a Cognito identity, the ID must be in Amazon Cognito Identity format .\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_principal_policies(\n        principal='string',\n        marker='string',\n        pageSize=123,\n        ascendingOrder=True|False\n    )\n    \n    \n    :type principal: string\n    :param principal: [REQUIRED]\n    The principal.\n    \n\n    :type marker: string\n    :param marker: The marker for the next set of results.\n\n    :type pageSize: integer\n    :param pageSize: The result page size.\n\n    :type ascendingOrder: boolean\n    :param ascendingOrder: Specifies the order for results. 
If true, results are returned in ascending creation order.\n\n :rtype: dict\n :return: {\n 'policies': [\n {\n 'policyName': 'string',\n 'policyArn': 'string'\n },\n ],\n 'nextMarker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_principal_things(nextToken=None, maxResults=None, principal=None):\n \"\"\"\n Lists the things associated with the specified principal.\n See also: AWS API Documentation\n \n \n :example: response = client.list_principal_things(\n nextToken='string',\n maxResults=123,\n principal='string'\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this operation.\n\n :type principal: string\n :param principal: [REQUIRED]\n The principal.\n \n\n :rtype: dict\n :return: {\n 'things': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_role_aliases(pageSize=None, marker=None, ascendingOrder=None):\n \"\"\"\n Lists the role aliases registered in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_role_aliases(\n pageSize=123,\n marker='string',\n ascendingOrder=True|False\n )\n \n \n :type pageSize: integer\n :param pageSize: The maximum number of results to return at one time.\n\n :type marker: string\n :param marker: A marker used to get the next set of results.\n\n :type ascendingOrder: boolean\n :param ascendingOrder: Return the list of role aliases in ascending alphabetical order.\n\n :rtype: dict\n :return: {\n 'roleAliases': [\n 'string',\n ],\n 'nextMarker': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_scheduled_audits(nextToken=None, maxResults=None):\n \"\"\"\n Lists all of your scheduled audits.\n See also: AWS API Documentation\n \n \n :example: response = client.list_scheduled_audits(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time. The default is 25.\n\n :rtype: dict\n :return: {\n 'scheduledAudits': [\n {\n 'scheduledAuditName': 'string',\n 'scheduledAuditArn': 'string',\n 'frequency': 'DAILY'|'WEEKLY'|'BIWEEKLY'|'MONTHLY',\n 'dayOfMonth': 'string',\n 'dayOfWeek': 'SUN'|'MON'|'TUE'|'WED'|'THU'|'FRI'|'SAT'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_security_profiles(nextToken=None, maxResults=None):\n \"\"\"\n Lists the Device Defender security profiles you have created. 
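A short monitoring sketch (hedged; it relies only on the response shapes documented here) that prints the number of active violations per profile:\n    \n        profiles = client.list_security_profiles().get('securityProfileIdentifiers', [])\n        for profile in profiles:\n            violations = client.list_active_violations(securityProfileName=profile['name'])\n            print(profile['name'], len(violations.get('activeViolations', [])))\n    \n    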
You can use filters to list only those security profiles associated with a thing group or only those associated with your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_security_profiles(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :rtype: dict\n :return: {\n 'securityProfileIdentifiers': [\n {\n 'name': 'string',\n 'arn': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_security_profiles_for_target(nextToken=None, maxResults=None, recursive=None, securityProfileTargetArn=None):\n \"\"\"\n Lists the Device Defender security profiles attached to a target (thing group).\n See also: AWS API Documentation\n \n \n :example: response = client.list_security_profiles_for_target(\n nextToken='string',\n maxResults=123,\n recursive=True|False,\n securityProfileTargetArn='string'\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :type recursive: boolean\n :param recursive: If true, return child groups as well.\n\n :type securityProfileTargetArn: string\n :param securityProfileTargetArn: [REQUIRED]\n The ARN of the target (thing group) whose attached security profiles you want to get.\n \n\n :rtype: dict\n :return: {\n 'securityProfileTargetMappings': [\n {\n 'securityProfileIdentifier': {\n 'name': 'string',\n 'arn': 'string'\n },\n 'target': {\n 'arn': 'string'\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_streams(maxResults=None, nextToken=None, ascendingOrder=None):\n \"\"\"\n Lists all of the streams in your AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_streams(\n maxResults=123,\n nextToken='string',\n ascendingOrder=True|False\n )\n \n \n :type maxResults: integer\n :param maxResults: The maximum number of results to return at a time.\n\n :type nextToken: string\n :param nextToken: A token used to get the next set of results.\n\n :type ascendingOrder: boolean\n :param ascendingOrder: Set to true to return the list of streams in ascending order.\n\n :rtype: dict\n :return: {\n 'streams': [\n {\n 'streamId': 'string',\n 'streamArn': 'string',\n 'streamVersion': 123,\n 'description': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_resource(resourceArn=None, nextToken=None):\n \"\"\"\n Lists the tags (metadata) you have assigned to the resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n resourceArn='string',\n nextToken='string'\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The ARN of the resource.\n \n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :rtype: dict\n :return: {\n 'tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_targets_for_policy(policyName=None, marker=None, pageSize=None):\n \"\"\"\n List targets for the specified policy.\n See also: AWS API Documentation\n \n \n :example: response = client.list_targets_for_policy(\n policyName='string',\n marker='string',\n pageSize=123\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The policy 
name.\n \n\n :type marker: string\n :param marker: A marker used to get the next set of results.\n\n :type pageSize: integer\n :param pageSize: The maximum number of results to return at one time.\n\n :rtype: dict\n :return: {\n 'targets': [\n 'string',\n ],\n 'nextMarker': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_targets_for_security_profile(securityProfileName=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the targets (thing groups) associated with a given Device Defender security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.list_targets_for_security_profile(\n securityProfileName='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The security profile.\n \n\n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :rtype: dict\n :return: {\n 'securityProfileTargets': [\n {\n 'arn': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_thing_groups(nextToken=None, maxResults=None, parentGroup=None, namePrefixFilter=None, recursive=None):\n \"\"\"\n List the thing groups in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_thing_groups(\n nextToken='string',\n maxResults=123,\n parentGroup='string',\n namePrefixFilter='string',\n recursive=True|False\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :type parentGroup: string\n :param parentGroup: A filter that limits the results to those with the specified parent group.\n\n :type namePrefixFilter: string\n :param namePrefixFilter: A filter that limits the results to those with the specified name prefix.\n\n :type recursive: boolean\n :param recursive: If true, return child groups as well.\n\n :rtype: dict\n :return: {\n 'thingGroups': [\n {\n 'groupName': 'string',\n 'groupArn': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_thing_groups_for_thing(thingName=None, nextToken=None, maxResults=None):\n \"\"\"\n List the thing groups to which the specified thing belongs.\n See also: AWS API Documentation\n \n \n :example: response = client.list_thing_groups_for_thing(\n thingName='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The thing name.\n \n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :rtype: dict\n :return: {\n 'thingGroups': [\n {\n 'groupName': 'string',\n 'groupArn': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_thing_principals(thingName=None):\n \"\"\"\n Lists the principals associated with the specified thing.\n See also: AWS API Documentation\n \n \n :example: response = client.list_thing_principals(\n thingName='string'\n )\n \n \n :type thingName: string\n :param thingName: [REQUIRED]\n The name of the thing.\n \n\n :rtype: dict\n :return: {\n 'principals': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_thing_registration_task_reports(taskId=None, reportType=None, nextToken=None, maxResults=None):\n \"\"\"\n 
Information about the thing registration tasks.\n See also: AWS API Documentation\n \n \n :example: response = client.list_thing_registration_task_reports(\n taskId='string',\n reportType='ERRORS'|'RESULTS',\n nextToken='string',\n maxResults=123\n )\n \n \n :type taskId: string\n :param taskId: [REQUIRED]\n The id of the task.\n \n\n :type reportType: string\n :param reportType: [REQUIRED]\n The type of task report.\n \n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return per request.\n\n :rtype: dict\n :return: {\n 'resourceLinks': [\n 'string',\n ],\n 'reportType': 'ERRORS'|'RESULTS',\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_thing_registration_tasks(nextToken=None, maxResults=None, status=None):\n \"\"\"\n List bulk thing provisioning tasks.\n See also: AWS API Documentation\n \n \n :example: response = client.list_thing_registration_tasks(\n nextToken='string',\n maxResults=123,\n status='InProgress'|'Completed'|'Failed'|'Cancelled'|'Cancelling'\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :type status: string\n :param status: The status of the bulk thing provisioning task.\n\n :rtype: dict\n :return: {\n 'taskIds': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_thing_types(nextToken=None, maxResults=None, thingTypeName=None):\n \"\"\"\n Lists the existing thing types.\n See also: AWS API Documentation\n \n \n :example: response = client.list_thing_types(\n nextToken='string',\n maxResults=123,\n thingTypeName='string'\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this operation.\n\n :type thingTypeName: string\n :param thingTypeName: The name of the thing type.\n\n :rtype: dict\n :return: {\n 'thingTypes': [\n {\n 'thingTypeName': 'string',\n 'thingTypeArn': 'string',\n 'thingTypeProperties': {\n 'thingTypeDescription': 'string',\n 'searchableAttributes': [\n 'string',\n ]\n },\n 'thingTypeMetadata': {\n 'deprecated': True|False,\n 'deprecationDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1)\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_things(nextToken=None, maxResults=None, attributeName=None, attributeValue=None, thingTypeName=None):\n \"\"\"\n Lists your things. Use the attributeName and attributeValue parameters to filter your things. 
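A sketch of such a filtered call (the Color/Red filter mirrors the example described in the next sentence):\n    \n        response = client.list_things(attributeName='Color', attributeValue='Red')\n        for thing in response.get('things', []):\n            print(thing['thingName'], thing.get('attributes', {}))\n    \n    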
For example, calling ListThings with attributeName=Color and attributeValue=Red retrieves all things in the registry that contain an attribute Color with the value Red .\n See also: AWS API Documentation\n \n \n :example: response = client.list_things(\n nextToken='string',\n maxResults=123,\n attributeName='string',\n attributeValue='string',\n thingTypeName='string'\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this operation.\n\n :type attributeName: string\n :param attributeName: The attribute name used to search for things.\n\n :type attributeValue: string\n :param attributeValue: The attribute value used to search for things.\n\n :type thingTypeName: string\n :param thingTypeName: The name of the thing type used to search for things.\n\n :rtype: dict\n :return: {\n 'things': [\n {\n 'thingName': 'string',\n 'thingTypeName': 'string',\n 'thingArn': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'version': 123\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_things_in_billing_group(billingGroupName=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the things you have added to the given billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.list_things_in_billing_group(\n billingGroupName='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: [REQUIRED]\n The name of the billing group.\n \n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return per request.\n\n :rtype: dict\n :return: {\n 'things': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_things_in_thing_group(thingGroupName=None, recursive=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the things in the specified group.\n See also: AWS API Documentation\n \n \n :example: response = client.list_things_in_thing_group(\n thingGroupName='string',\n recursive=True|False,\n nextToken='string',\n maxResults=123\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The thing group name.\n \n\n :type recursive: boolean\n :param recursive: When true, list things in this thing group and in all child groups as well.\n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :rtype: dict\n :return: {\n 'things': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_topic_rules(topic=None, maxResults=None, nextToken=None, ruleDisabled=None):\n \"\"\"\n Lists the rules for the specific topic.\n See also: AWS API Documentation\n \n \n :example: response = client.list_topic_rules(\n topic='string',\n maxResults=123,\n nextToken='string',\n ruleDisabled=True|False\n )\n \n \n :type topic: string\n :param topic: The topic.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return.\n\n :type nextToken: string\n :param nextToken: A token used to retrieve the next value.\n\n :type ruleDisabled: boolean\n :param ruleDisabled: Specifies whether the rule is disabled.\n\n 
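A minimal sketch that lists the enabled rules for a topic filter and fetches each rule's full definition via get_topic_rule (the topic value 'sensors/temperature' is purely illustrative):\n    \n        response = client.list_topic_rules(topic='sensors/temperature', ruleDisabled=False)\n        for summary in response.get('rules', []):\n            detail = client.get_topic_rule(ruleName=summary['ruleName'])\n            print(summary['ruleName'], detail['rule']['sql'])\n\n    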
:rtype: dict\n    :return: {\n        'rules': [\n            {\n                'ruleArn': 'string',\n                'ruleName': 'string',\n                'topicPattern': 'string',\n                'createdAt': datetime(2015, 1, 1),\n                'ruleDisabled': True|False\n            },\n        ],\n        'nextToken': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_v2_logging_levels(targetType=None, nextToken=None, maxResults=None):\n    \"\"\"\n    Lists logging levels.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_v2_logging_levels(\n        targetType='DEFAULT'|'THING_GROUP',\n        nextToken='string',\n        maxResults=123\n    )\n    \n    \n    :type targetType: string\n    :param targetType: The type of resource for which you are configuring logging. Must be THING_GROUP .\n\n    :type nextToken: string\n    :param nextToken: The token used to get the next set of results, or null if there are no additional results.\n\n    :type maxResults: integer\n    :param maxResults: The maximum number of results to return at one time.\n\n    :rtype: dict\n    :return: {\n        'logTargetConfigurations': [\n            {\n                'logTarget': {\n                    'targetType': 'DEFAULT'|'THING_GROUP',\n                    'targetName': 'string'\n                },\n                'logLevel': 'DEBUG'|'INFO'|'ERROR'|'WARN'|'DISABLED'\n            },\n        ],\n        'nextToken': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_violation_events(startTime=None, endTime=None, thingName=None, securityProfileName=None, nextToken=None, maxResults=None):\n    \"\"\"\n    Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior or thing (device).\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_violation_events(\n        startTime=datetime(2015, 1, 1),\n        endTime=datetime(2015, 1, 1),\n        thingName='string',\n        securityProfileName='string',\n        nextToken='string',\n        maxResults=123\n    )\n    \n    \n    :type startTime: datetime\n    :param startTime: [REQUIRED]\n    The start time for the alerts to be listed.\n    \n\n    :type endTime: datetime\n    :param endTime: [REQUIRED]\n    The end time for the alerts to be listed.\n    \n\n    :type thingName: string\n    :param thingName: A filter to limit results to those alerts caused by the specified thing.\n\n    :type securityProfileName: string\n    :param securityProfileName: A filter to limit results to those alerts generated by the specified security profile.\n\n    :type nextToken: string\n    :param nextToken: The token for the next set of results.\n\n    :type maxResults: integer\n    :param maxResults: The maximum number of results to return at one time.\n\n    :rtype: dict\n    :return: {\n        'violationEvents': [\n            {\n                'violationId': 'string',\n                'thingName': 'string',\n                'securityProfileName': 'string',\n                'behavior': {\n                    'name': 'string',\n                    'metric': 'string',\n                    'criteria': {\n                        'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n                        'value': {\n                            'count': 123,\n                            'cidrs': [\n                                'string',\n                            ],\n                            'ports': [\n                                123,\n                            ]\n                        },\n                        'durationSeconds': 123\n                    }\n                },\n                'metricValue': {\n                    'count': 123,\n                    'cidrs': [\n                        'string',\n                    ],\n                    'ports': [\n                        123,\n                    ]\n                },\n                'violationEventType': 'in-alarm'|'alarm-cleared'|'alarm-invalidated',\n                'violationEventTime': datetime(2015, 1, 1)\n            },\n        ],\n        'nextToken': 'string'\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef register_ca_certificate(caCertificate=None, verificationCertificate=None, setAsActive=None, allowAutoRegistration=None, registrationConfig=None):\n    \"\"\"\n    Registers a CA certificate with AWS IoT. 
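A hedged sketch of the full registration flow; AWS IoT expects the verification certificate's subject Common Name to be the account's registration code, and the PEM file paths here are purely illustrative:\n    \n        code = client.get_registration_code()['registrationCode']\n        # generate a verification certificate whose subject CN equals `code`, then:\n        with open('ca.pem') as ca_pem, open('verification.pem') as ver_pem:\n            response = client.register_ca_certificate(\n                caCertificate=ca_pem.read(),\n                verificationCertificate=ver_pem.read(),\n                setAsActive=True,\n                allowAutoRegistration=True\n            )\n        print(response['certificateId'])\n    \n    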
This CA certificate can then be used to sign device certificates, which can then be registered with AWS IoT. You can register up to 10 CA certificates per AWS account that have the same subject field. This enables you to have up to 10 certificate authorities sign your device certificates. If you have more than one CA certificate registered, make sure you pass the CA certificate when you register your device certificates with the RegisterCertificate API.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.register_ca_certificate(\n        caCertificate='string',\n        verificationCertificate='string',\n        setAsActive=True|False,\n        allowAutoRegistration=True|False,\n        registrationConfig={\n            'templateBody': 'string',\n            'roleArn': 'string'\n        }\n    )\n    \n    \n    :type caCertificate: string\n    :param caCertificate: [REQUIRED]\n    The CA certificate.\n    \n\n    :type verificationCertificate: string\n    :param verificationCertificate: [REQUIRED]\n    The private key verification certificate.\n    \n\n    :type setAsActive: boolean\n    :param setAsActive: A boolean value that specifies if the CA certificate is set to active.\n\n    :type allowAutoRegistration: boolean\n    :param allowAutoRegistration: Allows this CA certificate to be used for auto registration of device certificates.\n\n    :type registrationConfig: dict\n    :param registrationConfig: Information about the registration configuration.\n            templateBody (string) --The template body.\n            roleArn (string) --The ARN of the role.\n    \n\n    :rtype: dict\n    :return: {\n        'certificateArn': 'string',\n        'certificateId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef register_certificate(certificatePem=None, caCertificatePem=None, setAsActive=None, status=None):\n    \"\"\"\n    Registers a device certificate with AWS IoT. If you have more than one CA certificate that has the same subject field, you must specify the CA certificate that was used to sign the device certificate being registered.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.register_certificate(\n        certificatePem='string',\n        caCertificatePem='string',\n        setAsActive=True|False,\n        status='ACTIVE'|'INACTIVE'|'REVOKED'|'PENDING_TRANSFER'|'REGISTER_INACTIVE'|'PENDING_ACTIVATION'\n    )\n    \n    \n    :type certificatePem: string\n    :param certificatePem: [REQUIRED]\n    The certificate data, in PEM format.\n    \n\n    :type caCertificatePem: string\n    :param caCertificatePem: The CA certificate used to sign the device certificate being registered.\n\n    :type setAsActive: boolean\n    :param setAsActive: A boolean value that specifies if the certificate is set to active.\n\n    :type status: string\n    :param status: The status of the register certificate request.\n\n    :rtype: dict\n    :return: {\n        'certificateArn': 'string',\n        'certificateId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef register_thing(templateBody=None, parameters=None):\n    \"\"\"\n    Provisions a thing.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.register_thing(\n        templateBody='string',\n        parameters={\n            'string': 'string'\n        }\n    )\n    \n    \n    :type templateBody: string\n    :param templateBody: [REQUIRED]\n    The provisioning template. See Programmatic Provisioning for more information.\n    \n\n    :type parameters: dict\n    :param parameters: The parameters for provisioning a thing. 
See Programmatic Provisioning for more information.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'certificatePem': 'string',\n 'resourceArns': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef reject_certificate_transfer(certificateId=None, rejectReason=None):\n \"\"\"\n Rejects a pending certificate transfer. After AWS IoT rejects a certificate transfer, the certificate status changes from PENDING_TRANSFER to INACTIVE .\n To check for pending certificate transfers, call ListCertificates to enumerate your certificates.\n This operation can only be called by the transfer destination. After it is called, the certificate will be returned to the source's account in the INACTIVE state.\n See also: AWS API Documentation\n \n \n :example: response = client.reject_certificate_transfer(\n certificateId='string',\n rejectReason='string'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)\n \n\n :type rejectReason: string\n :param rejectReason: The reason the certificate transfer was rejected.\n\n \"\"\"\n pass\n\ndef remove_thing_from_billing_group(billingGroupName=None, billingGroupArn=None, thingName=None, thingArn=None):\n \"\"\"\n Removes the given thing from the billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_thing_from_billing_group(\n billingGroupName='string',\n billingGroupArn='string',\n thingName='string',\n thingArn='string'\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: The name of the billing group.\n\n :type billingGroupArn: string\n :param billingGroupArn: The ARN of the billing group.\n\n :type thingName: string\n :param thingName: The name of the thing to be removed from the billing group.\n\n :type thingArn: string\n :param thingArn: The ARN of the thing to be removed from the billing group.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef remove_thing_from_thing_group(thingGroupName=None, thingGroupArn=None, thingName=None, thingArn=None):\n \"\"\"\n Remove the specified thing from the specified group.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_thing_from_thing_group(\n thingGroupName='string',\n thingGroupArn='string',\n thingName='string',\n thingArn='string'\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: The group name.\n\n :type thingGroupArn: string\n :param thingGroupArn: The group ARN.\n\n :type thingName: string\n :param thingName: The name of the thing to remove from the group.\n\n :type thingArn: string\n :param thingArn: The ARN of the thing to remove from the group.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef replace_topic_rule(ruleName=None, topicRulePayload=None):\n \"\"\"\n Replaces the rule. You must specify all parameters for the new rule. Creating rules is an administrator-level action. 
Any user who has permission to create rules will be able to access data processed by the rule.\n See also: AWS API Documentation\n \n \n :example: response = client.replace_topic_rule(\n ruleName='string',\n topicRulePayload={\n 'sql': 'string',\n 'description': 'string',\n 'actions': [\n {\n 'dynamoDB': {\n 'tableName': 'string',\n 'roleArn': 'string',\n 'operation': 'string',\n 'hashKeyField': 'string',\n 'hashKeyValue': 'string',\n 'hashKeyType': 'STRING'|'NUMBER',\n 'rangeKeyField': 'string',\n 'rangeKeyValue': 'string',\n 'rangeKeyType': 'STRING'|'NUMBER',\n 'payloadField': 'string'\n },\n 'dynamoDBv2': {\n 'roleArn': 'string',\n 'putItem': {\n 'tableName': 'string'\n }\n },\n 'lambda': {\n 'functionArn': 'string'\n },\n 'sns': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'messageFormat': 'RAW'|'JSON'\n },\n 'sqs': {\n 'roleArn': 'string',\n 'queueUrl': 'string',\n 'useBase64': True|False\n },\n 'kinesis': {\n 'roleArn': 'string',\n 'streamName': 'string',\n 'partitionKey': 'string'\n },\n 'republish': {\n 'roleArn': 'string',\n 'topic': 'string'\n },\n 's3': {\n 'roleArn': 'string',\n 'bucketName': 'string',\n 'key': 'string',\n 'cannedAcl': 'private'|'public-read'|'public-read-write'|'aws-exec-read'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'log-delivery-write'\n },\n 'firehose': {\n 'roleArn': 'string',\n 'deliveryStreamName': 'string',\n 'separator': 'string'\n },\n 'cloudwatchMetric': {\n 'roleArn': 'string',\n 'metricNamespace': 'string',\n 'metricName': 'string',\n 'metricValue': 'string',\n 'metricUnit': 'string',\n 'metricTimestamp': 'string'\n },\n 'cloudwatchAlarm': {\n 'roleArn': 'string',\n 'alarmName': 'string',\n 'stateReason': 'string',\n 'stateValue': 'string'\n },\n 'elasticsearch': {\n 'roleArn': 'string',\n 'endpoint': 'string',\n 'index': 'string',\n 'type': 'string',\n 'id': 'string'\n },\n 'salesforce': {\n 'token': 'string',\n 'url': 'string'\n },\n 'iotAnalytics': {\n 'channelArn': 'string',\n 'channelName': 'string',\n 'roleArn': 'string'\n },\n 'iotEvents': {\n 'inputName': 'string',\n 'messageId': 'string',\n 'roleArn': 'string'\n },\n 'stepFunctions': {\n 'executionNamePrefix': 'string',\n 'stateMachineName': 'string',\n 'roleArn': 'string'\n }\n },\n ],\n 'ruleDisabled': True|False,\n 'awsIotSqlVersion': 'string',\n 'errorAction': {\n 'dynamoDB': {\n 'tableName': 'string',\n 'roleArn': 'string',\n 'operation': 'string',\n 'hashKeyField': 'string',\n 'hashKeyValue': 'string',\n 'hashKeyType': 'STRING'|'NUMBER',\n 'rangeKeyField': 'string',\n 'rangeKeyValue': 'string',\n 'rangeKeyType': 'STRING'|'NUMBER',\n 'payloadField': 'string'\n },\n 'dynamoDBv2': {\n 'roleArn': 'string',\n 'putItem': {\n 'tableName': 'string'\n }\n },\n 'lambda': {\n 'functionArn': 'string'\n },\n 'sns': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'messageFormat': 'RAW'|'JSON'\n },\n 'sqs': {\n 'roleArn': 'string',\n 'queueUrl': 'string',\n 'useBase64': True|False\n },\n 'kinesis': {\n 'roleArn': 'string',\n 'streamName': 'string',\n 'partitionKey': 'string'\n },\n 'republish': {\n 'roleArn': 'string',\n 'topic': 'string'\n },\n 's3': {\n 'roleArn': 'string',\n 'bucketName': 'string',\n 'key': 'string',\n 'cannedAcl': 'private'|'public-read'|'public-read-write'|'aws-exec-read'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'log-delivery-write'\n },\n 'firehose': {\n 'roleArn': 'string',\n 'deliveryStreamName': 'string',\n 'separator': 'string'\n },\n 'cloudwatchMetric': {\n 'roleArn': 'string',\n 'metricNamespace': 
'string',\n 'metricName': 'string',\n 'metricValue': 'string',\n 'metricUnit': 'string',\n 'metricTimestamp': 'string'\n },\n 'cloudwatchAlarm': {\n 'roleArn': 'string',\n 'alarmName': 'string',\n 'stateReason': 'string',\n 'stateValue': 'string'\n },\n 'elasticsearch': {\n 'roleArn': 'string',\n 'endpoint': 'string',\n 'index': 'string',\n 'type': 'string',\n 'id': 'string'\n },\n 'salesforce': {\n 'token': 'string',\n 'url': 'string'\n },\n 'iotAnalytics': {\n 'channelArn': 'string',\n 'channelName': 'string',\n 'roleArn': 'string'\n },\n 'iotEvents': {\n 'inputName': 'string',\n 'messageId': 'string',\n 'roleArn': 'string'\n },\n 'stepFunctions': {\n 'executionNamePrefix': 'string',\n 'stateMachineName': 'string',\n 'roleArn': 'string'\n }\n }\n }\n )\n \n \n :type ruleName: string\n :param ruleName: [REQUIRED]\n The name of the rule.\n \n\n :type topicRulePayload: dict\n :param topicRulePayload: [REQUIRED]\n The rule payload.\n sql (string) -- [REQUIRED]The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide .\n description (string) --The description of the rule.\n actions (list) -- [REQUIRED]The actions associated with the rule.\n (dict) --Describes the actions associated with a rule.\n dynamoDB (dict) --Write to a DynamoDB table.\n tableName (string) -- [REQUIRED]The name of the DynamoDB table.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the DynamoDB table.\n operation (string) --The type of operation to be performed. This follows the substitution template, so it can be ${operation} , but the substitution must result in one of the following: INSERT , UPDATE , or DELETE .\n hashKeyField (string) -- [REQUIRED]The hash key name.\n hashKeyValue (string) -- [REQUIRED]The hash key value.\n hashKeyType (string) --The hash key type. Valid values are 'STRING' or 'NUMBER'\n rangeKeyField (string) --The range key name.\n rangeKeyValue (string) --The range key value.\n rangeKeyType (string) --The range key type. Valid values are 'STRING' or 'NUMBER'\n payloadField (string) --The action payload. This name can be customized.\n dynamoDBv2 (dict) --Write to a DynamoDB table. This is a new version of the DynamoDB action. It allows you to write each attribute in an MQTT message payload into a separate DynamoDB column.\n roleArn (string) --The ARN of the IAM role that grants access to the DynamoDB table.\n putItem (dict) --Specifies the DynamoDB table to which the message data will be written. For example:\n { 'dynamoDBv2': { 'roleArn': 'aws:iam:12341251:my-role' 'putItem': { 'tableName': 'my-table' } } }\n Each attribute in the message payload will be written to a separate column in the DynamoDB database.\n tableName (string) -- [REQUIRED]The table where the message data will be written\n \n lambda (dict) --Invoke a Lambda function.\n functionArn (string) -- [REQUIRED]The ARN of the Lambda function.\n sns (dict) --Publish to an Amazon SNS topic.\n targetArn (string) -- [REQUIRED]The ARN of the SNS topic.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n messageFormat (string) --(Optional) The message format of the message to publish. Accepted values are 'JSON' and 'RAW'. The default value of the attribute is 'RAW'. SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. 
To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html in their official documentation.\nsqs (dict) --Publish to an Amazon SQS queue.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\nqueueUrl (string) -- [REQUIRED]The URL of the Amazon SQS queue.\nuseBase64 (boolean) --Specifies whether to use Base64 encoding.\nkinesis (dict) --Write data to an Amazon Kinesis stream.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the Amazon Kinesis stream.\nstreamName (string) -- [REQUIRED]The name of the Amazon Kinesis stream.\npartitionKey (string) --The partition key.\nrepublish (dict) --Publish to another MQTT topic.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\ntopic (string) -- [REQUIRED]The name of the MQTT topic.\ns3 (dict) --Write to an Amazon S3 bucket.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\nbucketName (string) -- [REQUIRED]The Amazon S3 bucket.\nkey (string) -- [REQUIRED]The object key.\ncannedAcl (string) --The Amazon S3 canned ACL that controls access to the object identified by the object key. For more information, see S3 canned ACLs .\nfirehose (dict) --Write to an Amazon Kinesis Firehose stream.\nroleArn (string) -- [REQUIRED]The IAM role that grants access to the Amazon Kinesis Firehose stream.\ndeliveryStreamName (string) -- [REQUIRED]The delivery stream name.\nseparator (string) --A character separator that will be used to separate records written to the Firehose stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma).\ncloudwatchMetric (dict) --Capture a CloudWatch metric.\nroleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch metric.\nmetricNamespace (string) -- [REQUIRED]The CloudWatch metric namespace name.\nmetricName (string) -- [REQUIRED]The CloudWatch metric name.\nmetricValue (string) -- [REQUIRED]The CloudWatch metric value.\nmetricUnit (string) -- [REQUIRED]The metric unit supported by CloudWatch.\nmetricTimestamp (string) --An optional Unix timestamp .\ncloudwatchAlarm (dict) --Change the state of a CloudWatch alarm.\nroleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch alarm.\nalarmName (string) -- [REQUIRED]The CloudWatch alarm name.\nstateReason (string) -- [REQUIRED]The reason for the alarm change.\nstateValue (string) -- [REQUIRED]The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA.\nelasticsearch (dict) --Write data to an Amazon Elasticsearch Service domain.\nroleArn (string) -- [REQUIRED]The IAM role ARN that has access to Elasticsearch.\nendpoint (string) -- [REQUIRED]The endpoint of your Elasticsearch domain.\nindex (string) -- [REQUIRED]The Elasticsearch index where you want to store your data.\ntype (string) -- [REQUIRED]The type of document you are storing.\nid (string) -- [REQUIRED]The unique identifier for the document you are storing.\nsalesforce (dict) --Send a message to a Salesforce IoT Cloud Input Stream.\ntoken (string) -- [REQUIRED]The token used to authenticate access to the Salesforce IoT Cloud Input Stream. The token is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\nurl (string) -- [REQUIRED]The URL exposed by the Salesforce IoT Cloud Input Stream. 
The URL is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n iotAnalytics (dict) --Sends message data to an AWS IoT Analytics channel.\n channelArn (string) --(deprecated) The ARN of the IoT Analytics channel to which message data will be sent.\n channelName (string) --The name of the IoT Analytics channel to which message data will be sent.\n roleArn (string) --The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).\n iotEvents (dict) --Sends an input to an AWS IoT Events detector.\n inputName (string) -- [REQUIRED]The name of the AWS IoT Events input.\n messageId (string) --[Optional] Use this to ensure that only one input (message) with a given messageId will be processed by an AWS IoT Events detector.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants AWS IoT permission to send an input to an AWS IoT Events detector. ('Action':'iotevents:BatchPutMessage').\n stepFunctions (dict) --Starts execution of a Step Functions state machine.\n executionNamePrefix (string) --(Optional) A name will be given to the state machine execution consisting of this prefix followed by a UUID. Step Functions automatically creates a unique name for each state machine execution if one is not provided.\n stateMachineName (string) -- [REQUIRED]The name of the Step Functions state machine whose execution will be started.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants IoT permission to start execution of a state machine ('Action':'states:StartExecution').\n \n ruleDisabled (boolean) --Specifies whether the rule is disabled.\n awsIotSqlVersion (string) --The version of the SQL rules engine to use when evaluating the rule.\n errorAction (dict) --The action to take when an error occurs.\n dynamoDB (dict) --Write to a DynamoDB table.\n tableName (string) -- [REQUIRED]The name of the DynamoDB table.\n roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the DynamoDB table.\n operation (string) --The type of operation to be performed. This follows the substitution template, so it can be ${operation} , but the substitution must result in one of the following: INSERT , UPDATE , or DELETE .\n hashKeyField (string) -- [REQUIRED]The hash key name.\n hashKeyValue (string) -- [REQUIRED]The hash key value.\n hashKeyType (string) --The hash key type. Valid values are 'STRING' or 'NUMBER'\n rangeKeyField (string) --The range key name.\n rangeKeyValue (string) --The range key value.\n rangeKeyType (string) --The range key type. Valid values are 'STRING' or 'NUMBER'\n payloadField (string) --The action payload. This name can be customized.\n dynamoDBv2 (dict) --Write to a DynamoDB table. This is a new version of the DynamoDB action. It allows you to write each attribute in an MQTT message payload into a separate DynamoDB column.\n roleArn (string) --The ARN of the IAM role that grants access to the DynamoDB table.\n putItem (dict) --Specifies the DynamoDB table to which the message data will be written. 
For example:\n{ 'dynamoDBv2': { 'roleArn': 'aws:iam:12341251:my-role', 'putItem': { 'tableName': 'my-table' } } }\nEach attribute in the message payload will be written to a separate column in the DynamoDB database.\ntableName (string) -- [REQUIRED]The table where the message data will be written.\n\nlambda (dict) --Invoke a Lambda function.\nfunctionArn (string) -- [REQUIRED]The ARN of the Lambda function.\nsns (dict) --Publish to an Amazon SNS topic.\ntargetArn (string) -- [REQUIRED]The ARN of the SNS topic.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\nmessageFormat (string) --(Optional) The message format of the message to publish. Accepted values are 'JSON' and 'RAW'. The default value of the attribute is 'RAW'. SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html in their official documentation.\nsqs (dict) --Publish to an Amazon SQS queue.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\nqueueUrl (string) -- [REQUIRED]The URL of the Amazon SQS queue.\nuseBase64 (boolean) --Specifies whether to use Base64 encoding.\nkinesis (dict) --Write data to an Amazon Kinesis stream.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access to the Amazon Kinesis stream.\nstreamName (string) -- [REQUIRED]The name of the Amazon Kinesis stream.\npartitionKey (string) --The partition key.\nrepublish (dict) --Publish to another MQTT topic.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\ntopic (string) -- [REQUIRED]The name of the MQTT topic.\ns3 (dict) --Write to an Amazon S3 bucket.\nroleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\nbucketName (string) -- [REQUIRED]The Amazon S3 bucket.\nkey (string) -- [REQUIRED]The object key.\ncannedAcl (string) --The Amazon S3 canned ACL that controls access to the object identified by the object key. For more information, see S3 canned ACLs .\nfirehose (dict) --Write to an Amazon Kinesis Firehose stream.\nroleArn (string) -- [REQUIRED]The IAM role that grants access to the Amazon Kinesis Firehose stream.\ndeliveryStreamName (string) -- [REQUIRED]The delivery stream name.\nseparator (string) --A character separator that will be used to separate records written to the Firehose stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma).\ncloudwatchMetric (dict) --Capture a CloudWatch metric.\nroleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch metric.\nmetricNamespace (string) -- [REQUIRED]The CloudWatch metric namespace name.\nmetricName (string) -- [REQUIRED]The CloudWatch metric name.\nmetricValue (string) -- [REQUIRED]The CloudWatch metric value.\nmetricUnit (string) -- [REQUIRED]The metric unit supported by CloudWatch.\nmetricTimestamp (string) --An optional Unix timestamp .\ncloudwatchAlarm (dict) --Change the state of a CloudWatch alarm.\nroleArn (string) -- [REQUIRED]The IAM role that allows access to the CloudWatch alarm.\nalarmName (string) -- [REQUIRED]The CloudWatch alarm name.\nstateReason (string) -- [REQUIRED]The reason for the alarm change.\nstateValue (string) -- [REQUIRED]The value of the alarm state. 
Acceptable values are: OK, ALARM, INSUFFICIENT_DATA.\n elasticsearch (dict) --Write data to an Amazon Elasticsearch Service domain.\n roleArn (string) -- [REQUIRED]The IAM role ARN that has access to Elasticsearch.\n endpoint (string) -- [REQUIRED]The endpoint of your Elasticsearch domain.\n index (string) -- [REQUIRED]The Elasticsearch index where you want to store your data.\n type (string) -- [REQUIRED]The type of document you are storing.\n id (string) -- [REQUIRED]The unique identifier for the document you are storing.\n salesforce (dict) --Send a message to a Salesforce IoT Cloud Input Stream.\n token (string) -- [REQUIRED]The token used to authenticate access to the Salesforce IoT Cloud Input Stream. The token is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n url (string) -- [REQUIRED]The URL exposed by the Salesforce IoT Cloud Input Stream. The URL is available from the Salesforce IoT Cloud platform after creation of the Input Stream.\n iotAnalytics (dict) --Sends message data to an AWS IoT Analytics channel.\n channelArn (string) --(deprecated) The ARN of the IoT Analytics channel to which message data will be sent.\n channelName (string) --The name of the IoT Analytics channel to which message data will be sent.\n roleArn (string) --The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).\n iotEvents (dict) --Sends an input to an AWS IoT Events detector.\n inputName (string) -- [REQUIRED]The name of the AWS IoT Events input.\n messageId (string) --[Optional] Use this to ensure that only one input (message) with a given messageId will be processed by an AWS IoT Events detector.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants AWS IoT permission to send an input to an AWS IoT Events detector. ('Action':'iotevents:BatchPutMessage').\n stepFunctions (dict) --Starts execution of a Step Functions state machine.\n executionNamePrefix (string) --(Optional) A name will be given to the state machine execution consisting of this prefix followed by a UUID. 
Step Functions automatically creates a unique name for each state machine execution if one is not provided.\n stateMachineName (string) -- [REQUIRED]The name of the Step Functions state machine whose execution will be started.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants IoT permission to start execution of a state machine ('Action':'states:StartExecution').\n \n \n\n \"\"\"\n pass\n\ndef search_index(indexName=None, queryString=None, nextToken=None, maxResults=None, queryVersion=None):\n \"\"\"\n The query search index.\n See also: AWS API Documentation\n \n \n :example: response = client.search_index(\n indexName='string',\n queryString='string',\n nextToken='string',\n maxResults=123,\n queryVersion='string'\n )\n \n \n :type indexName: string\n :param indexName: The search index name.\n\n :type queryString: string\n :param queryString: [REQUIRED]\n The search query string.\n \n\n :type nextToken: string\n :param nextToken: The token used to get the next set of results, or null if there are no additional results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return at one time.\n\n :type queryVersion: string\n :param queryVersion: The query version.\n\n :rtype: dict\n :return: {\n 'nextToken': 'string',\n 'things': [\n {\n 'thingName': 'string',\n 'thingId': 'string',\n 'thingTypeName': 'string',\n 'thingGroupNames': [\n 'string',\n ],\n 'attributes': {\n 'string': 'string'\n },\n 'shadow': 'string',\n 'connectivity': {\n 'connected': True|False,\n 'timestamp': 123\n }\n },\n ],\n 'thingGroups': [\n {\n 'thingGroupName': 'string',\n 'thingGroupId': 'string',\n 'thingGroupDescription': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'parentGroupNames': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef set_default_authorizer(authorizerName=None):\n \"\"\"\n Sets the default authorizer. This will be used if a websocket connection is made without specifying an authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.set_default_authorizer(\n authorizerName='string'\n )\n \n \n :type authorizerName: string\n :param authorizerName: [REQUIRED]\n The authorizer name.\n \n\n :rtype: dict\n :return: {\n 'authorizerName': 'string',\n 'authorizerArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef set_default_policy_version(policyName=None, policyVersionId=None):\n \"\"\"\n Sets the specified version of the specified policy as the policy's default (operative) version. This action affects all certificates to which the policy is attached. To list the principals the policy is attached to, use the ListPrincipalPolicy API.\n See also: AWS API Documentation\n \n \n :example: response = client.set_default_policy_version(\n policyName='string',\n policyVersionId='string'\n )\n \n \n :type policyName: string\n :param policyName: [REQUIRED]\n The policy name.\n \n\n :type policyVersionId: string\n :param policyVersionId: [REQUIRED]\n The policy version ID.\n \n\n \"\"\"\n pass\n\ndef set_logging_options(loggingOptionsPayload=None):\n \"\"\"\n Sets the logging options.\n NOTE: use of this command is not recommended. 
Use SetV2LoggingOptions instead.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.set_logging_options(\n        loggingOptionsPayload={\n            'roleArn': 'string',\n            'logLevel': 'DEBUG'|'INFO'|'ERROR'|'WARN'|'DISABLED'\n        }\n    )\n    \n    \n    :type loggingOptionsPayload: dict\n    :param loggingOptionsPayload: [REQUIRED]\n    The logging options payload.\n    roleArn (string) -- [REQUIRED]The ARN of the IAM role that grants access.\n    logLevel (string) --The log level.\n    \n\n    \"\"\"\n    pass\n\ndef set_v2_logging_level(logTarget=None, logLevel=None):\n    \"\"\"\n    Sets the logging level.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.set_v2_logging_level(\n        logTarget={\n            'targetType': 'DEFAULT'|'THING_GROUP',\n            'targetName': 'string'\n        },\n        logLevel='DEBUG'|'INFO'|'ERROR'|'WARN'|'DISABLED'\n    )\n    \n    \n    :type logTarget: dict\n    :param logTarget: [REQUIRED]\n    The log target.\n    targetType (string) -- [REQUIRED]The target type.\n    targetName (string) --The target name.\n    \n\n    :type logLevel: string\n    :param logLevel: [REQUIRED]\n    The log level.\n    \n\n    \"\"\"\n    pass\n\ndef set_v2_logging_options(roleArn=None, defaultLogLevel=None, disableAllLogs=None):\n    \"\"\"\n    Sets the logging options for the V2 logging service.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.set_v2_logging_options(\n        roleArn='string',\n        defaultLogLevel='DEBUG'|'INFO'|'ERROR'|'WARN'|'DISABLED',\n        disableAllLogs=True|False\n    )\n    \n    \n    :type roleArn: string\n    :param roleArn: The ARN of the role that allows IoT to write to CloudWatch Logs.\n\n    :type defaultLogLevel: string\n    :param defaultLogLevel: The default logging level.\n\n    :type disableAllLogs: boolean\n    :param disableAllLogs: If true, all logs are disabled. The default is false.\n\n    \"\"\"\n    pass\n\ndef start_on_demand_audit_task(targetCheckNames=None):\n    \"\"\"\n    Starts an on-demand Device Defender audit.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.start_on_demand_audit_task(\n        targetCheckNames=[\n            'string',\n        ]\n    )\n    \n    \n    :type targetCheckNames: list\n    :param targetCheckNames: [REQUIRED]\n    Which checks are performed during the audit. The checks you specify must be enabled for your account or an exception occurs. Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.\n    (string) --An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)\n    \n\n    :rtype: dict\n    :return: {\n        'taskId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\n
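# Illustrative usage sketch (editor's addition, not part of the generated stubs): one way to kick off an on-demand Device Defender audit with boto3, following the start_on_demand_audit_task signature documented above. The check name is assumed to be enabled for the account.\ndef _example_start_on_demand_audit_task():\n    import boto3  # requires valid AWS credentials when actually called\n    client = boto3.client('iot')\n    # Returns a dict of the form {'taskId': '...'} per the stub above.\n    return client.start_on_demand_audit_task(\n        targetCheckNames=['LOGGING_DISABLED_CHECK']\n    )\n\n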
def start_thing_registration_task(templateBody=None, inputFileBucket=None, inputFileKey=None, roleArn=None):\n    \"\"\"\n    Creates a bulk thing provisioning task.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.start_thing_registration_task(\n        templateBody='string',\n        inputFileBucket='string',\n        inputFileKey='string',\n        roleArn='string'\n    )\n    \n    \n    :type templateBody: string\n    :param templateBody: [REQUIRED]\n    The provisioning template.\n    \n\n    :type inputFileBucket: string\n    :param inputFileBucket: [REQUIRED]\n    The S3 bucket that contains the input file.\n    \n\n    :type inputFileKey: string\n    :param inputFileKey: [REQUIRED]\n    The name of the input file within the S3 bucket. The file contains newline-delimited JSON. Each line contains the parameter values to provision one device (thing).\n    \n\n    :type roleArn: string\n    :param roleArn: [REQUIRED]\n    The IAM role ARN that grants permission to access the input file.\n    \n\n    :rtype: dict\n    :return: {\n        'taskId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef stop_thing_registration_task(taskId=None):\n    \"\"\"\n    Cancels a bulk thing provisioning task.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_thing_registration_task(\n        taskId='string'\n    )\n    \n    \n    :type taskId: string\n    :param taskId: [REQUIRED]\n    The bulk thing provisioning task ID.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef tag_resource(resourceArn=None, tags=None):\n    \"\"\"\n    Adds to or modifies the tags of the given resource. Tags are metadata which can be used to manage a resource.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.tag_resource(\n        resourceArn='string',\n        tags=[\n            {\n                'Key': 'string',\n                'Value': 'string'\n            },\n        ]\n    )\n    \n    \n    :type resourceArn: string\n    :param resourceArn: [REQUIRED]\n    The ARN of the resource.\n    \n\n    :type tags: list\n    :param tags: [REQUIRED]\n    The new or modified tags for the resource.\n    (dict) --A set of key/value pairs that are used to manage the resource.\n    Key (string) --The tag's key.\n    Value (string) --The tag's value.\n    \n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef test_authorization(principal=None, cognitoIdentityPoolId=None, authInfos=None, clientId=None, policyNamesToAdd=None, policyNamesToSkip=None):\n    \"\"\"\n    Tests if a specified principal is authorized to perform an AWS IoT action on a specified resource. Use this to test and debug the authorization behavior of devices that connect to the AWS IoT device gateway.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.test_authorization(\n        principal='string',\n        cognitoIdentityPoolId='string',\n        authInfos=[\n            {\n                'actionType': 'PUBLISH'|'SUBSCRIBE'|'RECEIVE'|'CONNECT',\n                'resources': [\n                    'string',\n                ]\n            },\n        ],\n        clientId='string',\n        policyNamesToAdd=[\n            'string',\n        ],\n        policyNamesToSkip=[\n            'string',\n        ]\n    )\n    \n    \n    :type principal: string\n    :param principal: The principal.\n\n    :type cognitoIdentityPoolId: string\n    :param cognitoIdentityPoolId: The Cognito identity pool ID.\n\n    :type authInfos: list\n    :param authInfos: [REQUIRED]\n    A list of authorization info objects. 
Simulating authorization will create a response for each authInfo object in the list.\n (dict) --A collection of authorization information.\n actionType (string) --The type of action for which the principal is being authorized.\n resources (list) --The resources for which the principal is being authorized to perform the specified action.\n (string) --\n \n \n\n :type clientId: string\n :param clientId: The MQTT client ID.\n\n :type policyNamesToAdd: list\n :param policyNamesToAdd: When testing custom authorization, the policies specified here are treated as if they are attached to the principal being authorized.\n (string) --\n \n\n :type policyNamesToSkip: list\n :param policyNamesToSkip: When testing custom authorization, the policies specified here are treated as if they are not attached to the principal being authorized.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'authResults': [\n {\n 'authInfo': {\n 'actionType': 'PUBLISH'|'SUBSCRIBE'|'RECEIVE'|'CONNECT',\n 'resources': [\n 'string',\n ]\n },\n 'allowed': {\n 'policies': [\n {\n 'policyName': 'string',\n 'policyArn': 'string'\n },\n ]\n },\n 'denied': {\n 'implicitDeny': {\n 'policies': [\n {\n 'policyName': 'string',\n 'policyArn': 'string'\n },\n ]\n },\n 'explicitDeny': {\n 'policies': [\n {\n 'policyName': 'string',\n 'policyArn': 'string'\n },\n ]\n }\n },\n 'authDecision': 'ALLOWED'|'EXPLICIT_DENY'|'IMPLICIT_DENY',\n 'missingContextValues': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef test_invoke_authorizer(authorizerName=None, token=None, tokenSignature=None):\n \"\"\"\n Tests a custom authorization behavior by invoking a specified custom authorizer. Use this to test and debug the custom authorization behavior of devices that connect to the AWS IoT device gateway.\n See also: AWS API Documentation\n \n \n :example: response = client.test_invoke_authorizer(\n authorizerName='string',\n token='string',\n tokenSignature='string'\n )\n \n \n :type authorizerName: string\n :param authorizerName: [REQUIRED]\n The custom authorizer name.\n \n\n :type token: string\n :param token: [REQUIRED]\n The token returned by your custom authentication service.\n \n\n :type tokenSignature: string\n :param tokenSignature: [REQUIRED]\n The signature made with the token and your custom authentication service's private key.\n \n\n :rtype: dict\n :return: {\n 'isAuthenticated': True|False,\n 'principalId': 'string',\n 'policyDocuments': [\n 'string',\n ],\n 'refreshAfterInSeconds': 123,\n 'disconnectAfterInSeconds': 123\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef transfer_certificate(certificateId=None, targetAwsAccount=None, transferMessage=None):\n \"\"\"\n Transfers the specified certificate to the specified AWS account.\n You can cancel the transfer until it is acknowledged by the recipient.\n No notification is sent to the transfer destination's account. It is up to the caller to notify the transfer target.\n The certificate being transferred must not be in the ACTIVE state. You can use the UpdateCertificate API to deactivate it.\n The certificate must not have any policies attached to it. You can use the DetachPrincipalPolicy API to detach them.\n See also: AWS API Documentation\n \n \n :example: response = client.transfer_certificate(\n certificateId='string',\n targetAwsAccount='string',\n transferMessage='string'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. 
(The last part of the certificate ARN contains the certificate ID.)\n \n\n :type targetAwsAccount: string\n :param targetAwsAccount: [REQUIRED]\n The AWS account.\n \n\n :type transferMessage: string\n :param transferMessage: The transfer message.\n\n :rtype: dict\n :return: {\n 'transferredCertificateArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef untag_resource(resourceArn=None, tagKeys=None):\n \"\"\"\n Removes the given tags (metadata) from the resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n resourceArn='string',\n tagKeys=[\n 'string',\n ]\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The ARN of the resource.\n \n\n :type tagKeys: list\n :param tagKeys: [REQUIRED]\n A list of the keys of the tags to be removed from the resource.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_account_audit_configuration(roleArn=None, auditNotificationTargetConfigurations=None, auditCheckConfigurations=None):\n \"\"\"\n Configures or reconfigures the Device Defender audit settings for this account. Settings include how audit notifications are sent and which audit checks are enabled or disabled.\n See also: AWS API Documentation\n \n \n :example: response = client.update_account_audit_configuration(\n roleArn='string',\n auditNotificationTargetConfigurations={\n 'string': {\n 'targetArn': 'string',\n 'roleArn': 'string',\n 'enabled': True|False\n }\n },\n auditCheckConfigurations={\n 'string': {\n 'enabled': True|False\n }\n }\n )\n \n \n :type roleArn: string\n :param roleArn: The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates and other items as necessary when performing an audit.\n\n :type auditNotificationTargetConfigurations: dict\n :param auditNotificationTargetConfigurations: Information about the targets to which audit notifications are sent.\n (string) --\n (dict) --Information about the targets to which audit notifications are sent.\n targetArn (string) --The ARN of the target (SNS topic) to which audit notifications are sent.\n roleArn (string) --The ARN of the role that grants permission to send notifications to the target.\n enabled (boolean) --True if notifications to the target are enabled.\n \n \n\n :type auditCheckConfigurations: dict\n :param auditCheckConfigurations: Specifies which audit checks are enabled and disabled for this account. Use DescribeAccountAuditConfiguration to see the list of all checks including those that are currently enabled.\n Note that some data collection may begin immediately when certain checks are enabled. When a check is disabled, any data collected so far in relation to the check is deleted.\n You cannot disable a check if it is used by any scheduled audit. You must first delete the check from the scheduled audit or delete the scheduled audit itself.\n On the first call to UpdateAccountAuditConfiguration this parameter is required and must specify at least one enabled check.\n (string) --An audit check name. Checks must be enabled for your account. 
(Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)\n (dict) --Which audit checks are enabled and disabled for this account.\n enabled (boolean) --True if this audit check is enabled for this account.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_authorizer(authorizerName=None, authorizerFunctionArn=None, tokenKeyName=None, tokenSigningPublicKeys=None, status=None):\n \"\"\"\n Updates an authorizer.\n See also: AWS API Documentation\n \n \n :example: response = client.update_authorizer(\n authorizerName='string',\n authorizerFunctionArn='string',\n tokenKeyName='string',\n tokenSigningPublicKeys={\n 'string': 'string'\n },\n status='ACTIVE'|'INACTIVE'\n )\n \n \n :type authorizerName: string\n :param authorizerName: [REQUIRED]\n The authorizer name.\n \n\n :type authorizerFunctionArn: string\n :param authorizerFunctionArn: The ARN of the authorizer's Lambda function.\n\n :type tokenKeyName: string\n :param tokenKeyName: The key used to extract the token from the HTTP headers.\n\n :type tokenSigningPublicKeys: dict\n :param tokenSigningPublicKeys: The public keys used to verify the token signature.\n (string) --\n (string) --\n \n\n :type status: string\n :param status: The status of the update authorizer request.\n\n :rtype: dict\n :return: {\n 'authorizerName': 'string',\n 'authorizerArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_billing_group(billingGroupName=None, billingGroupProperties=None, expectedVersion=None):\n \"\"\"\n Updates information about the billing group.\n See also: AWS API Documentation\n \n \n :example: response = client.update_billing_group(\n billingGroupName='string',\n billingGroupProperties={\n 'billingGroupDescription': 'string'\n },\n expectedVersion=123\n )\n \n \n :type billingGroupName: string\n :param billingGroupName: [REQUIRED]\n The name of the billing group.\n \n\n :type billingGroupProperties: dict\n :param billingGroupProperties: [REQUIRED]\n The properties of the billing group.\n billingGroupDescription (string) --The description of the billing group.\n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the billing group. If the version of the billing group does not match the expected version specified in the request, the UpdateBillingGroup request is rejected with a VersionConflictException .\n\n :rtype: dict\n :return: {\n 'version': 123\n }\n \n \n \"\"\"\n pass\n\ndef update_ca_certificate(certificateId=None, newStatus=None, newAutoRegistrationStatus=None, registrationConfig=None, removeAutoRegistration=None):\n \"\"\"\n Updates a registered CA certificate.\n See also: AWS API Documentation\n \n \n :example: response = client.update_ca_certificate(\n certificateId='string',\n newStatus='ACTIVE'|'INACTIVE',\n newAutoRegistrationStatus='ENABLE'|'DISABLE',\n registrationConfig={\n 'templateBody': 'string',\n 'roleArn': 'string'\n },\n removeAutoRegistration=True|False\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The CA certificate identifier.\n \n\n :type newStatus: string\n :param newStatus: The updated status of the CA certificate.\n Note: The status value REGISTER_INACTIVE is deprecated and should not be used.\n \n\n :type newAutoRegistrationStatus: string\n :param newAutoRegistrationStatus: The new value for the auto registration status. 
Valid values are: 'ENABLE' or 'DISABLE'.\n\n :type registrationConfig: dict\n :param registrationConfig: Information about the registration configuration.\n templateBody (string) --The template body.\n roleArn (string) --The ARN of the role.\n \n\n :type removeAutoRegistration: boolean\n :param removeAutoRegistration: If true, remove auto registration.\n\n \"\"\"\n pass\n\ndef update_certificate(certificateId=None, newStatus=None):\n \"\"\"\n Updates the status of the specified certificate. This operation is idempotent.\n Moving a certificate from the ACTIVE state (including REVOKED) will not disconnect currently connected devices, but these devices will be unable to reconnect.\n The ACTIVE state is required to authenticate devices connecting to AWS IoT using a certificate.\n See also: AWS API Documentation\n \n \n :example: response = client.update_certificate(\n certificateId='string',\n newStatus='ACTIVE'|'INACTIVE'|'REVOKED'|'PENDING_TRANSFER'|'REGISTER_INACTIVE'|'PENDING_ACTIVATION'\n )\n \n \n :type certificateId: string\n :param certificateId: [REQUIRED]\n The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)\n \n\n :type newStatus: string\n :param newStatus: [REQUIRED]\n The new status.\n Note: Setting the status to PENDING_TRANSFER will result in an exception being thrown. PENDING_TRANSFER is a status used internally by AWS IoT. It is not intended for developer use.Note: The status value REGISTER_INACTIVE is deprecated and should not be used.\n \n\n \"\"\"\n pass\n\ndef update_dynamic_thing_group(thingGroupName=None, thingGroupProperties=None, expectedVersion=None, indexName=None, queryString=None, queryVersion=None):\n \"\"\"\n Updates a dynamic thing group.\n See also: AWS API Documentation\n \n \n :example: response = client.update_dynamic_thing_group(\n thingGroupName='string',\n thingGroupProperties={\n 'thingGroupDescription': 'string',\n 'attributePayload': {\n 'attributes': {\n 'string': 'string'\n },\n 'merge': True|False\n }\n },\n expectedVersion=123,\n indexName='string',\n queryString='string',\n queryVersion='string'\n )\n \n \n :type thingGroupName: string\n :param thingGroupName: [REQUIRED]\n The name of the dynamic thing group to update.\n \n\n :type thingGroupProperties: dict\n :param thingGroupProperties: [REQUIRED]\n The dynamic thing group properties to update.\n thingGroupDescription (string) --The thing group description.\n attributePayload (dict) --The thing group attributes in JSON format.\n attributes (dict) --A JSON string containing up to three key-value pair in JSON format. 
For example:\n    {\'attributes\':{\'string1\':\'string2\'}}\n    (string) --\n    (string) --\n    \n    merge (boolean) --Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.\n    To remove an attribute, call UpdateThing with an empty attribute value.\n    Note\n    The merge attribute is only valid when calling UpdateThing .\n    \n    \n\n    :type expectedVersion: integer\n    :param expectedVersion: The expected version of the dynamic thing group to update.\n\n    :type indexName: string\n    :param indexName: The dynamic thing group index to update.\n    Note\n    Currently one index is supported: 'AWS_Things'.\n    \n\n    :type queryString: string\n    :param queryString: The dynamic thing group search query string to update.\n\n    :type queryVersion: string\n    :param queryVersion: The dynamic thing group query version to update.\n    Note\n    Currently one query version is supported: '2017-09-30'. If not specified, the query version defaults to this value.\n    \n\n    :rtype: dict\n    :return: {\n        'version': 123\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef update_event_configurations(eventConfigurations=None):\n    \"\"\"\n    Updates the event configurations.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_event_configurations(\n        eventConfigurations={\n            'string': {\n                'Enabled': True|False\n            }\n        }\n    )\n    \n    \n    :type eventConfigurations: dict\n    :param eventConfigurations: The new event configuration values.\n    (string) --\n    (dict) --Configuration.\n    Enabled (boolean) --True to enable the configuration.\n    \n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\n
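# Illustrative usage sketch (editor's addition, not part of the generated stubs): enabling lifecycle events via update_event_configurations, following the signature documented above. The 'THING' event type key is an assumption drawn from the AWS IoT event configuration docs; verify it against the current API.\ndef _example_update_event_configurations():\n    import boto3  # requires valid AWS credentials when actually called\n    client = boto3.client('iot')\n    # Returns an empty dict on success, per the stub above.\n    return client.update_event_configurations(\n        eventConfigurations={'THING': {'Enabled': True}}\n    )\n\n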
def update_indexing_configuration(thingIndexingConfiguration=None, thingGroupIndexingConfiguration=None):\n    \"\"\"\n    Updates the search configuration.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_indexing_configuration(\n        thingIndexingConfiguration={\n            'thingIndexingMode': 'OFF'|'REGISTRY'|'REGISTRY_AND_SHADOW',\n            'thingConnectivityIndexingMode': 'OFF'|'STATUS'\n        },\n        thingGroupIndexingConfiguration={\n            'thingGroupIndexingMode': 'OFF'|'ON'\n        }\n    )\n    \n    \n    :type thingIndexingConfiguration: dict\n    :param thingIndexingConfiguration: Thing indexing configuration.\n    thingIndexingMode (string) -- [REQUIRED]Thing indexing mode. Valid values are:\n    REGISTRY - Your thing index will contain only registry data.\n    REGISTRY_AND_SHADOW - Your thing index will contain registry and shadow data.\n    OFF - Thing indexing is disabled.\n    thingConnectivityIndexingMode (string) --Thing connectivity indexing mode. Valid values are:\n    STATUS - Your thing index will contain connectivity status. In order to enable thing connectivity indexing, thingIndexingMode must not be set to OFF.\n    OFF - Thing connectivity status indexing is disabled.\n    \n\n    :type thingGroupIndexingConfiguration: dict\n    :param thingGroupIndexingConfiguration: Thing group indexing configuration.\n    thingGroupIndexingMode (string) -- [REQUIRED]Thing group indexing mode.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef update_job(jobId=None, description=None, presignedUrlConfig=None, jobExecutionsRolloutConfig=None, abortConfig=None, timeoutConfig=None):\n    \"\"\"\n    Updates supported fields of the specified job.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_job(\n        jobId='string',\n        description='string',\n        presignedUrlConfig={\n            'roleArn': 'string',\n            'expiresInSec': 123\n        },\n        jobExecutionsRolloutConfig={\n            'maximumPerMinute': 123,\n            'exponentialRate': {\n                'baseRatePerMinute': 123,\n                'incrementFactor': 123.0,\n                'rateIncreaseCriteria': {\n                    'numberOfNotifiedThings': 123,\n                    'numberOfSucceededThings': 123\n                }\n            }\n        },\n        abortConfig={\n            'criteriaList': [\n                {\n                    'failureType': 'FAILED'|'REJECTED'|'TIMED_OUT'|'ALL',\n                    'action': 'CANCEL',\n                    'thresholdPercentage': 123.0,\n                    'minNumberOfExecutedThings': 123\n                },\n            ]\n        },\n        timeoutConfig={\n            'inProgressTimeoutInMinutes': 123\n        }\n    )\n    \n    \n    :type jobId: string\n    :param jobId: [REQUIRED]\n    The ID of the job to be updated.\n    \n\n    :type description: string\n    :param description: A short text description of the job.\n\n    :type presignedUrlConfig: dict\n    :param presignedUrlConfig: Configuration information for pre-signed S3 URLs.\n    roleArn (string) --The ARN of an IAM role that grants permission to download files from the S3 bucket where the job data/updates are stored. The role must also grant permission for IoT to download the files.\n    expiresInSec (integer) --How long (in seconds) pre-signed URLs are valid. Valid values are 60 - 3600, the default value is 3600 seconds. Pre-signed URLs are generated when Jobs receives an MQTT request for the job document.\n    \n\n    :type jobExecutionsRolloutConfig: dict\n    :param jobExecutionsRolloutConfig: Allows you to create a staged rollout of the job.\n    maximumPerMinute (integer) --The maximum number of things that will be notified of a pending job, per minute. This parameter allows you to create a staged rollout.\n    exponentialRate (dict) --The rate of increase for a job rollout. This parameter allows you to define an exponential rate for a job rollout.\n    baseRatePerMinute (integer) -- [REQUIRED]The minimum number of things that will be notified of a pending job, per minute at the start of job rollout. 
This parameter allows you to define the initial rate of rollout.\n    incrementFactor (float) -- [REQUIRED]The exponential factor to increase the rate of rollout for a job.\n    AWS IoT supports up to one digit after the decimal (for example, 1.5, but not 1.55).\n    rateIncreaseCriteria (dict) -- [REQUIRED]The criteria to initiate the increase in rate of rollout for a job.\n    numberOfNotifiedThings (integer) --The threshold for number of notified things that will initiate the increase in rate of rollout.\n    numberOfSucceededThings (integer) --The threshold for number of succeeded things that will initiate the increase in rate of rollout.\n    \n    \n\n    :type abortConfig: dict\n    :param abortConfig: Allows you to create criteria to abort a job.\n    criteriaList (list) -- [REQUIRED]The list of abort criteria to define rules to abort the job.\n    (dict) --Details of abort criteria to define rules to abort the job.\n    failureType (string) -- [REQUIRED]The type of job execution failure to define a rule to initiate a job abort.\n    action (string) -- [REQUIRED]The type of abort action to initiate a job abort.\n    thresholdPercentage (float) -- [REQUIRED]The threshold as a percentage of the total number of executed things that will initiate a job abort.\n    AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).\n    minNumberOfExecutedThings (integer) -- [REQUIRED]Minimum number of executed things before evaluating an abort rule.\n    \n    \n\n    :type timeoutConfig: dict\n    :param timeoutConfig: Specifies the amount of time each device has to finish its execution of the job. The timer is started when the job execution status is set to IN_PROGRESS . If the job execution status is not set to another terminal state before the time expires, it will be automatically set to TIMED_OUT .\n    inProgressTimeoutInMinutes (integer) --Specifies the amount of time, in minutes, this device has to finish execution of this job. The timeout interval can be anywhere between 1 minute and 7 days (1 to 10080 minutes). The in progress timer can't be updated and will apply to all job executions for the job. 
Whenever a job execution remains in the IN_PROGRESS status for longer than this interval, the job execution will fail and switch to the terminal TIMED_OUT status.\n \n\n \"\"\"\n pass\n\ndef update_role_alias(roleAlias=None, roleArn=None, credentialDurationSeconds=None):\n \"\"\"\n Updates a role alias.\n See also: AWS API Documentation\n \n \n :example: response = client.update_role_alias(\n roleAlias='string',\n roleArn='string',\n credentialDurationSeconds=123\n )\n \n \n :type roleAlias: string\n :param roleAlias: [REQUIRED]\n The role alias to update.\n \n\n :type roleArn: string\n :param roleArn: The role ARN.\n\n :type credentialDurationSeconds: integer\n :param credentialDurationSeconds: The number of seconds the credential will be valid.\n\n :rtype: dict\n :return: {\n 'roleAlias': 'string',\n 'roleAliasArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_scheduled_audit(frequency=None, dayOfMonth=None, dayOfWeek=None, targetCheckNames=None, scheduledAuditName=None):\n \"\"\"\n Updates a scheduled audit, including what checks are performed and how often the audit takes place.\n See also: AWS API Documentation\n \n \n :example: response = client.update_scheduled_audit(\n frequency='DAILY'|'WEEKLY'|'BIWEEKLY'|'MONTHLY',\n dayOfMonth='string',\n dayOfWeek='SUN'|'MON'|'TUE'|'WED'|'THU'|'FRI'|'SAT',\n targetCheckNames=[\n 'string',\n ],\n scheduledAuditName='string'\n )\n \n \n :type frequency: string\n :param frequency: How often the scheduled audit takes place. Can be one of 'DAILY', 'WEEKLY', 'BIWEEKLY' or 'MONTHLY'. The actual start time of each audit is determined by the system.\n\n :type dayOfMonth: string\n :param dayOfMonth: The day of the month on which the scheduled audit takes place. Can be '1' through '31' or 'LAST'. This field is required if the 'frequency' parameter is set to 'MONTHLY'. If days 29-31 are specified, and the month does not have that many days, the audit takes place on the 'LAST' day of the month.\n\n :type dayOfWeek: string\n :param dayOfWeek: The day of the week on which the scheduled audit takes place. Can be one of 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI' or 'SAT'. This field is required if the 'frequency' parameter is set to 'WEEKLY' or 'BIWEEKLY'.\n\n :type targetCheckNames: list\n :param targetCheckNames: Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)\n (string) --An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)\n \n\n :type scheduledAuditName: string\n :param scheduledAuditName: [REQUIRED]\n The name of the scheduled audit. (Max. 
128 chars)\n \n\n :rtype: dict\n :return: {\n 'scheduledAuditArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_security_profile(securityProfileName=None, securityProfileDescription=None, behaviors=None, alertTargets=None, expectedVersion=None):\n \"\"\"\n Updates a Device Defender security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.update_security_profile(\n securityProfileName='string',\n securityProfileDescription='string',\n behaviors=[\n {\n 'name': 'string',\n 'metric': 'string',\n 'criteria': {\n 'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n 'value': {\n 'count': 123,\n 'cidrs': [\n 'string',\n ],\n 'ports': [\n 123,\n ]\n },\n 'durationSeconds': 123\n }\n },\n ],\n alertTargets={\n 'string': {\n 'alertTargetArn': 'string',\n 'roleArn': 'string'\n }\n },\n expectedVersion=123\n )\n \n \n :type securityProfileName: string\n :param securityProfileName: [REQUIRED]\n The name of the security profile you want to update.\n \n\n :type securityProfileDescription: string\n :param securityProfileDescription: A description of the security profile.\n\n :type behaviors: list\n :param behaviors: Specifies the behaviors that, when violated by a device (thing), cause an alert.\n (dict) --A Device Defender security profile behavior.\n name (string) -- [REQUIRED]The name you have given to the behavior.\n metric (string) --What is measured by the behavior.\n criteria (dict) --The criteria that determine if a device is behaving normally in regard to the metric .\n comparisonOperator (string) --The operator that relates the thing measured (metric ) to the criteria (value ).\n value (dict) --The value to be compared with the metric .\n count (integer) --If the comparisonOperator calls for a numeric value, use this to specify that numeric value to be compared with the metric .\n cidrs (list) --If the comparisonOperator calls for a set of CIDRs, use this to specify that set to be compared with the metric .\n (string) --\n ports (list) --If the comparisonOperator calls for a set of ports, use this to specify that set to be compared with the metric .\n (integer) --\n \n durationSeconds (integer) --Use this to specify the period of time over which the behavior is evaluated, for those criteria which have a time dimension (for example, NUM_MESSAGES_SENT ).\n \n \n\n :type alertTargets: dict\n :param alertTargets: Where the alerts are sent. (Alerts are always sent to the console.)\n (string) --The type of alert target: one of 'SNS'.\n (dict) --A structure containing the alert target ARN and the role ARN.\n alertTargetArn (string) -- [REQUIRED]The ARN of the notification target to which alerts are sent.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants permission to send alerts to the notification target.\n \n \n\n :type expectedVersion: integer\n :param expectedVersion: The expected version of the security profile. A new version is generated whenever the security profile is updated. 
If you specify a value that is different than the actual version, a VersionConflictException is thrown.\n\n    :rtype: dict\n    :return: {\n        'securityProfileName': 'string',\n        'securityProfileArn': 'string',\n        'securityProfileDescription': 'string',\n        'behaviors': [\n            {\n                'name': 'string',\n                'metric': 'string',\n                'criteria': {\n                    'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n                    'value': {\n                        'count': 123,\n                        'cidrs': [\n                            'string',\n                        ],\n                        'ports': [\n                            123,\n                        ]\n                    },\n                    'durationSeconds': 123\n                }\n            },\n        ],\n        'alertTargets': {\n            'string': {\n                'alertTargetArn': 'string',\n                'roleArn': 'string'\n            }\n        },\n        'version': 123,\n        'creationDate': datetime(2015, 1, 1),\n        'lastModifiedDate': datetime(2015, 1, 1)\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef update_stream(streamId=None, description=None, files=None, roleArn=None):\n    \"\"\"\n    Updates an existing stream. The stream version will be incremented by one.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_stream(\n        streamId='string',\n        description='string',\n        files=[\n            {\n                'fileId': 123,\n                's3Location': {\n                    'bucket': 'string',\n                    'key': 'string',\n                    'version': 'string'\n                }\n            },\n        ],\n        roleArn='string'\n    )\n    \n    \n    :type streamId: string\n    :param streamId: [REQUIRED]\n    The stream ID.\n    \n\n    :type description: string\n    :param description: The description of the stream.\n\n    :type files: list\n    :param files: The files associated with the stream.\n    (dict) --Represents a file to stream.\n    fileId (integer) --The file ID.\n    s3Location (dict) --The location of the file in S3.\n    bucket (string) --The S3 bucket.\n    key (string) --The S3 key.\n    version (string) --The S3 bucket version.\n    \n    \n\n    :type roleArn: string\n    :param roleArn: An IAM role that the IoT service principal assumes in order to access your S3 files.\n\n    :rtype: dict\n    :return: {\n        'streamId': 'string',\n        'streamArn': 'string',\n        'description': 'string',\n        'streamVersion': 123\n    }\n    \n    \n    \"\"\"\n    pass\n\n
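# Illustrative usage sketch (editor's addition, not part of the generated stubs): pointing an existing OTA update stream at a new S3 object, following the update_stream signature documented above. The stream ID, bucket, key and role ARN are hypothetical placeholders.\ndef _example_update_stream():\n    import boto3  # requires valid AWS credentials when actually called\n    client = boto3.client('iot')\n    # Increments the stream version by one, per the contract documented above.\n    return client.update_stream(\n        streamId='example-stream',\n        description='firmware v2',\n        files=[{'fileId': 0, 's3Location': {'bucket': 'example-bucket', 'key': 'fw-v2.bin'}}],\n        roleArn='arn:aws:iam::123456789012:role/example-stream-role'\n    )\n\n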
def update_thing(thingName=None, thingTypeName=None, attributePayload=None, expectedVersion=None, removeThingType=None):\n    \"\"\"\n    Updates the data for a thing.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_thing(\n        thingName='string',\n        thingTypeName='string',\n        attributePayload={\n            'attributes': {\n                'string': 'string'\n            },\n            'merge': True|False\n        },\n        expectedVersion=123,\n        removeThingType=True|False\n    )\n    \n    \n    :type thingName: string\n    :param thingName: [REQUIRED]\n    The name of the thing to update.\n    \n\n    :type thingTypeName: string\n    :param thingTypeName: The name of the thing type.\n\n    :type attributePayload: dict\n    :param attributePayload: A list of thing attributes, a JSON string containing name-value pairs. For example:\n    {\'attributes\':{\'name1\':\'value2\'}}\n    This data is used to add new attributes or update existing attributes.\n    attributes (dict) --A JSON string containing up to three key-value pairs in JSON format. For example:\n    {\'attributes\':{\'string1\':\'string2\'}}\n    (string) --\n    (string) --\n    \n    merge (boolean) --Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.\n    To remove an attribute, call UpdateThing with an empty attribute value.\n    Note\n    The merge attribute is only valid when calling UpdateThing .\n    \n\n    :type expectedVersion: integer\n    :param expectedVersion: The expected version of the thing record in the registry. If the version of the record in the registry does not match the expected version specified in the request, the UpdateThing request is rejected with a VersionConflictException .\n\n    :type removeThingType: boolean\n    :param removeThingType: Remove a thing type association. If true , the association is removed.\n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef update_thing_group(thingGroupName=None, thingGroupProperties=None, expectedVersion=None):\n    \"\"\"\n    Update a thing group.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_thing_group(\n        thingGroupName='string',\n        thingGroupProperties={\n            'thingGroupDescription': 'string',\n            'attributePayload': {\n                'attributes': {\n                    'string': 'string'\n                },\n                'merge': True|False\n            }\n        },\n        expectedVersion=123\n    )\n    \n    \n    :type thingGroupName: string\n    :param thingGroupName: [REQUIRED]\n    The thing group to update.\n    \n\n    :type thingGroupProperties: dict\n    :param thingGroupProperties: [REQUIRED]\n    The thing group properties.\n    thingGroupDescription (string) --The thing group description.\n    attributePayload (dict) --The thing group attributes in JSON format.\n    attributes (dict) --A JSON string containing up to three key-value pairs in JSON format. For example:\n    {\'attributes\':{\'string1\':\'string2\'}}\n    (string) --\n    (string) --\n    \n    merge (boolean) --Specifies whether the list of attributes provided in the AttributePayload is merged with the attributes stored in the registry, instead of overwriting them.\n    To remove an attribute, call UpdateThing with an empty attribute value.\n    Note\n    The merge attribute is only valid when calling UpdateThing .\n    \n    \n\n    :type expectedVersion: integer\n    :param expectedVersion: The expected version of the thing group. If this does not match the version of the thing group being updated, the update will fail.\n\n    :rtype: dict\n    :return: {\n        'version': 123\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef update_thing_groups_for_thing(thingName=None, thingGroupsToAdd=None, thingGroupsToRemove=None, overrideDynamicGroups=None):\n    \"\"\"\n    Updates the groups to which the thing belongs.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_thing_groups_for_thing(\n        thingName='string',\n        thingGroupsToAdd=[\n            'string',\n        ],\n        thingGroupsToRemove=[\n            'string',\n        ],\n        overrideDynamicGroups=True|False\n    )\n    \n    \n    :type thingName: string\n    :param thingName: The thing whose group memberships will be updated.\n\n    :type thingGroupsToAdd: list\n    :param thingGroupsToAdd: The groups to which the thing will be added.\n    (string) --\n    \n\n    :type thingGroupsToRemove: list\n    :param thingGroupsToRemove: The groups from which the thing will be removed.\n    (string) --\n    \n\n    :type overrideDynamicGroups: boolean\n    :param overrideDynamicGroups: Override dynamic thing groups with static thing groups when the 10-group limit is reached. 
If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef validate_security_profile_behaviors(behaviors=None):\n \"\"\"\n Validates a Device Defender security profile behaviors specification.\n See also: AWS API Documentation\n \n \n :example: response = client.validate_security_profile_behaviors(\n behaviors=[\n {\n 'name': 'string',\n 'metric': 'string',\n 'criteria': {\n 'comparisonOperator': 'less-than'|'less-than-equals'|'greater-than'|'greater-than-equals'|'in-cidr-set'|'not-in-cidr-set'|'in-port-set'|'not-in-port-set',\n 'value': {\n 'count': 123,\n 'cidrs': [\n 'string',\n ],\n 'ports': [\n 123,\n ]\n },\n 'durationSeconds': 123\n }\n },\n ]\n )\n \n \n :type behaviors: list\n :param behaviors: [REQUIRED]\n Specifies the behaviors that, when violated by a device (thing), cause an alert.\n (dict) --A Device Defender security profile behavior.\n name (string) -- [REQUIRED]The name you have given to the behavior.\n metric (string) --What is measured by the behavior.\n criteria (dict) --The criteria that determine if a device is behaving normally in regard to the metric .\n comparisonOperator (string) --The operator that relates the thing measured (metric ) to the criteria (value ).\n value (dict) --The value to be compared with the metric .\n count (integer) --If the comparisonOperator calls for a numeric value, use this to specify that numeric value to be compared with the metric .\n cidrs (list) --If the comparisonOperator calls for a set of CIDRs, use this to specify that set to be compared with the metric .\n (string) --\n ports (list) --If the comparisonOperator calls for a set of ports, use this to specify that set to be compared with the metric .\n (integer) --\n \n durationSeconds (integer) --Use this to specify the period of time over which the behavior is evaluated, for those criteria which have a time dimension (for example, NUM_MESSAGES_SENT ).\n \n \n\n :rtype: dict\n :return: {\n 'valid': True|False,\n 'validationErrors': [\n {\n 'errorMessage': 'string'\n },\n ]\n }\n \n \n :returns: \n (integer) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6369057893753052, "alphanum_fraction": 0.6401801109313965, "avg_line_length": 32.66535568237305, "blob_id": "66768990cbe13834e6289d9784ade2af39212e5b", "content_id": "57289760f5ab6fa4dff914c1a568ceee0b1c2a10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17103, "license_type": "permissive", "max_line_length": 467, "num_lines": 508, "path": "/pyboto3/mediastore.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT 
LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_container(ContainerName=None):\n \"\"\"\n Creates a storage container to hold objects. A container is similar to a bucket in the Amazon S3 service.\n See also: AWS API Documentation\n \n \n :example: response = client.create_container(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name for the container. The name must be from 1 to 255 characters. Container names must be unique to your AWS account within a specific region. As an example, you could create a container named movies in every region, as long as you don t have an existing container with that name.\n \n\n :rtype: dict\n :return: {\n 'Container': {\n 'Endpoint': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'ARN': 'string',\n 'Name': 'string',\n 'Status': 'ACTIVE'|'CREATING'|'DELETING'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_container(ContainerName=None):\n \"\"\"\n Deletes the specified container. Before you make a DeleteContainer request, delete any objects in the container or in any folders in the container. You can delete only empty containers.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_container(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_container_policy(ContainerName=None):\n \"\"\"\n Deletes the access policy that is associated with the specified container.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_container_policy(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container that holds the policy.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_cors_policy(ContainerName=None):\n \"\"\"\n Deletes the cross-origin resource sharing (CORS) configuration information that is set for the container.\n To use this operation, you must have permission to perform the MediaStore:DeleteCorsPolicy action. 
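As an illustration, a minimal sketch (the container name is hypothetical, and a configured boto3 MediaStore client is assumed):\n import boto3\n client = boto3.client('mediastore')\n client.delete_cors_policy(ContainerName='movies')\n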
The container owner has this permission by default and can grant this permission to others.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_cors_policy(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container to remove the policy from.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_lifecycle_policy(ContainerName=None):\n \"\"\"\n Removes an object lifecycle policy from a container.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_lifecycle_policy(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container that holds the object lifecycle policy.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_container(ContainerName=None):\n \"\"\"\n Retrieves the properties of the requested container. This request is commonly used to retrieve the endpoint of a container. An endpoint is a value assigned by the service when a new container is created. A container's endpoint does not change after it has been assigned. The DescribeContainer request returns a single Container object based on ContainerName . To return all Container objects that are associated with a specified AWS account, use ListContainers .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_container(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: The name of the container to query.\n\n :rtype: dict\n :return: {\n 'Container': {\n 'Endpoint': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'ARN': 'string',\n 'Name': 'string',\n 'Status': 'ACTIVE'|'CREATING'|'DELETING'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_container_policy(ContainerName=None):\n \"\"\"\n Retrieves the access policy for the specified container. For information about the data that is included in an access policy, see the AWS Identity and Access Management User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.get_container_policy(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container.\n \n\n :rtype: dict\n :return: {\n 'Policy': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_cors_policy(ContainerName=None):\n \"\"\"\n Returns the cross-origin resource sharing (CORS) configuration information that is set for the container.\n To use this operation, you must have permission to perform the MediaStore:GetCorsPolicy action. 
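For example, a short sketch that reads the configured rules back (the container name is illustrative, not part of this API's contract):\n import boto3\n client = boto3.client('mediastore')\n for rule in client.get_cors_policy(ContainerName='movies')['CorsPolicy']:\n print(rule['AllowedOrigins'], rule.get('AllowedMethods'))\n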
By default, the container owner has this permission and can grant it to others.\n See also: AWS API Documentation\n \n \n :example: response = client.get_cors_policy(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container that the policy is assigned to.\n \n\n :rtype: dict\n :return: {\n 'CorsPolicy': [\n {\n 'AllowedOrigins': [\n 'string',\n ],\n 'AllowedMethods': [\n 'PUT'|'GET'|'DELETE'|'HEAD',\n ],\n 'AllowedHeaders': [\n 'string',\n ],\n 'MaxAgeSeconds': 123,\n 'ExposeHeaders': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_lifecycle_policy(ContainerName=None):\n \"\"\"\n Retrieves the object lifecycle policy that is assigned to a container.\n See also: AWS API Documentation\n \n \n :example: response = client.get_lifecycle_policy(\n ContainerName='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container that the object lifecycle policy is assigned to.\n \n\n :rtype: dict\n :return: {\n 'LifecyclePolicy': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_containers(NextToken=None, MaxResults=None):\n \"\"\"\n Lists the properties of all containers in AWS Elemental MediaStore.\n You can query to receive all the containers in one response. Or you can include the MaxResults parameter to receive a limited number of containers in each response. In this case, the response includes a token. To get the next set of containers, send the command again, this time with the NextToken parameter (with the returned token as its value). The next set of responses appears, with a token if there are still more containers to receive.\n See also DescribeContainer , which gets the properties of one container.\n See also: AWS API Documentation\n \n \n :example: response = client.list_containers(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: Only if you used MaxResults in the first command, enter the token (which was included in the previous response) to obtain the next set of containers. This token is included in a response only if there actually are more containers to list.\n\n :type MaxResults: integer\n :param MaxResults: Enter the maximum number of containers in the response. 
Use from 1 to 255 characters.\n\n :rtype: dict\n :return: {\n 'Containers': [\n {\n 'Endpoint': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'ARN': 'string',\n 'Name': 'string',\n 'Status': 'ACTIVE'|'CREATING'|'DELETING'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_container_policy(ContainerName=None, Policy=None):\n \"\"\"\n Creates an access policy for the specified container to restrict the users and clients that can access it. For information about the data that is included in an access policy, see the AWS Identity and Access Management User Guide .\n For this release of the REST API, you can create only one policy for a container. If you enter PutContainerPolicy twice, the second command modifies the existing policy.\n See also: AWS API Documentation\n \n \n :example: response = client.put_container_policy(\n ContainerName='string',\n Policy='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container.\n \n\n :type Policy: string\n :param Policy: [REQUIRED]\n The contents of the policy, which includes the following:\n One Version tag\n One Statement tag that contains the standard tags for the policy.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_cors_policy(ContainerName=None, CorsPolicy=None):\n \"\"\"\n Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n To enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.\n See also: AWS API Documentation\n \n \n :example: response = client.put_cors_policy(\n ContainerName='string',\n CorsPolicy=[\n {\n 'AllowedOrigins': [\n 'string',\n ],\n 'AllowedMethods': [\n 'PUT'|'GET'|'DELETE'|'HEAD',\n ],\n 'AllowedHeaders': [\n 'string',\n ],\n 'MaxAgeSeconds': 123,\n 'ExposeHeaders': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container that you want to assign the CORS policy to.\n \n\n :type CorsPolicy: list\n :param CorsPolicy: [REQUIRED]\n The CORS policy to apply to the container.\n (dict) --A rule for a CORS policy. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.\n AllowedOrigins (list) -- [REQUIRED]One or more response headers that you want users to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).\n Each CORS rule must have at least one AllowedOrigins element. The string value can include only one wildcard character (*), for example, http://*.example.com. 
Additionally, you can specify only one wildcard character to allow cross-origin access for all origins.\n (string) --\n AllowedMethods (list) --Identifies an HTTP method that the origin that is specified in the rule is allowed to execute.\n Each CORS rule must contain at least one AllowedMethods and one AllowedOrigins element.\n (string) --\n AllowedHeaders (list) -- [REQUIRED]Specifies which headers are allowed in a preflight OPTIONS request through the Access-Control-Request-Headers header. Each header name that is specified in Access-Control-Request-Headers must have a corresponding entry in the rule. Only the headers that were requested are sent back.\n This element can contain only one wildcard character (*).\n (string) --\n MaxAgeSeconds (integer) --The time in seconds that your browser caches the preflight response for the specified resource.\n A CORS rule can have only one MaxAgeSeconds element.\n ExposeHeaders (list) --One or more headers in the response that you want users to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).\n This element is optional for each rule.\n (string) --\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_lifecycle_policy(ContainerName=None, LifecyclePolicy=None):\n \"\"\"\n Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy.\n See also: AWS API Documentation\n \n \n :example: response = client.put_lifecycle_policy(\n ContainerName='string',\n LifecyclePolicy='string'\n )\n \n \n :type ContainerName: string\n :param ContainerName: [REQUIRED]\n The name of the container that you want to assign the object lifecycle policy to.\n \n\n :type LifecyclePolicy: string\n :param LifecyclePolicy: [REQUIRED]\n The object lifecycle policy to apply to the container.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.572035014629364, "alphanum_fraction": 0.5767879486083984, "avg_line_length": 45.54591369628906, "blob_id": "a4d6773a5739cc2b32e03b27c4647a0b89cd41e7", "content_id": "77b74fef67616ff9c0d997f6ccc1ffc1e99a357a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120136, "license_type": "permissive", "max_line_length": 717, "num_lines": 2581, "path": "/pyboto3/rekognition.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n    \"\"\"\n    Check if an operation can be paginated.\n    \n    :type operation_name: string\n    :param operation_name: The operation name. This is the same name\n            as the method name on the client. For example, if the\n            method name is create_foo, and you'd normally invoke the\n            operation as client.create_foo(**kwargs), if the\n            create_foo operation can be paginated, you can use the\n            call client.get_paginator('create_foo').\n\n    \"\"\"\n    pass\n\ndef compare_faces(SourceImage=None, TargetImage=None, SimilarityThreshold=None):\n    \"\"\"\n    Compares a face in the source input image with each of the 100 largest faces detected in the target input image.\n    You pass the input and target images either as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.\n    In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.\n    If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.\n    If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.\n    For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide.\n    This operation requires permissions to perform the rekognition:CompareFaces action.\n    See also: AWS API Documentation\n    \n    Examples\n    This operation compares the largest face detected in the source image with each face detected in the target image.\n    Expected Output:\n    \n    :example: response = client.compare_faces(\n        SourceImage={\n            'Bytes': b'bytes',\n            'S3Object': {\n                'Bucket': 'string',\n                'Name': 'string',\n                'Version': 'string'\n            }\n        },\n        TargetImage={\n            'Bytes': b'bytes',\n            'S3Object': {\n                'Bucket': 'string',\n                'Name': 'string',\n                'Version': 'string'\n            }\n        },\n        SimilarityThreshold=...\n    )\n    \n    \n    :type SourceImage: dict\n    :param SourceImage: [REQUIRED]\n            The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n            Bytes (bytes) --Blob of image bytes up to 5 MBs.\n            S3Object (dict) --Identifies an S3 object as the image source.\n            Bucket (string) --Name of the S3 bucket.\n            Name (string) --S3 object key name.\n            Version (string) --If the bucket is versioning enabled, you can specify the object version.\n            \n            \n\n    :type TargetImage: dict\n    :param TargetImage: [REQUIRED]\n            The target image as base64-encoded bytes or an S3 object.
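For instance, a minimal sketch comparing two S3-hosted images (the bucket and key names are illustrative):\n import boto3\n client = boto3.client('rekognition')\n resp = client.compare_faces(\n SourceImage={'S3Object': {'Bucket': 'my-bucket', 'Name': 'source.jpg'}},\n TargetImage={'S3Object': {'Bucket': 'my-bucket', 'Name': 'target.jpg'}},\n SimilarityThreshold=80.0)\n for match in resp['FaceMatches']:\n print(match['Similarity'])\n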
If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n Bytes (bytes) --Blob of image bytes up to 5 MBs.\n S3Object (dict) --Identifies an S3 object as the image source.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type SimilarityThreshold: float\n :param SimilarityThreshold: The minimum level of confidence in the face matches that a match must meet to be included in the FaceMatches array.\n\n :rtype: dict\n :return: {\n 'SourceImageFace': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...\n },\n 'FaceMatches': [\n {\n 'Similarity': ...,\n 'Face': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...,\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n }\n }\n },\n ],\n 'UnmatchedFaces': [\n {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...,\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n }\n },\n ],\n 'SourceImageOrientationCorrection': 'ROTATE_0'|'ROTATE_90'|'ROTATE_180'|'ROTATE_270',\n 'TargetImageOrientationCorrection': 'ROTATE_0'|'ROTATE_90'|'ROTATE_180'|'ROTATE_270'\n }\n \n \n \"\"\"\n pass\n\ndef create_collection(CollectionId=None):\n \"\"\"\n Creates a collection in an AWS Region. You can add faces to the collection using the operation.\n For example, you might create collections, one for each of your application users. A user can then index faces using the IndexFaces operation and persist results in a specific collection. 
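A minimal sketch of that flow (the collection ID and image location are illustrative):\n import boto3\n client = boto3.client('rekognition')\n client.create_collection(CollectionId='user-42-faces')\n client.index_faces(\n CollectionId='user-42-faces',\n Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'selfie.jpg'}})\n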
Then, a user can search the collection for faces in the user-specific container.\n When you create a collection, it is associated with the latest version of the face model version.\n This operation requires permissions to perform the rekognition:CreateCollection action.\n See also: AWS API Documentation\n \n Examples\n This operation creates a Rekognition collection for storing image data.\n Expected Output:\n \n :example: response = client.create_collection(\n CollectionId='string'\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n ID for the collection that you are creating.\n \n\n :rtype: dict\n :return: {\n 'StatusCode': 123,\n 'CollectionArn': 'string',\n 'FaceModelVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_stream_processor(Input=None, Output=None, Name=None, Settings=None, RoleArn=None):\n \"\"\"\n Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video.\n Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition Video sends analysis results to Amazon Kinesis Data Streams.\n You provide as input a Kinesis video stream (Input ) and a Kinesis data stream (Output ) stream. You also specify the face recognition criteria in Settings . For example, the collection containing faces that you want to recognize. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling with the Name field.\n After you have finished analyzing a streaming video, use to stop processing. You can delete the stream processor by calling .\n See also: AWS API Documentation\n \n \n :example: response = client.create_stream_processor(\n Input={\n 'KinesisVideoStream': {\n 'Arn': 'string'\n }\n },\n Output={\n 'KinesisDataStream': {\n 'Arn': 'string'\n }\n },\n Name='string',\n Settings={\n 'FaceSearch': {\n 'CollectionId': 'string',\n 'FaceMatchThreshold': ...\n }\n },\n RoleArn='string'\n )\n \n \n :type Input: dict\n :param Input: [REQUIRED]\n Kinesis video stream stream that provides the source streaming video. If you are using the AWS CLI, the parameter name is StreamProcessorInput .\n KinesisVideoStream (dict) --The Kinesis video stream input stream for the source streaming video.\n Arn (string) --ARN of the Kinesis video stream stream that streams the source video.\n \n \n\n :type Output: dict\n :param Output: [REQUIRED]\n Kinesis data stream stream to which Amazon Rekognition Video puts the analysis results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput .\n KinesisDataStream (dict) --The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results.\n Arn (string) --ARN of the output Amazon Kinesis Data Streams stream.\n \n \n\n :type Name: string\n :param Name: [REQUIRED]\n An identifier you assign to the stream processor. You can use Name to manage the stream processor. For example, you can get the current status of the stream processor by calling . Name is idempotent.\n \n\n :type Settings: dict\n :param Settings: [REQUIRED]\n Face recognition input parameters to be used by the stream processor. 
Includes the collection to use for face recognition and the face attributes to detect.\n FaceSearch (dict) --Face search settings to use on a streaming video.\n CollectionId (string) --The ID of a collection that contains faces that you want to search for.\n FaceMatchThreshold (float) --Minimum face match confidence score that must be met to return a result for a recognized face. Default is 70. 0 is the lowest confidence. 100 is the highest confidence.\n \n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n ARN of the IAM role that allows access to the stream processor.\n \n\n :rtype: dict\n :return: {\n 'StreamProcessorArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_collection(CollectionId=None):\n \"\"\"\n Deletes the specified collection. Note that this operation removes all faces in the collection. For an example, see delete-collection-procedure .\n This operation requires permissions to perform the rekognition:DeleteCollection action.\n See also: AWS API Documentation\n \n Examples\n This operation deletes a Rekognition collection.\n Expected Output:\n \n :example: response = client.delete_collection(\n CollectionId='string'\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n ID of the collection to delete.\n \n\n :rtype: dict\n :return: {\n 'StatusCode': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_faces(CollectionId=None, FaceIds=None):\n \"\"\"\n Deletes faces from a collection. You specify a collection ID and an array of face IDs to remove from the collection.\n This operation requires permissions to perform the rekognition:DeleteFaces action.\n See also: AWS API Documentation\n \n Examples\n This operation deletes one or more faces from a Rekognition collection.\n Expected Output:\n \n :example: response = client.delete_faces(\n CollectionId='string',\n FaceIds=[\n 'string',\n ]\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n Collection from which to remove the specific faces.\n \n\n :type FaceIds: list\n :param FaceIds: [REQUIRED]\n An array of face IDs to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DeletedFaces': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_stream_processor(Name=None):\n \"\"\"\n Deletes the stream processor identified by Name . You assign the value for Name when you create the stream processor with . You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_stream_processor(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the stream processor you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_collection(CollectionId=None):\n \"\"\"\n Describes the specified collection. 
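A quick sketch of a typical call (the collection ID is illustrative):\n import boto3\n client = boto3.client('rekognition')\n resp = client.describe_collection(CollectionId='user-42-faces')\n print(resp['FaceCount'], resp['FaceModelVersion'])\n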
You can use DescribeCollection to get information, such as the number of faces indexed into a collection and the version of the model used by the collection for face detection.\n    For more information, see Describing a Collection in the Amazon Rekognition Developer Guide.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_collection(\n        CollectionId='string'\n    )\n    \n    \n    :type CollectionId: string\n    :param CollectionId: [REQUIRED]\n            The ID of the collection to describe.\n            \n\n    :rtype: dict\n    :return: {\n        'FaceCount': 123,\n        'FaceModelVersion': 'string',\n        'CollectionARN': 'string',\n        'CreationTimestamp': datetime(2015, 1, 1)\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef describe_stream_processor(Name=None):\n    \"\"\"\n    Provides information about a stream processor created by CreateStreamProcessor . You can get information about the input and output streams, the input parameters for the face recognition being performed, and the current status of the stream processor.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_stream_processor(\n        Name='string'\n    )\n    \n    \n    :type Name: string\n    :param Name: [REQUIRED]\n            Name of the stream processor for which you want information.\n            \n\n    :rtype: dict\n    :return: {\n        'Name': 'string',\n        'StreamProcessorArn': 'string',\n        'Status': 'STOPPED'|'STARTING'|'RUNNING'|'FAILED'|'STOPPING',\n        'StatusMessage': 'string',\n        'CreationTimestamp': datetime(2015, 1, 1),\n        'LastUpdateTimestamp': datetime(2015, 1, 1),\n        'Input': {\n            'KinesisVideoStream': {\n                'Arn': 'string'\n            }\n        },\n        'Output': {\n            'KinesisDataStream': {\n                'Arn': 'string'\n            }\n        },\n        'RoleArn': 'string',\n        'Settings': {\n            'FaceSearch': {\n                'CollectionId': 'string',\n                'FaceMatchThreshold': ...\n            }\n        }\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef detect_faces(Image=None, Attributes=None):\n    \"\"\"\n    Detects faces within an image that is provided as input.\n    The face-detection algorithm is most effective on frontal faces. For non-frontal or obscured faces, the algorithm might not detect the faces or might detect faces with lower confidence.\n    You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.\n    This operation requires permissions to perform the rekognition:DetectFaces action.\n    See also: AWS API Documentation\n    \n    Examples\n    This operation detects faces in an image stored in an AWS S3 bucket.\n    Expected Output:\n    \n    :example: response = client.detect_faces(\n        Image={\n            'Bytes': b'bytes',\n            'S3Object': {\n                'Bucket': 'string',\n                'Name': 'string',\n                'Version': 'string'\n            }\n        },\n        Attributes=[\n            'DEFAULT'|'ALL',\n        ]\n    )\n    \n    \n    :type Image: dict\n    :param Image: [REQUIRED]\n            The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n            Bytes (bytes) --Blob of image bytes up to 5 MBs.\n            S3Object (dict) --Identifies an S3 object as the image source.\n            Bucket (string) --Name of the S3 bucket.\n            Name (string) --S3 object key name.\n            Version (string) --If the bucket is versioning enabled, you can specify the object version.\n            \n            \n\n    :type Attributes: list\n    :param Attributes: An array of facial attributes you want to be returned. This can be the default list of attributes or all attributes.
If you don't specify a value for Attributes or if you specify ['DEFAULT'] , the API returns the following subset of facial attributes: BoundingBox , Confidence , Pose , Quality , and Landmarks . If you provide ['ALL'] , all facial attributes are returned, but the operation takes longer to complete.\n If you provide both, ['ALL', 'DEFAULT'] , the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).\n (string) --\n \n\n :rtype: dict\n :return: {\n 'FaceDetails': [\n {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'AgeRange': {\n 'Low': 123,\n 'High': 123\n },\n 'Smile': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Eyeglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Sunglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Gender': {\n 'Value': 'Male'|'Female',\n 'Confidence': ...\n },\n 'Beard': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Mustache': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'EyesOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'MouthOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Emotions': [\n {\n 'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n 'Confidence': ...\n },\n ],\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n },\n 'Confidence': ...\n },\n ],\n 'OrientationCorrection': 'ROTATE_0'|'ROTATE_90'|'ROTATE_180'|'ROTATE_270'\n }\n \n \n :returns: \n GetCelebrityRecognition\n GetPersonTracking\n GetFaceSearch\n \n \"\"\"\n pass\n\ndef detect_labels(Image=None, MaxLabels=None, MinConfidence=None):\n \"\"\"\n Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.\n For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.\n You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.\n For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object.\n In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. 
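A short call sketch (the S3 location is illustrative, not part of this API's contract):\n import boto3\n client = boto3.client('rekognition')\n resp = client.detect_labels(\n Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'photo.jpg'}},\n MaxLabels=10, MinConfidence=75.0)\n for label in resp['Labels']:\n print(label['Name'], label['Confidence'])\n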
For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.\n In this example, the detection algorithm more precisely identifies the flower as a tulip.\n In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50%. You can also add the MaxLabels parameter to limit the number of labels returned.\n This is a stateless API operation. That is, the operation does not persist any data.\n This operation requires permissions to perform the rekognition:DetectLabels action.\n See also: AWS API Documentation\n \n Examples\n This operation detects labels in the supplied image\n Expected Output:\n \n :example: response = client.detect_labels(\n Image={\n 'Bytes': b'bytes',\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n MaxLabels=123,\n MinConfidence=...\n )\n \n \n :type Image: dict\n :param Image: [REQUIRED]\n The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n Bytes (bytes) --Blob of image bytes up to 5 MBs.\n S3Object (dict) --Identifies an S3 object as the image source.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type MaxLabels: integer\n :param MaxLabels: Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels.\n\n :type MinConfidence: float\n :param MinConfidence: Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value.\n If MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 50 percent.\n \n\n :rtype: dict\n :return: {\n 'Labels': [\n {\n 'Name': 'string',\n 'Confidence': ...,\n 'Instances': [\n {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...\n },\n ],\n 'Parents': [\n {\n 'Name': 'string'\n },\n ]\n },\n ],\n 'OrientationCorrection': 'ROTATE_0'|'ROTATE_90'|'ROTATE_180'|'ROTATE_270',\n 'LabelModelVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef detect_moderation_labels(Image=None, MinConfidence=None):\n \"\"\"\n Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.\n To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.\n For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.\n You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. 
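A minimal filtering sketch (the S3 names are illustrative, and the decision rule shown is only an example policy):\n import boto3\n client = boto3.client('rekognition')\n resp = client.detect_moderation_labels(\n Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'upload.jpg'}},\n MinConfidence=60.0)\n flagged = [l['Name'] for l in resp['ModerationLabels']]\n if flagged:\n print('Review needed:', flagged)\n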
The image must be either a PNG or JPEG formatted file.\n See also: AWS API Documentation\n \n \n :example: response = client.detect_moderation_labels(\n Image={\n 'Bytes': b'bytes',\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n MinConfidence=...\n )\n \n \n :type Image: dict\n :param Image: [REQUIRED]\n The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n Bytes (bytes) --Blob of image bytes up to 5 MBs.\n S3Object (dict) --Identifies an S3 object as the image source.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type MinConfidence: float\n :param MinConfidence: Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with a confidence level lower than this specified value.\n If you don't specify MinConfidence , the operation returns labels with confidence values greater than or equal to 50 percent.\n \n\n :rtype: dict\n :return: {\n 'ModerationLabels': [\n {\n 'Confidence': ...,\n 'Name': 'string',\n 'ParentName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef detect_text(Image=None):\n \"\"\"\n Detects text in the input image and converts it into machine-readable text.\n Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.\n The DetectText operation returns text in an array of elements, TextDetections . Each TextDetection element provides information about a single word or line of text that was detected in the image.\n A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.\n A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.\n To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.\n To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.\n For more information, see DetectText in the Amazon Rekognition Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.detect_text(\n Image={\n 'Bytes': b'bytes',\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n }\n )\n \n \n :type Image: dict\n :param Image: [REQUIRED]\n The input image as base64-encoded bytes or an Amazon S3 object. 
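For instance, a minimal sketch that separates lines from words (the S3 names are illustrative):\n import boto3\n client = boto3.client('rekognition')\n resp = client.detect_text(\n Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'sign.jpg'}})\n lines = [t['DetectedText'] for t in resp['TextDetections'] if t['Type'] == 'LINE']\n print(lines)\n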
If you use the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.\n            Bytes (bytes) --Blob of image bytes up to 5 MBs.\n            S3Object (dict) --Identifies an S3 object as the image source.\n            Bucket (string) --Name of the S3 bucket.\n            Name (string) --S3 object key name.\n            Version (string) --If the bucket is versioning enabled, you can specify the object version.\n            \n            \n\n    :rtype: dict\n    :return: {\n        'TextDetections': [\n            {\n                'DetectedText': 'string',\n                'Type': 'LINE'|'WORD',\n                'Id': 123,\n                'ParentId': 123,\n                'Confidence': ...,\n                'Geometry': {\n                    'BoundingBox': {\n                        'Width': ...,\n                        'Height': ...,\n                        'Left': ...,\n                        'Top': ...\n                    },\n                    'Polygon': [\n                        {\n                            'X': ...,\n                            'Y': ...\n                        },\n                    ]\n                }\n            },\n        ]\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n    \"\"\"\n    Generate a presigned url given a client, its method, and arguments\n    \n    :type ClientMethod: string\n    :param ClientMethod: The client method to presign for\n\n    :type Params: dict\n    :param Params: The parameters normally passed to\n            ClientMethod.\n\n    :type ExpiresIn: int\n    :param ExpiresIn: The number of seconds the presigned url is valid\n            for. By default it expires in an hour (3600 seconds)\n\n    :type HttpMethod: string\n    :param HttpMethod: The http method to use on the generated url. By\n            default, the http method is whatever is used in the method's model.\n\n    \"\"\"\n    pass\n\ndef get_celebrity_info(Id=None):\n    \"\"\"\n    Gets the name and additional information about a celebrity based on his or her Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty.\n    For more information, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.\n    This operation requires permissions to perform the rekognition:GetCelebrityInfo action.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_celebrity_info(\n        Id='string'\n    )\n    \n    \n    :type Id: string\n    :param Id: [REQUIRED]\n            The ID for the celebrity. You get the celebrity ID from a call to the RecognizeCelebrities operation, which recognizes celebrities in an image.\n            \n\n    :rtype: dict\n    :return: {\n        'Urls': [\n            'string',\n        ],\n        'Name': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef get_celebrity_recognition(JobId=None, MaxResults=None, NextToken=None, SortBy=None):\n    \"\"\"\n    Gets the celebrity recognition results for an Amazon Rekognition Video analysis started by StartCelebrityRecognition .\n    Celebrity recognition in a video is an asynchronous operation. Analysis is started by a call to StartCelebrityRecognition , which returns a job identifier (JobId ). When the celebrity recognition operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartCelebrityRecognition . To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call GetCelebrityRecognition and pass the job identifier (JobId ) from the initial call to StartCelebrityRecognition .\n    For more information, see Working With Stored Videos in the Amazon Rekognition Developer Guide.\n    By default, the Celebrities array is sorted by time (milliseconds from the start of the video). You can also sort the array by celebrity by specifying the value ID in the SortBy input parameter.\n    The CelebrityDetail object includes the celebrity identifier and additional information URLs.
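For instance, a minimal lookup sketch (the Id value is a placeholder, not a real celebrity identifier):\n import boto3\n client = boto3.client('rekognition')\n info = client.get_celebrity_info(Id='1a2b3c')\n print(info['Name'], info['Urls'])\n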
If you don't store the additional information URLs, you can get them later by calling GetCelebrityInfo with the celebrity identifier.\n    No information is returned for faces not recognized as celebrities.\n    Use the MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults , the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetCelebrityRecognition and populate the NextToken request parameter with the token value returned from the previous call to GetCelebrityRecognition .\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_celebrity_recognition(\n        JobId='string',\n        MaxResults=123,\n        NextToken='string',\n        SortBy='ID'|'TIMESTAMP'\n    )\n    \n    \n    :type JobId: string\n    :param JobId: [REQUIRED]\n            Job identifier for the required celebrity recognition analysis. You can get the job identifier from a call to StartCelebrityRecognition .\n            \n\n    :type MaxResults: integer\n    :param MaxResults: Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.\n\n    :type NextToken: string\n    :param NextToken: If the previous response was incomplete (because there are more recognized celebrities to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of celebrities.\n\n    :type SortBy: string\n    :param SortBy: Sort to use for celebrities returned in Celebrities field. Specify ID to sort by the celebrity identifier, specify TIMESTAMP to sort by the time the celebrity was recognized.\n\n    :rtype: dict\n    :return: {\n        'JobStatus': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',\n        'StatusMessage': 'string',\n        'VideoMetadata': {\n            'Codec': 'string',\n            'DurationMillis': 123,\n            'Format': 'string',\n            'FrameRate': ...,\n            'FrameHeight': 123,\n            'FrameWidth': 123\n        },\n        'NextToken': 'string',\n        'Celebrities': [\n            {\n                'Timestamp': 123,\n                'Celebrity': {\n                    'Urls': [\n                        'string',\n                    ],\n                    'Name': 'string',\n                    'Id': 'string',\n                    'Confidence': ...,\n                    'BoundingBox': {\n                        'Width': ...,\n                        'Height': ...,\n                        'Left': ...,\n                        'Top': ...\n                    },\n                    'Face': {\n                        'BoundingBox': {\n                            'Width': ...,\n                            'Height': ...,\n                            'Left': ...,\n                            'Top': ...\n                        },\n                        'AgeRange': {\n                            'Low': 123,\n                            'High': 123\n                        },\n                        'Smile': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'Eyeglasses': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'Sunglasses': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'Gender': {\n                            'Value': 'Male'|'Female',\n                            'Confidence': ...\n                        },\n                        'Beard': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'Mustache': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'EyesOpen': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'MouthOpen': {\n                            'Value': True|False,\n                            'Confidence': ...\n                        },\n                        'Emotions': [\n                            {\n                                'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n                                'Confidence': ...\n                            },\n                        ],\n                        'Landmarks': [\n                            {\n                                'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n                                'X': 
...,\n                                'Y': ...\n                            },\n                        ],\n                        'Pose': {\n                            'Roll': ...,\n                            'Yaw': ...,\n                            'Pitch': ...\n                        },\n                        'Quality': {\n                            'Brightness': ...,\n                            'Sharpness': ...\n                        },\n                        'Confidence': ...\n                    }\n                }\n            },\n        ]\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef get_content_moderation(JobId=None, MaxResults=None, NextToken=None, SortBy=None):\n    \"\"\"\n    Gets the content moderation analysis results for an Amazon Rekognition Video analysis started by StartContentModeration .\n    Content moderation analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration , which returns a job identifier (JobId ). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration . To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call GetContentModeration and pass the job identifier (JobId ) from the initial call to StartContentModeration .\n    For more information, see Working with Stored Videos in the Amazon Rekognition Developer Guide.\n    By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.\n    Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration . If there are more results than specified in MaxResults , the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration .\n    For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.get_content_moderation(\n        JobId='string',\n        MaxResults=123,\n        NextToken='string',\n        SortBy='NAME'|'TIMESTAMP'\n    )\n    \n    \n    :type JobId: string\n    :param JobId: [REQUIRED]\n            The identifier for the content moderation job. Use JobId to identify the job in a subsequent call to GetContentModeration .\n            \n\n    :type MaxResults: integer\n    :param MaxResults: Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.\n\n    :type NextToken: string\n    :param NextToken: If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of content moderation labels.\n\n    :type SortBy: string\n    :param SortBy: Sort to use for elements in the ModerationLabelDetections array. Use TIMESTAMP to sort array elements by the time labels are detected. Use NAME to alphabetically group elements for a label together. Within each label group, the array elements are sorted by detection confidence.
The default sort is by TIMESTAMP .\n\n :rtype: dict\n :return: {\n 'JobStatus': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',\n 'StatusMessage': 'string',\n 'VideoMetadata': {\n 'Codec': 'string',\n 'DurationMillis': 123,\n 'Format': 'string',\n 'FrameRate': ...,\n 'FrameHeight': 123,\n 'FrameWidth': 123\n },\n 'ModerationLabels': [\n {\n 'Timestamp': 123,\n 'ModerationLabel': {\n 'Confidence': ...,\n 'Name': 'string',\n 'ParentName': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_face_detection(JobId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Gets face detection results for a Amazon Rekognition Video analysis started by .\n Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling which returns a job identifier (JobId ). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection . To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartFaceDetection .\n Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults , the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection .\n See also: AWS API Documentation\n \n \n :example: response = client.get_face_detection(\n JobId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n Unique identifier for the face detection job. The JobId is returned from StartFaceDetection .\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.\n\n :type NextToken: string\n :param NextToken: If the previous response was incomplete (because there are more faces to retrieve), Amazon Rekognition Video returns a pagination token in the response. 
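For instance, a minimal pagination loop (the job ID is a placeholder taken from an earlier StartFaceDetection call):\n import boto3\n client = boto3.client('rekognition')\n kwargs = {'JobId': 'job-id-from-start-face-detection'}\n while True:\n resp = client.get_face_detection(**kwargs)\n for face in resp['Faces']:\n print(face['Timestamp'])\n if 'NextToken' not in resp:\n break\n kwargs['NextToken'] = resp['NextToken']\n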
You can use this pagination token to retrieve the next set of faces.\n\n    :rtype: dict\n    :return: {\n        'JobStatus': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',\n        'StatusMessage': 'string',\n        'VideoMetadata': {\n            'Codec': 'string',\n            'DurationMillis': 123,\n            'Format': 'string',\n            'FrameRate': ...,\n            'FrameHeight': 123,\n            'FrameWidth': 123\n        },\n        'NextToken': 'string',\n        'Faces': [\n            {\n                'Timestamp': 123,\n                'Face': {\n                    'BoundingBox': {\n                        'Width': ...,\n                        'Height': ...,\n                        'Left': ...,\n                        'Top': ...\n                    },\n                    'AgeRange': {\n                        'Low': 123,\n                        'High': 123\n                    },\n                    'Smile': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'Eyeglasses': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'Sunglasses': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'Gender': {\n                        'Value': 'Male'|'Female',\n                        'Confidence': ...\n                    },\n                    'Beard': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'Mustache': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'EyesOpen': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'MouthOpen': {\n                        'Value': True|False,\n                        'Confidence': ...\n                    },\n                    'Emotions': [\n                        {\n                            'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n                            'Confidence': ...\n                        },\n                    ],\n                    'Landmarks': [\n                        {\n                            'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n                            'X': ...,\n                            'Y': ...\n                        },\n                    ],\n                    'Pose': {\n                        'Roll': ...,\n                        'Yaw': ...,\n                        'Pitch': ...\n                    },\n                    'Quality': {\n                        'Brightness': ...,\n                        'Sharpness': ...\n                    },\n                    'Confidence': ...\n                }\n            },\n        ]\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef get_face_search(JobId=None, MaxResults=None, NextToken=None, SortBy=None):\n    \"\"\"\n    Gets the face search results for Amazon Rekognition Video face search started by StartFaceSearch . The search returns faces in a collection that match the faces of persons detected in a video. It also includes the time(s) that faces are matched in the video.\n    Face search in a video is an asynchronous operation. You start face search by calling StartFaceSearch , which returns a job identifier (JobId ). When the search operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceSearch . To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call GetFaceSearch and pass the job identifier (JobId ) from the initial call to StartFaceSearch .\n    For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.\n    The search results are returned in an array, Persons , of objects. Each ``PersonMatch`` element contains details about the matching faces in the input collection, person information (facial attributes, bounding boxes, and person identifier) for the matched person, and the time the person was matched in the video.\n    By default, the Persons array is sorted by the time, in milliseconds from the start of the video, that persons are matched.
You can also sort by persons by specifying INDEX for the SORTBY input parameter.\n See also: AWS API Documentation\n \n \n :example: response = client.get_face_search(\n JobId='string',\n MaxResults=123,\n NextToken='string',\n SortBy='INDEX'|'TIMESTAMP'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The job identifer for the search request. You get the job identifier from an initial call to StartFaceSearch .\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.\n\n :type NextToken: string\n :param NextToken: If the previous response was incomplete (because there is more search results to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of search results.\n\n :type SortBy: string\n :param SortBy: Sort to use for grouping faces in the response. Use TIMESTAMP to group faces by the time that they are recognized. Use INDEX to sort by recognized faces.\n\n :rtype: dict\n :return: {\n 'JobStatus': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',\n 'StatusMessage': 'string',\n 'NextToken': 'string',\n 'VideoMetadata': {\n 'Codec': 'string',\n 'DurationMillis': 123,\n 'Format': 'string',\n 'FrameRate': ...,\n 'FrameHeight': 123,\n 'FrameWidth': 123\n },\n 'Persons': [\n {\n 'Timestamp': 123,\n 'Person': {\n 'Index': 123,\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Face': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'AgeRange': {\n 'Low': 123,\n 'High': 123\n },\n 'Smile': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Eyeglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Sunglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Gender': {\n 'Value': 'Male'|'Female',\n 'Confidence': ...\n },\n 'Beard': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Mustache': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'EyesOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'MouthOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Emotions': [\n {\n 'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n 'Confidence': ...\n },\n ],\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n },\n 'Confidence': ...\n }\n },\n 'FaceMatches': [\n {\n 'Similarity': ...,\n 'Face': {\n 'FaceId': 'string',\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'ImageId': 'string',\n 'ExternalImageId': 'string',\n 'Confidence': ...\n }\n },\n ]\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_label_detection(JobId=None, MaxResults=None, NextToken=None, SortBy=None):\n \"\"\"\n Gets the label detection results of a Amazon Rekognition Video analysis started by .\n The label detection 
operation is started by a call to StartLabelDetection , which returns a job identifier (JobId ). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartLabelDetection . To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call GetLabelDetection and pass the job identifier (JobId ) from the initial call to StartLabelDetection .\n The labels returned include the label name, the percentage confidence in the accuracy of the detected label, and the time the label was detected in the video.\n Use the MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults , the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetLabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection .\n See also: AWS API Documentation\n \n \n :example: response = client.get_label_detection(\n JobId='string',\n MaxResults=123,\n NextToken='string',\n SortBy='NAME'|'TIMESTAMP'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n Job identifier for the label detection operation for which you want results returned. You get the job identifier from an initial call to StartLabelDetection .\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.\n\n :type NextToken: string\n :param NextToken: If the previous response was incomplete (because there are more labels to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of labels.\n\n :type SortBy: string\n :param SortBy: Sort to use for elements in the Labels array. Use TIMESTAMP to sort array elements by the time labels are detected. Use NAME to alphabetically group elements for a label together. Within each label group, the array elements are sorted by detection confidence. The default sort is by TIMESTAMP .\n\n :rtype: dict\n :return: {\n 'JobStatus': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',\n 'StatusMessage': 'string',\n 'VideoMetadata': {\n 'Codec': 'string',\n 'DurationMillis': 123,\n 'Format': 'string',\n 'FrameRate': ...,\n 'FrameHeight': 123,\n 'FrameWidth': 123\n },\n 'NextToken': 'string',\n 'Labels': [\n {\n 'Timestamp': 123,\n 'Label': {\n 'Name': 'string',\n 'Confidence': ...,\n 'Instances': [\n {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...\n },\n ],\n 'Parents': [\n {\n 'Name': 'string'\n },\n ]\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
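A small usage sketch along these lines (assuming the operation in question has a registered paginator):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    paginator = rekognition.get_paginator('list_collections')\n    for page in paginator.paginate():\n        for collection_id in page['CollectionIds']:\n            print(collection_id)\n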
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_person_tracking(JobId=None, MaxResults=None, NextToken=None, SortBy=None):\n \"\"\"\n Gets the path tracking results of a Amazon Rekognition Video analysis started by .\n The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId ). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking .\n To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartPersonTracking .\n By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.\n Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults , the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking .\n See also: AWS API Documentation\n \n \n :example: response = client.get_person_tracking(\n JobId='string',\n MaxResults=123,\n NextToken='string',\n SortBy='INDEX'|'TIMESTAMP'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier for a job that tracks persons in a video. You get the JobId from a call to StartPersonTracking .\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.\n\n :type NextToken: string\n :param NextToken: If the previous response was incomplete (because there are more persons to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of persons.\n\n :type SortBy: string\n :param SortBy: Sort to use for elements in the Persons array. Use TIMESTAMP to sort array elements by the time persons are detected. Use INDEX to sort by the tracked persons. If you sort by INDEX , the array elements for each person are sorted by detection confidence. 
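For instance, a minimal sketch (the job ID is a placeholder):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    response = rekognition.get_person_tracking(JobId='sample-job-id', SortBy='INDEX')\n    for detection in response['Persons']:\n        print(detection['Person']['Index'], detection['Timestamp'])\n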
The default sort is by TIMESTAMP .\n\n :rtype: dict\n :return: {\n 'JobStatus': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',\n 'StatusMessage': 'string',\n 'VideoMetadata': {\n 'Codec': 'string',\n 'DurationMillis': 123,\n 'Format': 'string',\n 'FrameRate': ...,\n 'FrameHeight': 123,\n 'FrameWidth': 123\n },\n 'NextToken': 'string',\n 'Persons': [\n {\n 'Timestamp': 123,\n 'Person': {\n 'Index': 123,\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Face': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'AgeRange': {\n 'Low': 123,\n 'High': 123\n },\n 'Smile': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Eyeglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Sunglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Gender': {\n 'Value': 'Male'|'Female',\n 'Confidence': ...\n },\n 'Beard': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Mustache': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'EyesOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'MouthOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Emotions': [\n {\n 'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n 'Confidence': ...\n },\n ],\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n },\n 'Confidence': ...\n }\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef index_faces(CollectionId=None, Image=None, ExternalImageId=None, DetectionAttributes=None, MaxFaces=None, QualityFilter=None):\n \"\"\"\n Detects faces in the input image and adds them to the specified collection.\n Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the and operations.\n For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.\n To get the number of faces in a collection, call .\n If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image.\n If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field.\n To determine which version of the model you're using, call and supply the collection ID. 
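One way to check this, sketched here with the DescribeCollection operation (the collection ID is a placeholder):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    info = rekognition.describe_collection(CollectionId='sample-collection')\n    print(info['FaceModelVersion'])\n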
You can also get the model version from the value of FaceModelVersion in the response from IndexFaces\n For more information, see Model Versioning in the Amazon Rekognition Developer Guide.\n If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.\n You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.\n The QualityFilter input parameter allows you to filter out detected faces that dont meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. By default, IndexFaces filters detected faces. You can also explicitly filter detected faces by specifying AUTO for the value of QualityFilter . If you do not want to filter detected faces, specify NONE .\n Information about faces detected in an image, but not indexed, is returned in an array of objects, UnindexedFaces . Faces aren't indexed for reasons such as:\n In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords . This includes:\n If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes like gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.\n The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.\n This operation requires permissions to perform the rekognition:IndexFaces action.\n See also: AWS API Documentation\n \n Examples\n This operation detects faces in an image and adds them to the specified Rekognition collection.\n Expected Output:\n \n :example: response = client.index_faces(\n CollectionId='string',\n Image={\n 'Bytes': b'bytes',\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n ExternalImageId='string',\n DetectionAttributes=[\n 'DEFAULT'|'ALL',\n ],\n MaxFaces=123,\n QualityFilter='NONE'|'AUTO'\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n The ID of an existing collection to which you want to add the faces that are detected in the input images.\n \n\n :type Image: dict\n :param Image: [REQUIRED]\n The input image as base64-encoded bytes or an S3 object. 
If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.\n Bytes (bytes) --Blob of image bytes up to 5 MBs.\n S3Object (dict) --Identifies an S3 object as the image source.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type ExternalImageId: string\n :param ExternalImageId: The ID you want to assign to all the faces detected in the image.\n\n :type DetectionAttributes: list\n :param DetectionAttributes: An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for Attributes or if you specify ['DEFAULT'] , the API returns the following subset of facial attributes: BoundingBox , Confidence , Pose , Quality , and Landmarks . If you provide ['ALL'] , all facial attributes are returned, but the operation takes longer to complete.\n If you provide both, ['ALL', 'DEFAULT'] , the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).\n (string) --\n \n\n :type MaxFaces: integer\n :param MaxFaces: The maximum number of faces to index. The value of MaxFaces must be greater than or equal to 1. IndexFaces returns no more than 100 detected faces in an image, even if you specify a larger value for MaxFaces .\n If IndexFaces detects more faces than the value of MaxFaces , the faces with the lowest quality are filtered out first. If there are still more faces than the value of MaxFaces , the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of MaxFaces ). Information about the unindexed faces is available in the UnindexedFaces array.\n The faces that are returned by IndexFaces are sorted by the largest face bounding box size to the smallest size, in descending order.\n MaxFaces can be used with a collection associated with any version of the face model.\n \n\n :type QualityFilter: string\n :param QualityFilter: A filter that specifies how much filtering is done to identify faces that are detected with low quality. Filtered faces aren't indexed. If you specify AUTO , filtering prioritizes the identification of faces that don t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify NONE , no filtering is performed. 
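A hedged end-to-end sketch of an IndexFaces call (bucket, key, and collection names are placeholders):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    response = rekognition.index_faces(\n        CollectionId='sample-collection',\n        Image={'S3Object': {'Bucket': 'sample-bucket', 'Name': 'photo.jpg'}},\n        ExternalImageId='photo.jpg',\n        MaxFaces=5,\n        QualityFilter='AUTO',\n        DetectionAttributes=['DEFAULT'])\n    for record in response['FaceRecords']:\n        print(record['Face']['FaceId'])\n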
The default value is AUTO.\n To use quality filtering, the collection you are using must be associated with version 3 of the face model.\n \n\n :rtype: dict\n :return: {\n 'FaceRecords': [\n {\n 'Face': {\n 'FaceId': 'string',\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'ImageId': 'string',\n 'ExternalImageId': 'string',\n 'Confidence': ...\n },\n 'FaceDetail': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'AgeRange': {\n 'Low': 123,\n 'High': 123\n },\n 'Smile': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Eyeglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Sunglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Gender': {\n 'Value': 'Male'|'Female',\n 'Confidence': ...\n },\n 'Beard': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Mustache': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'EyesOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'MouthOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Emotions': [\n {\n 'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n 'Confidence': ...\n },\n ],\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n },\n 'Confidence': ...\n }\n },\n ],\n 'OrientationCorrection': 'ROTATE_0'|'ROTATE_90'|'ROTATE_180'|'ROTATE_270',\n 'FaceModelVersion': 'string',\n 'UnindexedFaces': [\n {\n 'Reasons': [\n 'EXCEEDS_MAX_FACES'|'EXTREME_POSE'|'LOW_BRIGHTNESS'|'LOW_SHARPNESS'|'LOW_CONFIDENCE'|'SMALL_BOUNDING_BOX',\n ],\n 'FaceDetail': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'AgeRange': {\n 'Low': 123,\n 'High': 123\n },\n 'Smile': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Eyeglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Sunglasses': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Gender': {\n 'Value': 'Male'|'Female',\n 'Confidence': ...\n },\n 'Beard': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Mustache': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'EyesOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'MouthOpen': {\n 'Value': True|False,\n 'Confidence': ...\n },\n 'Emotions': [\n {\n 'Type': 'HAPPY'|'SAD'|'ANGRY'|'CONFUSED'|'DISGUSTED'|'SURPRISED'|'CALM'|'UNKNOWN',\n 'Confidence': ...\n },\n ],\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n },\n 'Confidence': ...\n }\n 
},\n ]\n }\n \n \n :returns: \n The bounding box, BoundingBox , of the detected face.\n A confidence value, Confidence , which indicates the confidence that the bounding box contains a face.\n A face ID, faceId , assigned by the service for each face that's detected and stored.\n An image ID, ImageId , assigned by the service for the input image.\n \n \"\"\"\n pass\n\ndef list_collections(NextToken=None, MaxResults=None):\n \"\"\"\n Returns list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs.\n For an example, see Listing Collections in the Amazon Rekognition Developer Guide.\n This operation requires permissions to perform the rekognition:ListCollections action.\n See also: AWS API Documentation\n \n Examples\n This operation returns a list of Rekognition collections.\n Expected Output:\n \n :example: response = client.list_collections(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: Pagination token from the previous response.\n\n :type MaxResults: integer\n :param MaxResults: Maximum number of collection IDs to return.\n\n :rtype: dict\n :return: {\n 'CollectionIds': [\n 'string',\n ],\n 'NextToken': 'string',\n 'FaceModelVersions': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_faces(CollectionId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns metadata for faces in the specified collection. This metadata includes information such as the bounding box coordinates, the confidence (that the bounding box contains a face), and face ID. For an example, see Listing Faces in a Collection in the Amazon Rekognition Developer Guide.\n This operation requires permissions to perform the rekognition:ListFaces action.\n See also: AWS API Documentation\n \n Examples\n This operation lists the faces in a Rekognition collection.\n Expected Output:\n \n :example: response = client.list_faces(\n CollectionId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n ID of the collection from which to list the faces.\n \n\n :type NextToken: string\n :param NextToken: If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of faces.\n\n :type MaxResults: integer\n :param MaxResults: Maximum number of faces to return.\n\n :rtype: dict\n :return: {\n 'Faces': [\n {\n 'FaceId': 'string',\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'ImageId': 'string',\n 'ExternalImageId': 'string',\n 'Confidence': ...\n },\n ],\n 'NextToken': 'string',\n 'FaceModelVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_stream_processors(NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of stream processors that you have created with .\n See also: AWS API Documentation\n \n \n :example: response = client.list_stream_processors(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: If the previous response was incomplete (because there are more stream processors to retrieve), Amazon Rekognition Video returns a pagination token in the response. 
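A minimal sketch of walking those pages:\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    processors = []\n    token = None\n    while True:\n        kwargs = {'MaxResults': 100}\n        if token:\n            kwargs['NextToken'] = token\n        page = rekognition.list_stream_processors(**kwargs)\n        processors.extend(page['StreamProcessors'])\n        token = page.get('NextToken')\n        if not token:\n            break\n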
You can use this pagination token to retrieve the next set of stream processors.\n\n :type MaxResults: integer\n :param MaxResults: Maximum number of stream processors you want Amazon Rekognition Video to return in the response. The default is 1000.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'StreamProcessors': [\n {\n 'Name': 'string',\n 'Status': 'STOPPED'|'STARTING'|'RUNNING'|'FAILED'|'STOPPING'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef recognize_celebrities(Image=None):\n \"\"\"\n Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.\n For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.\n Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities , you will need the ID to identify the celebrity in a call to the operation.\n You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.\n For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.\n This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.\n See also: AWS API Documentation\n \n \n :example: response = client.recognize_celebrities(\n Image={\n 'Bytes': b'bytes',\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n }\n )\n \n \n :type Image: dict\n :param Image: [REQUIRED]\n The input image as base64-encoded bytes or an S3 object. 
If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n Bytes (bytes) --Blob of image bytes up to 5 MBs.\n S3Object (dict) --Identifies an S3 object as the image source.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :rtype: dict\n :return: {\n 'CelebrityFaces': [\n {\n 'Urls': [\n 'string',\n ],\n 'Name': 'string',\n 'Id': 'string',\n 'Face': {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...,\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n }\n },\n 'MatchConfidence': ...\n },\n ],\n 'UnrecognizedFaces': [\n {\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'Confidence': ...,\n 'Landmarks': [\n {\n 'Type': 'eyeLeft'|'eyeRight'|'nose'|'mouthLeft'|'mouthRight'|'leftEyeBrowLeft'|'leftEyeBrowRight'|'leftEyeBrowUp'|'rightEyeBrowLeft'|'rightEyeBrowRight'|'rightEyeBrowUp'|'leftEyeLeft'|'leftEyeRight'|'leftEyeUp'|'leftEyeDown'|'rightEyeLeft'|'rightEyeRight'|'rightEyeUp'|'rightEyeDown'|'noseLeft'|'noseRight'|'mouthUp'|'mouthDown'|'leftPupil'|'rightPupil'|'upperJawlineLeft'|'midJawlineLeft'|'chinBottom'|'midJawlineRight'|'upperJawlineRight',\n 'X': ...,\n 'Y': ...\n },\n ],\n 'Pose': {\n 'Roll': ...,\n 'Yaw': ...,\n 'Pitch': ...\n },\n 'Quality': {\n 'Brightness': ...,\n 'Sharpness': ...\n }\n },\n ],\n 'OrientationCorrection': 'ROTATE_0'|'ROTATE_90'|'ROTATE_180'|'ROTATE_270'\n }\n \n \n \"\"\"\n pass\n\ndef search_faces(CollectionId=None, FaceId=None, MaxFaces=None, FaceMatchThreshold=None):\n \"\"\"\n For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection.\n The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. 
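A brief sketch (the collection ID and face ID are placeholders):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    response = rekognition.search_faces(\n        CollectionId='sample-collection',\n        FaceId='11111111-2222-3333-4444-555555555555',  # placeholder face ID\n        MaxFaces=10,\n        FaceMatchThreshold=90.0)\n    for match in response['FaceMatches']:\n        print(match['Face']['FaceId'], match['Similarity'])\n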
Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face.\n For an example, see Searching for a Face Using Its Face ID in the Amazon Rekognition Developer Guide.\n This operation requires permissions to perform the rekognition:SearchFaces action.\n See also: AWS API Documentation\n \n Examples\n This operation searches for matching faces in the collection the supplied face belongs to.\n Expected Output:\n \n :example: response = client.search_faces(\n CollectionId='string',\n FaceId='string',\n MaxFaces=123,\n FaceMatchThreshold=...\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n ID of the collection the face belongs to.\n \n\n :type FaceId: string\n :param FaceId: [REQUIRED]\n ID of a face to find matches for in the collection.\n \n\n :type MaxFaces: integer\n :param MaxFaces: Maximum number of faces to return. The operation returns the maximum number of faces with the highest confidence in the match.\n\n :type FaceMatchThreshold: float\n :param FaceMatchThreshold: Optional value specifying the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%.\n\n :rtype: dict\n :return: {\n 'SearchedFaceId': 'string',\n 'FaceMatches': [\n {\n 'Similarity': ...,\n 'Face': {\n 'FaceId': 'string',\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'ImageId': 'string',\n 'ExternalImageId': 'string',\n 'Confidence': ...\n }\n },\n ],\n 'FaceModelVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef search_faces_by_image(CollectionId=None, Image=None, MaxFaces=None, FaceMatchThreshold=None):\n \"\"\"\n For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.\n You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.\n The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. 
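For example, a sketch using a query image stored in Amazon S3 (bucket, key, and collection names are placeholders):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    response = rekognition.search_faces_by_image(\n        CollectionId='sample-collection',\n        Image={'S3Object': {'Bucket': 'sample-bucket', 'Name': 'query.jpg'}},\n        FaceMatchThreshold=85.0)\n    for match in response['FaceMatches']:\n        print(match['Face']['FaceId'], match['Similarity'])\n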
In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.\n For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.\n This operation requires permissions to perform the rekognition:SearchFacesByImage action.\n See also: AWS API Documentation\n \n Examples\n This operation searches for faces in a Rekognition collection that match the largest face in an S3 bucket stored image.\n Expected Output:\n \n :example: response = client.search_faces_by_image(\n CollectionId='string',\n Image={\n 'Bytes': b'bytes',\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n MaxFaces=123,\n FaceMatchThreshold=...\n )\n \n \n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n ID of the collection to search.\n \n\n :type Image: dict\n :param Image: [REQUIRED]\n The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.\n Bytes (bytes) --Blob of image bytes up to 5 MBs.\n S3Object (dict) --Identifies an S3 object as the image source.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type MaxFaces: integer\n :param MaxFaces: Maximum number of faces to return. The operation returns the maximum number of faces with the highest confidence in the match.\n\n :type FaceMatchThreshold: float\n :param FaceMatchThreshold: (Optional) Specifies the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%.\n\n :rtype: dict\n :return: {\n 'SearchedFaceBoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'SearchedFaceConfidence': ...,\n 'FaceMatches': [\n {\n 'Similarity': ...,\n 'Face': {\n 'FaceId': 'string',\n 'BoundingBox': {\n 'Width': ...,\n 'Height': ...,\n 'Left': ...,\n 'Top': ...\n },\n 'ImageId': 'string',\n 'ExternalImageId': 'string',\n 'Confidence': ...\n }\n },\n ],\n 'FaceModelVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_celebrity_recognition(Video=None, ClientRequestToken=None, NotificationChannel=None, JobTag=None):\n \"\"\"\n Starts asynchronous recognition of celebrities in a stored video.\n Amazon Rekognition Video can detect celebrities in a video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId ) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel . To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED . 
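A simplified start-and-poll sketch (polling is shown here in place of the Amazon SNS notification, purely for brevity; the bucket and key are placeholders):\n\n    import time\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    job = rekognition.start_celebrity_recognition(\n        Video={'S3Object': {'Bucket': 'sample-bucket', 'Name': 'video.mp4'}})\n    while True:\n        result = rekognition.get_celebrity_recognition(JobId=job['JobId'])\n        if result['JobStatus'] != 'IN_PROGRESS':\n            break\n        time.sleep(5)\n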
If so, call and pass the job identifier (JobId ) from the initial call to StartCelebrityRecognition .\n For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.start_celebrity_recognition(\n Video={\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n ClientRequestToken='string',\n NotificationChannel={\n 'SNSTopicArn': 'string',\n 'RoleArn': 'string'\n },\n JobTag='string'\n )\n \n \n :type Video: dict\n :param Video: [REQUIRED]\n The video in which you want to recognize celebrities. The video must be stored in an Amazon S3 bucket.\n S3Object (dict) --The Amazon S3 bucket name and file name for the video.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: Idempotent token used to identify the start request. If you use the same token with multiple StartCelebrityRecognition requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidently started more than once.\n\n :type NotificationChannel: dict\n :param NotificationChannel: The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the celebrity recognition analysis to.\n SNSTopicArn (string) -- [REQUIRED]The Amazon SNS topic to which Amazon Rekognition to posts the completion status.\n RoleArn (string) -- [REQUIRED]The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.\n \n\n :type JobTag: string\n :param JobTag: Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.\n\n :rtype: dict\n :return: {\n 'JobId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_content_moderation(Video=None, MinConfidence=None, ClientRequestToken=None, NotificationChannel=None, JobTag=None):\n \"\"\"\n Starts asynchronous detection of explicit or suggestive adult content in a stored video.\n Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId ) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel .\n To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartContentModeration .\n For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.start_content_moderation(\n Video={\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n MinConfidence=...,\n ClientRequestToken='string',\n NotificationChannel={\n 'SNSTopicArn': 'string',\n 'RoleArn': 'string'\n },\n JobTag='string'\n )\n \n \n :type Video: dict\n :param Video: [REQUIRED]\n The video in which you want to moderate content. 
The video must be stored in an Amazon S3 bucket.\n S3Object (dict) --The Amazon S3 bucket name and file name for the video.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type MinConfidence: float\n :param MinConfidence: Specifies the minimum confidence that Amazon Rekognition must have in order to return a moderated content label. Confidence represents how certain Amazon Rekognition is that the moderated content is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition doesn't return any moderated content labels with a confidence level lower than this specified value. If you don't specify MinConfidence , GetContentModeration returns labels with confidence values greater than or equal to 50 percent.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: Idempotent token used to identify the start request. If you use the same token with multiple StartContentModeration requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidently started more than once.\n\n :type NotificationChannel: dict\n :param NotificationChannel: The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the content moderation analysis to.\n SNSTopicArn (string) -- [REQUIRED]The Amazon SNS topic to which Amazon Rekognition to posts the completion status.\n RoleArn (string) -- [REQUIRED]The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.\n \n\n :type JobTag: string\n :param JobTag: Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.\n\n :rtype: dict\n :return: {\n 'JobId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_face_detection(Video=None, ClientRequestToken=None, NotificationChannel=None, FaceAttributes=None, JobTag=None):\n \"\"\"\n Starts asynchronous detection of faces in a stored video.\n Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId ) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel . To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartFaceDetection .\n For more information, see Detecting Faces in a Stored Video in the Amazon Rekognition Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.start_face_detection(\n Video={\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n ClientRequestToken='string',\n NotificationChannel={\n 'SNSTopicArn': 'string',\n 'RoleArn': 'string'\n },\n FaceAttributes='DEFAULT'|'ALL',\n JobTag='string'\n )\n \n \n :type Video: dict\n :param Video: [REQUIRED]\n The video in which you want to detect faces. 
The video must be stored in an Amazon S3 bucket.\n S3Object (dict) --The Amazon S3 bucket name and file name for the video.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: Idempotent token used to identify the start request. If you use the same token with multiple StartFaceDetection requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidently started more than once.\n\n :type NotificationChannel: dict\n :param NotificationChannel: The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation.\n SNSTopicArn (string) -- [REQUIRED]The Amazon SNS topic to which Amazon Rekognition to posts the completion status.\n RoleArn (string) -- [REQUIRED]The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.\n \n\n :type FaceAttributes: string\n :param FaceAttributes: The face attributes you want returned.\n DEFAULT - The following subset of facial attributes are returned: BoundingBox, Confidence, Pose, Quality and Landmarks.ALL - All facial attributes are returned.\n \n\n :type JobTag: string\n :param JobTag: Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.\n\n :rtype: dict\n :return: {\n 'JobId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_face_search(Video=None, ClientRequestToken=None, FaceMatchThreshold=None, CollectionId=None, NotificationChannel=None, JobTag=None):\n \"\"\"\n Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.\n The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId ) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel . To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartFaceSearch . For more information, see procedure-person-search-videos .\n See also: AWS API Documentation\n \n \n :example: response = client.start_face_search(\n Video={\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n ClientRequestToken='string',\n FaceMatchThreshold=...,\n CollectionId='string',\n NotificationChannel={\n 'SNSTopicArn': 'string',\n 'RoleArn': 'string'\n },\n JobTag='string'\n )\n \n \n :type Video: dict\n :param Video: [REQUIRED]\n The video you want to search. The video must be stored in an Amazon S3 bucket.\n S3Object (dict) --The Amazon S3 bucket name and file name for the video.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: Idempotent token used to identify the start request. If you use the same token with multiple StartFaceSearch requests, the same JobId is returned. 
Use ClientRequestToken to prevent the same job from being accidently started more than once.\n\n :type FaceMatchThreshold: float\n :param FaceMatchThreshold: The minimum confidence in the person match to return. For example, don't return any matches where confidence in matches is less than 70%.\n\n :type CollectionId: string\n :param CollectionId: [REQUIRED]\n ID of the collection that contains the faces you want to search for.\n \n\n :type NotificationChannel: dict\n :param NotificationChannel: The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the search.\n SNSTopicArn (string) -- [REQUIRED]The Amazon SNS topic to which Amazon Rekognition to posts the completion status.\n RoleArn (string) -- [REQUIRED]The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.\n \n\n :type JobTag: string\n :param JobTag: Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.\n\n :rtype: dict\n :return: {\n 'JobId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_label_detection(Video=None, ClientRequestToken=None, MinConfidence=None, NotificationChannel=None, JobTag=None):\n \"\"\"\n Starts asynchronous detection of labels in a stored video.\n Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing.\n The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId ) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel .\n To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartLabelDetection .\n See also: AWS API Documentation\n \n \n :example: response = client.start_label_detection(\n Video={\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n ClientRequestToken='string',\n MinConfidence=...,\n NotificationChannel={\n 'SNSTopicArn': 'string',\n 'RoleArn': 'string'\n },\n JobTag='string'\n )\n \n \n :type Video: dict\n :param Video: [REQUIRED]\n The video in which you want to detect labels. The video must be stored in an Amazon S3 bucket.\n S3Object (dict) --The Amazon S3 bucket name and file name for the video.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: Idempotent token used to identify the start request. If you use the same token with multiple StartLabelDetection requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidently started more than once.\n\n :type MinConfidence: float\n :param MinConfidence: Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected label. 
Confidence represents how certain Amazon Rekognition is that a label is correctly identified.0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any labels with a confidence level lower than this specified value.\n If you don't specify MinConfidence , the operation returns labels with confidence values greater than or equal to 50 percent.\n \n\n :type NotificationChannel: dict\n :param NotificationChannel: The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the label detection operation to.\n SNSTopicArn (string) -- [REQUIRED]The Amazon SNS topic to which Amazon Rekognition to posts the completion status.\n RoleArn (string) -- [REQUIRED]The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.\n \n\n :type JobTag: string\n :param JobTag: Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.\n\n :rtype: dict\n :return: {\n 'JobId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_person_tracking(Video=None, ClientRequestToken=None, NotificationChannel=None, JobTag=None):\n \"\"\"\n Starts the asynchronous tracking of a person's path in a stored video.\n Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId ) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel .\n To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED . If so, call and pass the job identifier (JobId ) from the initial call to StartPersonTracking .\n See also: AWS API Documentation\n \n \n :example: response = client.start_person_tracking(\n Video={\n 'S3Object': {\n 'Bucket': 'string',\n 'Name': 'string',\n 'Version': 'string'\n }\n },\n ClientRequestToken='string',\n NotificationChannel={\n 'SNSTopicArn': 'string',\n 'RoleArn': 'string'\n },\n JobTag='string'\n )\n \n \n :type Video: dict\n :param Video: [REQUIRED]\n The video in which you want to detect people. The video must be stored in an Amazon S3 bucket.\n S3Object (dict) --The Amazon S3 bucket name and file name for the video.\n Bucket (string) --Name of the S3 bucket.\n Name (string) --S3 object key name.\n Version (string) --If the bucket is versioning enabled, you can specify the object version.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: Idempotent token used to identify the start request. If you use the same token with multiple StartPersonTracking requests, the same JobId is returned. 
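For instance (bucket, key, and token values are placeholders):\n\n    import boto3\n\n    rekognition = boto3.client('rekognition')\n    job = rekognition.start_person_tracking(\n        Video={'S3Object': {'Bucket': 'sample-bucket', 'Name': 'video.mp4'}},\n        ClientRequestToken='person-tracking-run-001')\n    # Repeating the call with the same token should return the same JobId\n    # rather than starting a second job.\n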
Use ClientRequestToken to prevent the same job from being accidently started more than once.\n\n :type NotificationChannel: dict\n :param NotificationChannel: The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the people detection operation to.\n SNSTopicArn (string) -- [REQUIRED]The Amazon SNS topic to which Amazon Rekognition to posts the completion status.\n RoleArn (string) -- [REQUIRED]The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.\n \n\n :type JobTag: string\n :param JobTag: Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.\n\n :rtype: dict\n :return: {\n 'JobId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_stream_processor(Name=None):\n \"\"\"\n Starts processing a stream processor. You create a stream processor by calling . To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor .\n See also: AWS API Documentation\n \n \n :example: response = client.start_stream_processor(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the stream processor to start processing.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef stop_stream_processor(Name=None):\n \"\"\"\n Stops a running stream processor that was created by .\n See also: AWS API Documentation\n \n \n :example: response = client.stop_stream_processor(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of a stream processor created by .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6554376482963562, "alphanum_fraction": 0.6632024645805359, "avg_line_length": 47.93254470825195, "blob_id": "dd1cd09983d568e3a38fee1ef849a53e11e1612e", "content_id": "5f10c65a92e8cfba3321b2e02cad2e79bc2d2e9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177724, "license_type": "permissive", "max_line_length": 705, "num_lines": 3632, "path": "/pyboto3/ses.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. 
This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef clone_receipt_rule_set(RuleSetName=None, OriginalRuleSetName=None):\n \"\"\"\n Creates a receipt rule set by cloning an existing one. All receipt rules and configurations are copied to the new receipt rule set and are completely independent of the source rule set.\n For information about setting up rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example creates a receipt rule set by cloning an existing one:\n Expected Output:\n \n :example: response = client.clone_receipt_rule_set(\n RuleSetName='string',\n OriginalRuleSetName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the rule set to create. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n \n\n :type OriginalRuleSetName: string\n :param OriginalRuleSetName: [REQUIRED]\n The name of the rule set to clone.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_configuration_set(ConfigurationSet=None):\n \"\"\"\n Creates a configuration set.\n Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set(\n ConfigurationSet={\n 'Name': 'string'\n }\n )\n \n \n :type ConfigurationSet: dict\n :param ConfigurationSet: [REQUIRED]\n A data structure that contains the name of the configuration set.\n Name (string) -- [REQUIRED]The name of the configuration set. The name must meet the following requirements:\n Contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain 64 characters or fewer.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_configuration_set_event_destination(ConfigurationSetName=None, EventDestination=None):\n \"\"\"\n Creates a configuration set event destination.\n An event destination is the AWS service to which Amazon SES publishes the email sending events associated with a configuration set. 
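A rough sketch of wiring up a CloudWatch event destination (all names are placeholders):\n\n    import boto3\n\n    ses = boto3.client('ses')\n    ses.create_configuration_set_event_destination(\n        ConfigurationSetName='sample-config-set',\n        EventDestination={\n            'Name': 'sample-destination',\n            'Enabled': True,\n            'MatchingEventTypes': ['send', 'bounce', 'complaint'],\n            'CloudWatchDestination': {\n                'DimensionConfigurations': [{\n                    'DimensionName': 'ses-message-tag',\n                    'DimensionValueSource': 'messageTag',\n                    'DefaultDimensionValue': 'none'}]}})\n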
For information about using configuration sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestination={\n 'Name': 'string',\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'send'|'reject'|'bounce'|'complaint'|'delivery'|'open'|'click'|'renderingFailure',\n ],\n 'KinesisFirehoseDestination': {\n 'IAMRoleARN': 'string',\n 'DeliveryStreamARN': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'messageTag'|'emailHeader'|'linkTag',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SNSDestination': {\n 'TopicARN': 'string'\n }\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that the event destination should be associated with.\n \n\n :type EventDestination: dict\n :param EventDestination: [REQUIRED]\n An object that describes the AWS service that email sending event information will be published to.\n Name (string) -- [REQUIRED]The name of the event destination. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 64 characters.\n Enabled (boolean) --Sets whether Amazon SES publishes events to this destination when you send an email with the associated configuration set. Set to true to enable publishing to this destination; set to false to prevent publishing to this destination. The default value is false .\n MatchingEventTypes (list) -- [REQUIRED]The type of email sending events to publish to the event destination.\n (string) --\n KinesisFirehoseDestination (dict) --An object that contains the delivery stream ARN and the IAM role ARN associated with an Amazon Kinesis Firehose event destination.\n IAMRoleARN (string) -- [REQUIRED]The ARN of the IAM role under which Amazon SES publishes email sending events to the Amazon Kinesis Firehose stream.\n DeliveryStreamARN (string) -- [REQUIRED]The ARN of the Amazon Kinesis Firehose stream that email sending events should be published to.\n CloudWatchDestination (dict) --An object that contains the names, default values, and sources of the dimensions associated with an Amazon CloudWatch event destination.\n DimensionConfigurations (list) -- [REQUIRED]A list of dimensions upon which to categorize your emails when you publish email sending events to Amazon CloudWatch.\n (dict) --Contains the dimension configuration to use when you publish email sending events to Amazon CloudWatch.\n For information about publishing email sending events to Amazon CloudWatch, see the Amazon SES Developer Guide .\n DimensionName (string) -- [REQUIRED]The name of an Amazon CloudWatch dimension associated with an email sending metric. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n DimensionValueSource (string) -- [REQUIRED]The place where Amazon SES finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon SES to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail /SendRawEmail API, choose messageTag . 
If you want Amazon SES to use your own email headers, choose emailHeader .\n DefaultDimensionValue (string) -- [REQUIRED]The default value of the dimension that is published to Amazon CloudWatch if you do not provide the value of the dimension when you send an email. The default value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n SNSDestination (dict) --An object that contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.\n TopicARN (string) -- [REQUIRED]The ARN of the Amazon SNS topic that email sending events will be published to. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_configuration_set_tracking_options(ConfigurationSetName=None, TrackingOptions=None):\n \"\"\"\n Creates an association between a configuration set and a custom domain for open and click event tracking.\n By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set_tracking_options(\n ConfigurationSetName='string',\n TrackingOptions={\n 'CustomRedirectDomain': 'string'\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that the tracking options should be associated with.\n \n\n :type TrackingOptions: dict\n :param TrackingOptions: [REQUIRED]\n A domain that is used to redirect email recipients to an Amazon SES-operated domain. 
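    For illustration (the domain and configuration set name are hypothetical), associating a custom tracking subdomain might look like:

        import boto3
        client = boto3.client('ses')
        client.create_configuration_set_tracking_options(
            ConfigurationSetName='my-config-set',
            TrackingOptions={'CustomRedirectDomain': 'click.example.com'}
        )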
This domain captures open and click events generated by Amazon SES emails.\n For more information, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide .\n CustomRedirectDomain (string) --The custom subdomain that will be used to redirect email recipients to the Amazon SES event tracking domain.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_custom_verification_email_template(TemplateName=None, FromEmailAddress=None, TemplateSubject=None, TemplateContent=None, SuccessRedirectionURL=None, FailureRedirectionURL=None):\n \"\"\"\n Creates a new custom verification email template.\n For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.create_custom_verification_email_template(\n TemplateName='string',\n FromEmailAddress='string',\n TemplateSubject='string',\n TemplateContent='string',\n SuccessRedirectionURL='string',\n FailureRedirectionURL='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the custom verification email template.\n \n\n :type FromEmailAddress: string\n :param FromEmailAddress: [REQUIRED]\n The email address that the custom verification email is sent from.\n \n\n :type TemplateSubject: string\n :param TemplateSubject: [REQUIRED]\n The subject line of the custom verification email.\n \n\n :type TemplateContent: string\n :param TemplateContent: [REQUIRED]\n The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide .\n \n\n :type SuccessRedirectionURL: string\n :param SuccessRedirectionURL: [REQUIRED]\n The URL that the recipient of the verification email is sent to if his or her address is successfully verified.\n \n\n :type FailureRedirectionURL: string\n :param FailureRedirectionURL: [REQUIRED]\n The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.\n \n\n \"\"\"\n pass\n\ndef create_receipt_filter(Filter=None):\n \"\"\"\n Creates a new IP address filter.\n For information about setting up IP address filters, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example creates a new IP address filter:\n Expected Output:\n \n :example: response = client.create_receipt_filter(\n Filter={\n 'Name': 'string',\n 'IpFilter': {\n 'Policy': 'Block'|'Allow',\n 'Cidr': 'string'\n }\n }\n )\n \n \n :type Filter: dict\n :param Filter: [REQUIRED]\n A data structure that describes the IP address filter to create, which consists of a name, an IP address range, and whether to allow or block mail from it.\n Name (string) -- [REQUIRED]The name of the IP address filter. 
The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n IpFilter (dict) -- [REQUIRED]A structure that provides the IP addresses to block or allow, and whether to block or allow incoming mail from them.\n Policy (string) -- [REQUIRED]Indicates whether to block or allow incoming mail from the specified IP addresses.\n Cidr (string) -- [REQUIRED]A single IP address or a range of IP addresses that you want to block or allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example of a single email address is 10.0.0.1. An example of a range of IP addresses is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317 .\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_receipt_rule(RuleSetName=None, After=None, Rule=None):\n \"\"\"\n Creates a receipt rule.\n For information about setting up receipt rules, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example creates a new receipt rule:\n Expected Output:\n \n :example: response = client.create_receipt_rule(\n RuleSetName='string',\n After='string',\n Rule={\n 'Name': 'string',\n 'Enabled': True|False,\n 'TlsPolicy': 'Require'|'Optional',\n 'Recipients': [\n 'string',\n ],\n 'Actions': [\n {\n 'S3Action': {\n 'TopicArn': 'string',\n 'BucketName': 'string',\n 'ObjectKeyPrefix': 'string',\n 'KmsKeyArn': 'string'\n },\n 'BounceAction': {\n 'TopicArn': 'string',\n 'SmtpReplyCode': 'string',\n 'StatusCode': 'string',\n 'Message': 'string',\n 'Sender': 'string'\n },\n 'WorkmailAction': {\n 'TopicArn': 'string',\n 'OrganizationArn': 'string'\n },\n 'LambdaAction': {\n 'TopicArn': 'string',\n 'FunctionArn': 'string',\n 'InvocationType': 'Event'|'RequestResponse'\n },\n 'StopAction': {\n 'Scope': 'RuleSet',\n 'TopicArn': 'string'\n },\n 'AddHeaderAction': {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n 'SNSAction': {\n 'TopicArn': 'string',\n 'Encoding': 'UTF-8'|'Base64'\n }\n },\n ],\n 'ScanEnabled': True|False\n }\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the rule set that the receipt rule will be added to.\n \n\n :type After: string\n :param After: The name of an existing rule after which the new rule will be placed. If this parameter is null, the new rule will be inserted at the beginning of the rule list.\n\n :type Rule: dict\n :param Rule: [REQUIRED]\n A data structure that contains the specified rule's name, actions, recipients, domains, enabled status, scan status, and TLS policy.\n Name (string) -- [REQUIRED]The name of the receipt rule. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n Enabled (boolean) --If true , the receipt rule is active. The default value is false .\n TlsPolicy (string) --Specifies whether Amazon SES should require that incoming email is delivered over a connection encrypted with Transport Layer Security (TLS). If this parameter is set to Require , Amazon SES will bounce emails that are not received over TLS. The default is Optional .\n Recipients (list) --The recipient domains and email addresses that the receipt rule applies to. 
If this field is not specified, this rule will match all recipients under all verified domains.\n (string) --\n Actions (list) --An ordered list of actions to perform on messages that match at least one of the recipient email addresses or domains specified in the receipt rule.\n (dict) --An action that Amazon SES can take when it receives an email on behalf of one or more email addresses or domains that you own. An instance of this data type can represent only one action.\n For information about setting up receipt rules, see the Amazon SES Developer Guide .\n S3Action (dict) --Saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon SNS.\n TopicArn (string) --The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n BucketName (string) -- [REQUIRED]The name of the Amazon S3 bucket that incoming email will be saved to.\n ObjectKeyPrefix (string) --The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory name that enables you to store similar data under the same directory in a bucket.\n KmsKeyArn (string) --The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:\n To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide .\n For more information about key policies, see the AWS KMS Developer Guide . If you do not specify a master key, Amazon SES will not encrypt your emails.\n Warning\n Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS SDK for Java and AWS SDK for Ruby only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide .\n \n BounceAction (dict) --Rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . 
For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n SmtpReplyCode (string) -- [REQUIRED]The SMTP reply code, as defined by RFC 5321 .\n StatusCode (string) --The SMTP enhanced status code, as defined by RFC 3463 .\n Message (string) -- [REQUIRED]Human-readable text to include in the bounce message.\n Sender (string) -- [REQUIRED]The email address of the sender of the bounced email. This is the address from which the bounce message will be sent.\n WorkmailAction (dict) --Calls Amazon WorkMail and, optionally, publishes a notification to Amazon SNS.\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n OrganizationArn (string) -- [REQUIRED]The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7 . For information about Amazon WorkMail organizations, see the Amazon WorkMail Administrator Guide .\n LambdaAction (dict) --Calls an AWS Lambda function, and optionally, publishes a notification to Amazon SNS.\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n FunctionArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS Lambda function. An example of an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction . For more information about AWS Lambda, see the AWS Lambda Developer Guide .\n InvocationType (string) --The invocation type of the AWS Lambda function. An invocation type of RequestResponse means that the execution of the function will immediately result in a response, and a value of Event means that the function will be invoked asynchronously. The default value is Event . For information about AWS Lambda invocation types, see the AWS Lambda Developer Guide .\n Warning\n There is a 30-second timeout on RequestResponse invocations. You should use Event invocation in most cases. Use RequestResponse only when you want to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set.\n \n StopAction (dict) --Terminates the evaluation of the receipt rule set and optionally publishes a notification to Amazon SNS.\n Scope (string) -- [REQUIRED]The name of the RuleSet that is being stopped.\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n AddHeaderAction (dict) --Adds a header to the received email.\n HeaderName (string) -- [REQUIRED]The name of the header to add. 
Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.\n HeaderValue (string) -- [REQUIRED]Must be less than 2048 characters, and must not contain newline characters ('\\r' or '\\n').\n SNSAction (dict) --Publishes the email content within a notification to Amazon SNS.\n TopicArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n Encoding (string) --The encoding to use for the email within the Amazon SNS notification. UTF-8 is easier to use, but may not preserve all special characters when a message was encoded with a different encoding format. Base64 preserves all special characters. The default value is UTF-8.\n \n ScanEnabled (boolean) --If true , then messages that this receipt rule applies to are scanned for spam and viruses. The default value is false .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_receipt_rule_set(RuleSetName=None):\n \"\"\"\n Creates an empty receipt rule set.\n For information about setting up receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example creates an empty receipt rule set:\n Expected Output:\n \n :example: response = client.create_receipt_rule_set(\n RuleSetName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the rule set to create. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_template(Template=None):\n \"\"\"\n Creates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.create_template(\n Template={\n 'TemplateName': 'string',\n 'SubjectPart': 'string',\n 'TextPart': 'string',\n 'HtmlPart': 'string'\n }\n )\n \n \n :type Template: dict\n :param Template: [REQUIRED]\n The content of the email, composed of a subject line, an HTML part, and a text-only part.\n TemplateName (string) -- [REQUIRED]The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.\n SubjectPart (string) --The subject line of the email.\n TextPart (string) --The email body that will be visible to recipients whose email clients do not display HTML.\n HtmlPart (string) --The HTML body of the email.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_configuration_set(ConfigurationSetName=None):\n \"\"\"\n Deletes a configuration set. Configuration sets enable you to publish email sending events. 
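    A minimal sketch (the configuration set name is hypothetical):

        import boto3
        client = boto3.client('ses')
        client.delete_configuration_set(ConfigurationSetName='my-config-set')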
For information about using configuration sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_configuration_set_event_destination(ConfigurationSetName=None, EventDestinationName=None):\n \"\"\"\n Deletes a configuration set event destination. Configuration set event destinations are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set from which to delete the event destination.\n \n\n :type EventDestinationName: string\n :param EventDestinationName: [REQUIRED]\n The name of the event destination to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_configuration_set_tracking_options(ConfigurationSetName=None):\n \"\"\"\n Deletes an association between a configuration set and a custom domain for open and click event tracking.\n By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. 
For information about using custom domains, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set_tracking_options(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set from which you want to delete the tracking options.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_custom_verification_email_template(TemplateName=None):\n \"\"\"\n Deletes an existing custom verification email template.\n For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_custom_verification_email_template(\n TemplateName='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the custom verification email template that you want to delete.\n \n\n \"\"\"\n pass\n\ndef delete_identity(Identity=None):\n \"\"\"\n Deletes the specified identity (an email address or a domain) from the list of verified identities.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example deletes an identity from the list of identities that have been submitted for verification with Amazon SES:\n Expected Output:\n \n :example: response = client.delete_identity(\n Identity='string'\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity to be removed from the list of identities for the AWS Account.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_identity_policy(Identity=None, PolicyName=None):\n \"\"\"\n Deletes the specified sending authorization policy for the given identity (an email address or a domain). This API returns successfully even if a policy with the specified name does not exist.\n Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a sending authorization policy for an identity:\n Expected Output:\n \n :example: response = client.delete_identity_policy(\n Identity='string',\n PolicyName='string'\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity that is associated with the policy that you want to delete. You can specify the identity by using its name or by using its Amazon Resource Name (ARN). 
Examples: [email protected] , example.com , arn:aws:ses:us-east-1:123456789012:identity/example.com .\n To successfully call this API, you must own the identity.\n \n\n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name of the policy to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_receipt_filter(FilterName=None):\n \"\"\"\n Deletes the specified IP address filter.\n For information about managing IP address filters, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example deletes an IP address filter:\n Expected Output:\n \n :example: response = client.delete_receipt_filter(\n FilterName='string'\n )\n \n \n :type FilterName: string\n :param FilterName: [REQUIRED]\n The name of the IP address filter to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_receipt_rule(RuleSetName=None, RuleName=None):\n \"\"\"\n Deletes the specified receipt rule.\n For information about managing receipt rules, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a receipt rule:\n Expected Output:\n \n :example: response = client.delete_receipt_rule(\n RuleSetName='string',\n RuleName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set that contains the receipt rule to delete.\n \n\n :type RuleName: string\n :param RuleName: [REQUIRED]\n The name of the receipt rule to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_receipt_rule_set(RuleSetName=None):\n \"\"\"\n Deletes the specified receipt rule set and all of the receipt rules it contains.\n For information about managing receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a receipt rule set:\n Expected Output:\n \n :example: response = client.delete_receipt_rule_set(\n RuleSetName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_template(TemplateName=None):\n \"\"\"\n Deletes an email template.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_template(\n TemplateName='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the template to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_verified_email_address(EmailAddress=None):\n \"\"\"\n Deprecated. 
Use the DeleteIdentity operation to delete email addresses and domains.\n See also: AWS API Documentation\n \n Examples\n The following example deletes an email address from the list of identities that have been submitted for verification with Amazon SES:\n Expected Output:\n \n :example: response = client.delete_verified_email_address(\n EmailAddress='string'\n )\n \n \n :type EmailAddress: string\n :param EmailAddress: [REQUIRED]\n An email address to be removed from the list of verified addresses.\n \n\n :return: response = client.delete_verified_email_address(\n EmailAddress='[email protected]',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef describe_active_receipt_rule_set():\n \"\"\"\n Returns the metadata and receipt rules for the receipt rule set that is currently active.\n For information about setting up receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns the metadata and receipt rules for the receipt rule set that is currently active:\n Expected Output:\n \n :example: response = client.describe_active_receipt_rule_set()\n \n \n :rtype: dict\n :return: {\n 'Metadata': {\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n },\n 'Rules': [\n {\n 'Name': 'string',\n 'Enabled': True|False,\n 'TlsPolicy': 'Require'|'Optional',\n 'Recipients': [\n 'string',\n ],\n 'Actions': [\n {\n 'S3Action': {\n 'TopicArn': 'string',\n 'BucketName': 'string',\n 'ObjectKeyPrefix': 'string',\n 'KmsKeyArn': 'string'\n },\n 'BounceAction': {\n 'TopicArn': 'string',\n 'SmtpReplyCode': 'string',\n 'StatusCode': 'string',\n 'Message': 'string',\n 'Sender': 'string'\n },\n 'WorkmailAction': {\n 'TopicArn': 'string',\n 'OrganizationArn': 'string'\n },\n 'LambdaAction': {\n 'TopicArn': 'string',\n 'FunctionArn': 'string',\n 'InvocationType': 'Event'|'RequestResponse'\n },\n 'StopAction': {\n 'Scope': 'RuleSet',\n 'TopicArn': 'string'\n },\n 'AddHeaderAction': {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n 'SNSAction': {\n 'TopicArn': 'string',\n 'Encoding': 'UTF-8'|'Base64'\n }\n },\n ],\n 'ScanEnabled': True|False\n },\n ]\n }\n \n \n :returns: \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n \n \"\"\"\n pass\n\ndef describe_configuration_set(ConfigurationSetName=None, ConfigurationSetAttributeNames=None):\n \"\"\"\n Returns the details of the specified configuration set. 
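    An illustrative sketch (the configuration set name is hypothetical) that retrieves a configuration set together with its event destinations and reputation options:

        import boto3
        client = boto3.client('ses')
        response = client.describe_configuration_set(
            ConfigurationSetName='my-config-set',
            ConfigurationSetAttributeNames=['eventDestinations', 'reputationOptions']
        )
        for destination in response.get('EventDestinations', []):
            print(destination['Name'], destination['MatchingEventTypes'])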
For information about using configuration sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_configuration_set(\n ConfigurationSetName='string',\n ConfigurationSetAttributeNames=[\n 'eventDestinations'|'trackingOptions'|'reputationOptions',\n ]\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set to describe.\n \n\n :type ConfigurationSetAttributeNames: list\n :param ConfigurationSetAttributeNames: A list of configuration set attributes to return.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'ConfigurationSet': {\n 'Name': 'string'\n },\n 'EventDestinations': [\n {\n 'Name': 'string',\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'send'|'reject'|'bounce'|'complaint'|'delivery'|'open'|'click'|'renderingFailure',\n ],\n 'KinesisFirehoseDestination': {\n 'IAMRoleARN': 'string',\n 'DeliveryStreamARN': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'messageTag'|'emailHeader'|'linkTag',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SNSDestination': {\n 'TopicARN': 'string'\n }\n },\n ],\n 'TrackingOptions': {\n 'CustomRedirectDomain': 'string'\n },\n 'ReputationOptions': {\n 'SendingEnabled': True|False,\n 'ReputationMetricsEnabled': True|False,\n 'LastFreshStart': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n Contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain 64 characters or fewer.\n \n \"\"\"\n pass\n\ndef describe_receipt_rule(RuleSetName=None, RuleName=None):\n \"\"\"\n Returns the details of the specified receipt rule.\n For information about setting up receipt rules, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns the details of a receipt rule:\n Expected Output:\n \n :example: response = client.describe_receipt_rule(\n RuleSetName='string',\n RuleName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set that the receipt rule belongs to.\n \n\n :type RuleName: string\n :param RuleName: [REQUIRED]\n The name of the receipt rule.\n \n\n :rtype: dict\n :return: {\n 'Rule': {\n 'Name': 'string',\n 'Enabled': True|False,\n 'TlsPolicy': 'Require'|'Optional',\n 'Recipients': [\n 'string',\n ],\n 'Actions': [\n {\n 'S3Action': {\n 'TopicArn': 'string',\n 'BucketName': 'string',\n 'ObjectKeyPrefix': 'string',\n 'KmsKeyArn': 'string'\n },\n 'BounceAction': {\n 'TopicArn': 'string',\n 'SmtpReplyCode': 'string',\n 'StatusCode': 'string',\n 'Message': 'string',\n 'Sender': 'string'\n },\n 'WorkmailAction': {\n 'TopicArn': 'string',\n 'OrganizationArn': 'string'\n },\n 'LambdaAction': {\n 'TopicArn': 'string',\n 'FunctionArn': 'string',\n 'InvocationType': 'Event'|'RequestResponse'\n },\n 'StopAction': {\n 'Scope': 'RuleSet',\n 'TopicArn': 'string'\n },\n 'AddHeaderAction': {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n 'SNSAction': {\n 'TopicArn': 'string',\n 'Encoding': 'UTF-8'|'Base64'\n }\n },\n ],\n 'ScanEnabled': True|False\n }\n }\n \n \n :returns: \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n \n \"\"\"\n 
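    # Illustrative sketch only (rule set and rule names are hypothetical):
    #   client = boto3.client('ses')
    #   rule = client.describe_receipt_rule(RuleSetName='default-rule-set',
    #                                       RuleName='save-to-s3')['Rule']
    #   print(rule['Enabled'], rule['Actions'])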
pass\n\ndef describe_receipt_rule_set(RuleSetName=None):\n \"\"\"\n Returns the details of the specified receipt rule set.\n For information about managing receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns the metadata and receipt rules of a receipt rule set:\n Expected Output:\n \n :example: response = client.describe_receipt_rule_set(\n RuleSetName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set to describe.\n \n\n :rtype: dict\n :return: {\n 'Metadata': {\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n },\n 'Rules': [\n {\n 'Name': 'string',\n 'Enabled': True|False,\n 'TlsPolicy': 'Require'|'Optional',\n 'Recipients': [\n 'string',\n ],\n 'Actions': [\n {\n 'S3Action': {\n 'TopicArn': 'string',\n 'BucketName': 'string',\n 'ObjectKeyPrefix': 'string',\n 'KmsKeyArn': 'string'\n },\n 'BounceAction': {\n 'TopicArn': 'string',\n 'SmtpReplyCode': 'string',\n 'StatusCode': 'string',\n 'Message': 'string',\n 'Sender': 'string'\n },\n 'WorkmailAction': {\n 'TopicArn': 'string',\n 'OrganizationArn': 'string'\n },\n 'LambdaAction': {\n 'TopicArn': 'string',\n 'FunctionArn': 'string',\n 'InvocationType': 'Event'|'RequestResponse'\n },\n 'StopAction': {\n 'Scope': 'RuleSet',\n 'TopicArn': 'string'\n },\n 'AddHeaderAction': {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n 'SNSAction': {\n 'TopicArn': 'string',\n 'Encoding': 'UTF-8'|'Base64'\n }\n },\n ],\n 'ScanEnabled': True|False\n },\n ]\n }\n \n \n :returns: \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_account_sending_enabled():\n \"\"\"\n Returns the email sending status of the Amazon SES account for the current region.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.get_account_sending_enabled()\n \n \n :rtype: dict\n :return: {\n 'Enabled': True|False\n }\n \n \n \"\"\"\n pass\n\ndef get_custom_verification_email_template(TemplateName=None):\n \"\"\"\n Returns the custom email verification template for the template name you specify.\n For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.get_custom_verification_email_template(\n TemplateName='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the custom verification email template that you want to retrieve.\n \n\n :rtype: dict\n :return: {\n 'TemplateName': 'string',\n 'FromEmailAddress': 'string',\n 'TemplateSubject': 'string',\n 'TemplateContent': 'string',\n 'SuccessRedirectionURL': 'string',\n 'FailureRedirectionURL': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_identity_dkim_attributes(Identities=None):\n \"\"\"\n Returns the current status of Easy DKIM signing for an entity. For domain name identities, this operation also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES has successfully verified that these tokens have been published.\n This operation takes a list of identities as input and returns the following information for each:\n This operation is throttled at one request per second and can only get DKIM attributes for up to 100 identities at a time.\n For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example retrieves the Amazon SES Easy DKIM attributes for a list of identities:\n Expected Output:\n \n :example: response = client.get_identity_dkim_attributes(\n Identities=[\n 'string',\n ]\n )\n \n \n :type Identities: list\n :param Identities: [REQUIRED]\n A list of one or more verified identities - email addresses, domains, or both.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DkimAttributes': {\n 'string': {\n 'DkimEnabled': True|False,\n 'DkimVerificationStatus': 'Pending'|'Success'|'Failed'|'TemporaryFailure'|'NotStarted',\n 'DkimTokens': [\n 'string',\n ]\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_identity_mail_from_domain_attributes(Identities=None):\n \"\"\"\n Returns the custom MAIL FROM attributes for a list of identities (email addresses : domains).\n This operation is throttled at one request per second and can only get custom MAIL FROM attributes for up to 100 identities at a time.\n See also: AWS API Documentation\n \n Examples\n The following example returns the custom MAIL FROM attributes for an identity:\n Expected Output:\n \n :example: response = client.get_identity_mail_from_domain_attributes(\n Identities=[\n 'string',\n ]\n )\n \n \n :type Identities: list\n :param Identities: [REQUIRED]\n A list of one or more identities.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'MailFromDomainAttributes': {\n 'string': {\n 'MailFromDomain': 'string',\n 
'MailFromDomainStatus': 'Pending'|'Success'|'Failed'|'TemporaryFailure',\n 'BehaviorOnMXFailure': 'UseDefaultValue'|'RejectMessage'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_identity_notification_attributes(Identities=None):\n \"\"\"\n Given a list of verified identities (email addresses and/or domains), returns a structure describing identity notification attributes.\n This operation is throttled at one request per second and can only get notification attributes for up to 100 identities at a time.\n For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example returns the notification attributes for an identity:\n Expected Output:\n \n :example: response = client.get_identity_notification_attributes(\n Identities=[\n 'string',\n ]\n )\n \n \n :type Identities: list\n :param Identities: [REQUIRED]\n A list of one or more identities. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: [email protected] , example.com , arn:aws:ses:us-east-1:123456789012:identity/example.com .\n (string) --\n \n\n :rtype: dict\n :return: {\n 'NotificationAttributes': {\n 'string': {\n 'BounceTopic': 'string',\n 'ComplaintTopic': 'string',\n 'DeliveryTopic': 'string',\n 'ForwardingEnabled': True|False,\n 'HeadersInBounceNotificationsEnabled': True|False,\n 'HeadersInComplaintNotificationsEnabled': True|False,\n 'HeadersInDeliveryNotificationsEnabled': True|False\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_identity_policies(Identity=None, PolicyNames=None):\n \"\"\"\n Returns the requested sending authorization policies for the given identity (an email address or a domain). The policies are returned as a map of policy names to policy contents. You can retrieve a maximum of 20 policies at a time.\n Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns a sending authorization policy for an identity:\n Expected Output:\n \n :example: response = client.get_identity_policies(\n Identity='string',\n PolicyNames=[\n 'string',\n ]\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity for which the policies will be retrieved. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: [email protected] , example.com , arn:aws:ses:us-east-1:123456789012:identity/example.com .\n To successfully call this API, you must own the identity.\n \n\n :type PolicyNames: list\n :param PolicyNames: [REQUIRED]\n A list of the names of policies to be retrieved. You can retrieve a maximum of 20 policies at a time. 
If you do not know the names of the policies that are attached to the identity, you can use ListIdentityPolicies .\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Policies': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_identity_verification_attributes(Identities=None):\n \"\"\"\n Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.\n The verification status of an email address is \"Pending\" until the email address owner clicks the link within the verification email that Amazon SES sent to that address. If the email address owner clicks the link within 24 hours, the verification status of the email address changes to \"Success\". If the link is not clicked within 24 hours, the verification status changes to \"Failed.\" In that case, if you still want to verify the email address, you must restart the verification process from the beginning.\n For domain identities, the domain's verification status is \"Pending\" as Amazon SES searches for the required TXT record in the DNS settings of the domain. When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, if you still want to verify the domain, you must restart the verification process from the beginning.\n This operation is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.\n See also: AWS API Documentation\n \n Examples\n The following example returns the verification status and the verification token for a domain identity:\n Expected Output:\n \n :example: response = client.get_identity_verification_attributes(\n Identities=[\n 'string',\n ]\n )\n \n \n :type Identities: list\n :param Identities: [REQUIRED]\n A list of identities.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'VerificationAttributes': {\n 'string': {\n 'VerificationStatus': 'Pending'|'Success'|'Failed'|'TemporaryFailure'|'NotStarted',\n 'VerificationToken': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_send_quota():\n \"\"\"\n Provides the sending limits for the Amazon SES account.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns the Amazon SES sending limits for an AWS account:\n Expected Output:\n \n :example: response = client.get_send_quota()\n \n \n :rtype: dict\n :return: {\n 'Max24HourSend': 123.0,\n 'MaxSendRate': 123.0,\n 'SentLast24Hours': 123.0\n }\n \n \n \"\"\"\n pass\n\ndef get_send_statistics():\n \"\"\"\n Provides sending statistics for the current AWS Region. The result is a list of data points, representing the last two weeks of sending activity. 
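    For illustration, a sketch that folds the returned data points into overall counts (the field names are documented in the response structure below):

        import boto3
        client = boto3.client('ses')
        points = client.get_send_statistics()['SendDataPoints']
        attempts = sum(p['DeliveryAttempts'] for p in points)
        bounces = sum(p['Bounces'] for p in points)
        if attempts:
            print('bounce rate over the period: {:.2%}'.format(bounces / attempts))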
Each data point in the list contains statistics for a 15-minute period of time.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns Amazon SES sending statistics:\n Expected Output:\n \n :example: response = client.get_send_statistics()\n \n \n :rtype: dict\n :return: {\n 'SendDataPoints': [\n {\n 'Timestamp': datetime(2015, 1, 1),\n 'DeliveryAttempts': 123,\n 'Bounces': 123,\n 'Complaints': 123,\n 'Rejects': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_template(TemplateName=None):\n \"\"\"\n Displays the template object (which includes the Subject line, HTML part and text part) for the template you specify.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.get_template(\n TemplateName='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the template you want to retrieve.\n \n\n :rtype: dict\n :return: {\n 'Template': {\n 'TemplateName': 'string',\n 'SubjectPart': 'string',\n 'TextPart': 'string',\n 'HtmlPart': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_configuration_sets(NextToken=None, MaxItems=None):\n \"\"\"\n Provides a list of the configuration sets associated with your Amazon SES account in the current AWS Region. For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.\n You can execute this operation no more than once per second. This operation will return up to 1,000 configuration sets each time it is run. If your Amazon SES account has more than 1,000 configuration sets, this operation will also return a NextToken element. 
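    A sketch of that token handoff (an illustration, not part of the generated reference):

        import boto3
        client = boto3.client('ses')
        names, token = [], None
        while True:
            kwargs = {'NextToken': token} if token else {}
            page = client.list_configuration_sets(**kwargs)
            names.extend(cs['Name'] for cs in page['ConfigurationSets'])
            token = page.get('NextToken')
            if not token:
                break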
You can then execute the ListConfigurationSets operation again, passing the NextToken parameter and the value of the NextToken element to retrieve additional results.\n See also: AWS API Documentation\n \n \n :example: response = client.list_configuration_sets(\n NextToken='string',\n MaxItems=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListConfigurationSets to indicate the position of the configuration set in the configuration set list.\n\n :type MaxItems: integer\n :param MaxItems: The number of configuration sets to return.\n\n :rtype: dict\n :return: {\n 'ConfigurationSets': [\n {\n 'Name': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain 64 characters or fewer.\n \n \"\"\"\n pass\n\ndef list_custom_verification_email_templates(NextToken=None, MaxResults=None):\n \"\"\"\n Lists the existing custom verification email templates for your account in the current AWS Region.\n For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.list_custom_verification_email_templates(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListCustomVerificationEmailTemplates to indicate the position in the list of email templates.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if you specify a value less than 1 or greater than 50, the operation will return up to 50 results.\n\n :rtype: dict\n :return: {\n 'CustomVerificationEmailTemplates': [\n {\n 'TemplateName': 'string',\n 'FromEmailAddress': 'string',\n 'TemplateSubject': 'string',\n 'SuccessRedirectionURL': 'string',\n 'FailureRedirectionURL': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_identities(IdentityType=None, NextToken=None, MaxItems=None):\n \"\"\"\n Returns a list containing all of the identities (email addresses and domains) for your AWS account in the current AWS Region, regardless of verification status.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example lists the email address identities that have been submitted for verification with Amazon SES:\n Expected Output:\n \n :example: response = client.list_identities(\n IdentityType='EmailAddress'|'Domain',\n NextToken='string',\n MaxItems=123\n )\n \n \n :type IdentityType: string\n :param IdentityType: The type of the identities to list. Possible values are 'EmailAddress' and 'Domain'. If this parameter is omitted, then all identities will be listed.\n\n :type NextToken: string\n :param NextToken: The token to use for pagination.\n\n :type MaxItems: integer\n :param MaxItems: The maximum number of identities per page. Possible values are 1-1000 inclusive.\n\n :rtype: dict\n :return: {\n 'Identities': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_identity_policies(Identity=None):\n \"\"\"\n Returns a list of sending authorization policies that are attached to the given identity (an email address or a domain). 
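    As a sketch (the identity is hypothetical), the listing call is typically chained with GetIdentityPolicies, which accepts at most 20 policy names per request:

        import boto3
        client = boto3.client('ses')
        names = client.list_identity_policies(Identity='example.com')['PolicyNames']
        if names:
            policies = client.get_identity_policies(Identity='example.com',
                                                    PolicyNames=names[:20])['Policies']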
This API returns only a list. If you want the actual policy content, you can use GetIdentityPolicies .\n Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example returns a list of sending authorization policies that are attached to an identity:\n Expected Output:\n \n :example: response = client.list_identity_policies(\n Identity='string'\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity that is associated with the policy for which the policies will be listed. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: [email protected] , example.com , arn:aws:ses:us-east-1:123456789012:identity/example.com .\n To successfully call this API, you must own the identity.\n \n\n :rtype: dict\n :return: {\n 'PolicyNames': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_receipt_filters():\n \"\"\"\n Lists the IP address filters associated with your AWS account in the current AWS Region.\n For information about managing IP address filters, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example lists the IP address filters that are associated with an AWS account:\n Expected Output:\n \n :example: response = client.list_receipt_filters()\n \n \n :rtype: dict\n :return: {\n 'Filters': [\n {\n 'Name': 'string',\n 'IpFilter': {\n 'Policy': 'Block'|'Allow',\n 'Cidr': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_receipt_rule_sets(NextToken=None):\n \"\"\"\n Lists the receipt rule sets that exist under your AWS account in the current AWS Region. If there are additional receipt rule sets to be retrieved, you will receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.\n For information about managing receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example lists the receipt rule sets that exist under an AWS account:\n Expected Output:\n \n :example: response = client.list_receipt_rule_sets(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListReceiptRuleSets to indicate the position in the receipt rule set list.\n\n :rtype: dict\n :return: {\n 'RuleSets': [\n {\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_templates(NextToken=None, MaxItems=None):\n \"\"\"\n Lists the email templates present in your Amazon SES account in the current AWS Region.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.list_templates(\n NextToken='string',\n MaxItems=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListTemplates to indicate the position in the list of email templates.\n\n :type MaxItems: integer\n :param MaxItems: The maximum number of templates to return. This value must be at least 1 and less than or equal to 10. 
If you do not specify a value, or if you specify a value less than 1 or greater than 10, the operation will return up to 10 results.\n\n :rtype: dict\n :return: {\n 'TemplatesMetadata': [\n {\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_verified_email_addresses():\n \"\"\"\n Deprecated. Use the ListIdentities operation to list the email addresses and domains associated with your account.\n See also: AWS API Documentation\n \n Examples\n The following example lists all email addresses that have been submitted for verification with Amazon SES:\n Expected Output:\n \n :example: response = client.list_verified_email_addresses()\n \n \n :rtype: dict\n :return: {\n 'VerifiedEmailAddresses': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef put_identity_policy(Identity=None, PolicyName=None, Policy=None):\n \"\"\"\n Adds or updates a sending authorization policy for the specified identity (an email address or a domain).\n Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example adds a sending authorization policy to an identity:\n Expected Output:\n \n :example: response = client.put_identity_policy(\n Identity='string',\n PolicyName='string',\n Policy='string'\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity that the policy will apply to. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: [email protected] , example.com , arn:aws:ses:us-east-1:123456789012:identity/example.com .\n To successfully call this API, you must own the identity.\n \n\n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name of the policy.\n The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.\n \n\n :type Policy: string\n :param Policy: [REQUIRED]\n The text of the policy in JSON format. The policy cannot exceed 4 KB.\n For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef reorder_receipt_rule_set(RuleSetName=None, RuleNames=None):\n \"\"\"\n Reorders the receipt rules within a receipt rule set.\n For information about managing receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example reorders the receipt rules within a receipt rule set:\n Expected Output:\n \n :example: response = client.reorder_receipt_rule_set(\n RuleSetName='string',\n RuleNames=[\n 'string',\n ]\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set to reorder.\n \n\n :type RuleNames: list\n :param RuleNames: [REQUIRED]\n A list of the specified receipt rule set's receipt rules in the order that you want to put them.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef send_bounce(OriginalMessageId=None, BounceSender=None, Explanation=None, MessageDsn=None, BouncedRecipientInfoList=None, BounceSenderArn=None):\n \"\"\"\n Generates and sends a bounce message to the sender of an email you received through Amazon SES. 
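As a hedged illustration only, a minimal call might look like the sketch below; the message ID and addresses are placeholders, not values taken from this documentation:\n response = client.send_bounce(\n OriginalMessageId='EXAMPLE-ORIGINAL-MESSAGE-ID',\n BounceSender='[email protected]',\n BouncedRecipientInfoList=[\n {\n 'Recipient': '[email protected]',\n 'BounceType': 'DoesNotExist'\n },\n ]\n )\n 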
You can only use this API on an email up to 24 hours after you receive it.\n For information about receiving email through Amazon SES, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.send_bounce(\n OriginalMessageId='string',\n BounceSender='string',\n Explanation='string',\n MessageDsn={\n 'ReportingMta': 'string',\n 'ArrivalDate': datetime(2015, 1, 1),\n 'ExtensionFields': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n },\n BouncedRecipientInfoList=[\n {\n 'Recipient': 'string',\n 'RecipientArn': 'string',\n 'BounceType': 'DoesNotExist'|'MessageTooLarge'|'ExceededQuota'|'ContentRejected'|'Undefined'|'TemporaryFailure',\n 'RecipientDsnFields': {\n 'FinalRecipient': 'string',\n 'Action': 'failed'|'delayed'|'delivered'|'relayed'|'expanded',\n 'RemoteMta': 'string',\n 'Status': 'string',\n 'DiagnosticCode': 'string',\n 'LastAttemptDate': datetime(2015, 1, 1),\n 'ExtensionFields': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n ],\n BounceSenderArn='string'\n )\n \n \n :type OriginalMessageId: string\n :param OriginalMessageId: [REQUIRED]\n The message ID of the message to be bounced.\n \n\n :type BounceSender: string\n :param BounceSender: [REQUIRED]\n The address to use in the 'From' header of the bounce message. This must be an identity that you have verified with Amazon SES.\n \n\n :type Explanation: string\n :param Explanation: Human-readable text for the bounce message to explain the failure. If not specified, the text will be auto-generated based on the bounced recipient information.\n\n :type MessageDsn: dict\n :param MessageDsn: Message-related DSN fields. If not specified, Amazon SES will choose the values.\n ReportingMta (string) -- [REQUIRED]The reporting MTA that attempted to deliver the message, formatted as specified in RFC 3464 (mta-name-type; mta-name ). The default value is dns; inbound-smtp.[region].amazonaws.com .\n ArrivalDate (datetime) --When the message was received by the reporting mail transfer agent (MTA), in RFC 822 date-time format.\n ExtensionFields (list) --Additional X-headers to include in the DSN.\n (dict) --Additional X-headers to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.\n For information about receiving email through Amazon SES, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.\n Value (string) -- [REQUIRED]The value of the header to add. Must be less than 2048 characters, and must not contain newline characters (carriage return or line feed).\n \n \n\n :type BouncedRecipientInfoList: list\n :param BouncedRecipientInfoList: [REQUIRED]\n A list of recipients of the bounced message, including the information required to create the Delivery Status Notifications (DSNs) for the recipients. You must specify at least one BouncedRecipientInfo in the list.\n (dict) --Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.\n For information about receiving email through Amazon SES, see the Amazon SES Developer Guide .\n Recipient (string) -- [REQUIRED]The email address of the recipient of the bounced email.\n RecipientArn (string) --This parameter is used only for sending authorization. 
It is the ARN of the identity that is associated with the sending authorization policy that permits you to receive email for the recipient of the bounced email. For more information about sending authorization, see the Amazon SES Developer Guide .\n BounceType (string) --The reason for the bounce. You must provide either this parameter or RecipientDsnFields .\n RecipientDsnFields (dict) --Recipient-related DSN fields, most of which would normally be filled in automatically when provided with a BounceType . You must provide either this parameter or BounceType .\n FinalRecipient (string) --The email address that the message was ultimately delivered to. This corresponds to the Final-Recipient in the DSN. If not specified, FinalRecipient will be set to the Recipient specified in the BouncedRecipientInfo structure. Either FinalRecipient or the recipient in BouncedRecipientInfo must be a recipient of the original bounced message.\n Note\n Do not prepend the FinalRecipient email address with rfc 822; , as described in RFC 3798 .\n Action (string) -- [REQUIRED]The action performed by the reporting mail transfer agent (MTA) as a result of its attempt to deliver the message to the recipient address. This is required by RFC 3464 .\n RemoteMta (string) --The MTA to which the remote MTA attempted to deliver the message, formatted as specified in RFC 3464 (mta-name-type; mta-name ). This parameter typically applies only to propagating synchronous bounces.\n Status (string) -- [REQUIRED]The status code that indicates what went wrong. This is required by RFC 3464 .\n DiagnosticCode (string) --An extended explanation of what went wrong; this is usually an SMTP response. See RFC 3463 for the correct formatting of this parameter.\n LastAttemptDate (datetime) --The time the final delivery attempt was made, in RFC 822 date-time format.\n ExtensionFields (list) --Additional X-headers to include in the DSN.\n (dict) --Additional X-headers to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.\n For information about receiving email through Amazon SES, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.\n Value (string) -- [REQUIRED]The value of the header to add. Must be less than 2048 characters, and must not contain newline characters (carriage return or line feed).\n \n \n \n\n :type BounceSenderArn: string\n :param BounceSenderArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the address in the 'From' header of the bounce. For more information about sending authorization, see the Amazon SES Developer Guide .\n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef send_bulk_templated_email(Source=None, SourceArn=None, ReplyToAddresses=None, ReturnPath=None, ReturnPathArn=None, ConfigurationSetName=None, DefaultTags=None, Template=None, TemplateArn=None, DefaultTemplateData=None, Destinations=None):\n \"\"\"\n Composes an email message to multiple destinations. 
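As a hedged illustration only, a minimal call might look like the sketch below; the source address, template name, template data, and destination are placeholders, and the named template is assumed to already exist:\n response = client.send_bulk_templated_email(\n Source='[email protected]',\n Template='MyTemplate',\n DefaultTemplateData='{"name": "friend"}',\n Destinations=[\n {\n 'Destination': {\n 'ToAddresses': [\n '[email protected]'\n ]\n }\n }\n ]\n )\n 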
The message body is created using an email template.\n In order to send email using the SendBulkTemplatedEmail operation, your call to the API must meet the following requirements:\n See also: AWS API Documentation\n \n \n :example: response = client.send_bulk_templated_email(\n Source='string',\n SourceArn='string',\n ReplyToAddresses=[\n 'string',\n ],\n ReturnPath='string',\n ReturnPathArn='string',\n ConfigurationSetName='string',\n DefaultTags=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n Template='string',\n TemplateArn='string',\n DefaultTemplateData='string',\n Destinations=[\n {\n 'Destination': {\n 'ToAddresses': [\n 'string',\n ],\n 'CcAddresses': [\n 'string',\n ],\n 'BccAddresses': [\n 'string',\n ]\n },\n 'ReplacementTags': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'ReplacementTemplateData': 'string'\n },\n ]\n )\n \n \n :type Source: string\n :param Source: [REQUIRED]\n The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .\n If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .\n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n \n\n :type SourceArn: string\n :param SourceArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n\n :type ReplyToAddresses: list\n :param ReplyToAddresses: The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.\n (string) --\n \n\n :type ReturnPath: string\n :param ReturnPath: The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. 
This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.\n\n :type ReturnPathArn: string\n :param ReturnPathArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set to use when you send an email using SendBulkTemplatedEmail .\n\n :type DefaultTags: list\n :param DefaultTags: A list of tags, in the form of name/value pairs, to apply to an email that you send to a destination using SendBulkTemplatedEmail .\n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the tag. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n Value (string) -- [REQUIRED]The value of the tag. The value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n\n :type Template: string\n :param Template: [REQUIRED]\n The template to use when sending this email.\n \n\n :type TemplateArn: string\n :param TemplateArn: The ARN of the template to use when sending this email.\n\n :type DefaultTemplateData: string\n :param DefaultTemplateData: A list of replacement values to apply to the template when replacement data is not specified in a Destination object. These values act as a default or fallback option when no other data is available.\n The template data is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n\n :type Destinations: list\n :param Destinations: [REQUIRED]\n One or more Destination objects. All of the recipients in a Destination will receive the same version of the email. You can specify up to 50 Destination objects within a Destinations array.\n (dict) --An array that contains one or more Destinations, as well as the tags and replacement data associated with each of those Destinations.\n Destination (dict) -- [REQUIRED]Represents the destination of the message, consisting of To:, CC:, and BCC: fields.\n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . 
If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 .\n ToAddresses (list) --The To: field(s) of the message.\n (string) --\n CcAddresses (list) --The CC: field(s) of the message.\n (string) --\n BccAddresses (list) --The BCC: field(s) of the message.\n (string) --\n \n ReplacementTags (list) --A list of tags, in the form of name/value pairs, to apply to an email that you send using SendBulkTemplatedEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the tag. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n Value (string) -- [REQUIRED]The value of the tag. The value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n ReplacementTemplateData (string) --A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n \n\n :rtype: dict\n :return: {\n 'Status': [\n {\n 'Status': 'Success'|'MessageRejected'|'MailFromDomainNotVerified'|'ConfigurationSetDoesNotExist'|'TemplateDoesNotExist'|'AccountSuspended'|'AccountThrottled'|'AccountDailyQuotaExceeded'|'InvalidSendingPoolName'|'AccountSendingPaused'|'ConfigurationSetSendingPaused'|'InvalidParameterValue'|'TransientFailure'|'Failed',\n 'Error': 'string',\n 'MessageId': 'string'\n },\n ]\n }\n \n \n :returns: \n Source (string) -- [REQUIRED]\n The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .\n If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .\n \n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n \n \n SourceArn (string) -- This parameter is used only for sending authorization. 
It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n ReplyToAddresses (list) -- The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.\n \n (string) --\n \n \n ReturnPath (string) -- The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.\n ReturnPathArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n ConfigurationSetName (string) -- The name of the configuration set to use when you send an email using SendBulkTemplatedEmail .\n DefaultTags (list) -- A list of tags, in the form of name/value pairs, to apply to an email that you send to a destination using SendBulkTemplatedEmail .\n \n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n \n Name (string) -- [REQUIRED]The name of the tag. The name must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n Value (string) -- [REQUIRED]The value of the tag. The value must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n \n \n \n \n Template (string) -- [REQUIRED]\n The template to use when sending this email.\n \n TemplateArn (string) -- The ARN of the template to use when sending this email.\n DefaultTemplateData (string) -- A list of replacement values to apply to the template when replacement data is not specified in a Destination object. 
These values act as a default or fallback option when no other data is available.\n The template data is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n Destinations (list) -- [REQUIRED]\n One or more Destination objects. All of the recipients in a Destination will receive the same version of the email. You can specify up to 50 Destination objects within a Destinations array.\n \n (dict) --An array that contains one or more Destinations, as well as the tags and replacement data associated with each of those Destinations.\n \n Destination (dict) -- [REQUIRED]Represents the destination of the message, consisting of To:, CC:, and BCC: fields.\n \n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 .\n \n \n ToAddresses (list) --The To: field(s) of the message.\n \n (string) --\n \n \n CcAddresses (list) --The CC: field(s) of the message.\n \n (string) --\n \n \n BccAddresses (list) --The BCC: field(s) of the message.\n \n (string) --\n \n \n \n \n ReplacementTags (list) --A list of tags, in the form of name/value pairs, to apply to an email that you send using SendBulkTemplatedEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n \n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n \n Name (string) -- [REQUIRED]The name of the tag. The name must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n Value (string) -- [REQUIRED]The value of the tag. The value must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n \n \n \n \n ReplacementTemplateData (string) --A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n \n \n \n \n \n \"\"\"\n pass\n\ndef send_custom_verification_email(EmailAddress=None, TemplateName=None, ConfigurationSetName=None):\n \"\"\"\n Adds an email address to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.\n To use this operation, you must first create a custom verification email template. 
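As a hedged illustration only, a typical call might look like the sketch below; the recipient address and template name are placeholders, and the named template is assumed to already exist:\n response = client.send_custom_verification_email(\n EmailAddress='[email protected]',\n TemplateName='MyVerificationTemplate'\n )\n 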
For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.send_custom_verification_email(\n EmailAddress='string',\n TemplateName='string',\n ConfigurationSetName='string'\n )\n \n \n :type EmailAddress: string\n :param EmailAddress: [REQUIRED]\n The email address to verify.\n \n\n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the custom verification email template to use when sending the verification email.\n \n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: Name of a configuration set to use when sending the verification email.\n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef send_email(Source=None, Destination=None, Message=None, ReplyToAddresses=None, ReturnPath=None, SourceArn=None, ReturnPathArn=None, Tags=None, ConfigurationSetName=None):\n \"\"\"\n Composes an email message and immediately queues it for sending. In order to send email using the SendEmail operation, your message must meet the following requirements:\n See also: AWS API Documentation\n \n Examples\n The following example sends a formatted email:\n Expected Output:\n \n :example: response = client.send_email(\n Source='string',\n Destination={\n 'ToAddresses': [\n 'string',\n ],\n 'CcAddresses': [\n 'string',\n ],\n 'BccAddresses': [\n 'string',\n ]\n },\n Message={\n 'Subject': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Body': {\n 'Text': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Html': {\n 'Data': 'string',\n 'Charset': 'string'\n }\n }\n },\n ReplyToAddresses=[\n 'string',\n ],\n ReturnPath='string',\n SourceArn='string',\n ReturnPathArn='string',\n Tags=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n ConfigurationSetName='string'\n )\n \n \n :type Source: string\n :param Source: [REQUIRED]\n The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .\n If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .\n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . 
MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n \n\n :type Destination: dict\n :param Destination: [REQUIRED]\n The destination for this email, composed of To:, CC:, and BCC: fields.\n ToAddresses (list) --The To: field(s) of the message.\n (string) --\n CcAddresses (list) --The CC: field(s) of the message.\n (string) --\n BccAddresses (list) --The BCC: field(s) of the message.\n (string) --\n \n\n :type Message: dict\n :param Message: [REQUIRED]\n The message to be sent.\n Subject (dict) -- [REQUIRED]The subject of the message: A short summary of the content, which will appear in the recipient's inbox.\n Data (string) -- [REQUIRED]The textual data of the content.\n Charset (string) --The character set of the content.\n Body (dict) -- [REQUIRED]The message body.\n Text (dict) --The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).\n Data (string) -- [REQUIRED]The textual data of the content.\n Charset (string) --The character set of the content.\n Html (dict) --The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.\n Data (string) -- [REQUIRED]The textual data of the content.\n Charset (string) --The character set of the content.\n \n \n\n :type ReplyToAddresses: list\n :param ReplyToAddresses: The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.\n (string) --\n \n\n :type ReturnPath: string\n :param ReturnPath: The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.\n\n :type SourceArn: string\n :param SourceArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n\n :type ReturnPathArn: string\n :param ReturnPathArn: This parameter is used only for sending authorization. 
It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n\n :type Tags: list\n :param Tags: A list of tags, in the form of name/value pairs, to apply to an email that you send using SendEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the tag. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n Value (string) -- [REQUIRED]The value of the tag. The value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set to use when you send an email using SendEmail .\n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n :returns: \n Source (string) -- [REQUIRED]\n The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .\n If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .\n \n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . 
MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n \n \n Destination (dict) -- [REQUIRED]\n The destination for this email, composed of To:, CC:, and BCC: fields.\n \n ToAddresses (list) --The To: field(s) of the message.\n \n (string) --\n \n \n CcAddresses (list) --The CC: field(s) of the message.\n \n (string) --\n \n \n BccAddresses (list) --The BCC: field(s) of the message.\n \n (string) --\n \n \n \n \n Message (dict) -- [REQUIRED]\n The message to be sent.\n \n Subject (dict) -- [REQUIRED]The subject of the message: A short summary of the content, which will appear in the recipient's inbox.\n \n Data (string) -- [REQUIRED]The textual data of the content.\n \n Charset (string) --The character set of the content.\n \n \n \n Body (dict) -- [REQUIRED]The message body.\n \n Text (dict) --The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).\n \n Data (string) -- [REQUIRED]The textual data of the content.\n \n Charset (string) --The character set of the content.\n \n \n \n Html (dict) --The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.\n \n Data (string) -- [REQUIRED]The textual data of the content.\n \n Charset (string) --The character set of the content.\n \n \n \n \n \n \n \n ReplyToAddresses (list) -- The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.\n \n (string) --\n \n \n ReturnPath (string) -- The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.\n SourceArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n ReturnPathArn (string) -- This parameter is used only for sending authorization. 
It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n Tags (list) -- A list of tags, in the form of name/value pairs, to apply to an email that you send using SendEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n \n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n \n Name (string) -- [REQUIRED]The name of the tag. The name must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n Value (string) -- [REQUIRED]The value of the tag. The value must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n \n \n \n \n ConfigurationSetName (string) -- The name of the configuration set to use when you send an email using SendEmail .\n \n \"\"\"\n pass\n\ndef send_raw_email(Source=None, Destinations=None, RawMessage=None, FromArn=None, SourceArn=None, ReturnPathArn=None, Tags=None, ConfigurationSetName=None):\n \"\"\"\n Composes an email message and immediately queues it for sending.\n This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.\n The SendRawEmail operation has the following requirements:\n Additionally, keep the following considerations in mind when using the SendRawEmail operation:\n For most common sending authorization scenarios, we recommend that you specify the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn parameters. If you only specify the SourceIdentityArn parameter, Amazon SES will set the From and Return Path addresses to the identity specified in SourceIdentityArn . For more information about sending authorization, see Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.\n See also: AWS API Documentation\n \n Examples\n The following example sends an email with an attachment:\n Expected Output:\n \n :example: response = client.send_raw_email(\n Source='string',\n Destinations=[\n 'string',\n ],\n RawMessage={\n 'Data': b'bytes'\n },\n FromArn='string',\n SourceArn='string',\n ReturnPathArn='string',\n Tags=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n ConfigurationSetName='string'\n )\n \n \n :type Source: string\n :param Source: The identity's email address. 
If you do not provide a value for this parameter, you must specify a 'From' address in the raw text of the message. (You can also specify both.)\n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n If you specify the Source parameter and have feedback forwarding enabled, then bounces and complaints will be sent to this email address. This takes precedence over any Return-Path header that you might include in the raw text of the message.\n \n\n :type Destinations: list\n :param Destinations: A list of destinations for the message, consisting of To:, CC:, and BCC: addresses.\n (string) --\n \n\n :type RawMessage: dict\n :param RawMessage: [REQUIRED]\n The raw email message itself. The message has to meet the following criteria:\n The message has to contain a header and a body, separated by a blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n Attachments must be of a content type that Amazon SES supports. For a list of unsupported content types, see Unsupported Attachment Types in the Amazon SES Developer Guide .\n The entire message must be base64-encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, we highly recommend that you encode that content. For more information, see Sending Raw Email in the Amazon SES Developer Guide .\n Per RFC 5321 , the maximum length of each line of text, including the <CRLF>, must not exceed 1,000 characters.\n Data (bytes) -- [REQUIRED]The raw data of the message. This data needs to be base64-encoded if you are accessing Amazon SES directly through the HTTPS interface. If you are accessing Amazon SES using an AWS SDK, the SDK takes care of the base 64-encoding for you. In all cases, the client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, and MIME encoding.\n The To:, CC:, and BCC: headers in the raw message can contain a group list.\n If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the 'Source,' 'From,' and 'Return-Path' addresses. For more information, see the documentation for SendRawEmail .\n Warning\n Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.\n For more information, go to the Amazon SES Developer Guide .\n \n\n :type FromArn: string\n :param FromArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to specify a particular 'From' address in the header of the raw email.\n Instead of using this parameter, you can use the X-header X-SES-FROM-ARN in the raw message of the email. 
If you use both the FromArn parameter and the corresponding X-header, Amazon SES uses the value of the FromArn parameter.\n Note\n For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide .\n \n\n :type SourceArn: string\n :param SourceArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN in the raw message of the email. If you use both the SourceArn parameter and the corresponding X-header, Amazon SES uses the value of the SourceArn parameter.\n Note\n For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide .\n \n\n :type ReturnPathArn: string\n :param ReturnPathArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN in the raw message of the email. If you use both the ReturnPathArn parameter and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn parameter.\n Note\n For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide .\n \n\n :type Tags: list\n :param Tags: A list of tags, in the form of name/value pairs, to apply to an email that you send using SendRawEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the tag. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n Value (string) -- [REQUIRED]The value of the tag. 
The value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set to use when you send an email using SendRawEmail .\n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n :returns: \n Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.\n If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn , FromArn , and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:\n X-SES-SOURCE-ARN\n X-SES-FROM-ARN\n X-SES-RETURN-PATH-ARN\n \n \n \n \"\"\"\n pass\n\ndef send_templated_email(Source=None, Destination=None, ReplyToAddresses=None, ReturnPath=None, SourceArn=None, ReturnPathArn=None, Tags=None, ConfigurationSetName=None, Template=None, TemplateArn=None, TemplateData=None):\n \"\"\"\n Composes an email message using an email template and immediately queues it for sending.\n In order to send email using the SendTemplatedEmail operation, your call to the API must meet the following requirements:\n See also: AWS API Documentation\n \n \n :example: response = client.send_templated_email(\n Source='string',\n Destination={\n 'ToAddresses': [\n 'string',\n ],\n 'CcAddresses': [\n 'string',\n ],\n 'BccAddresses': [\n 'string',\n ]\n },\n ReplyToAddresses=[\n 'string',\n ],\n ReturnPath='string',\n SourceArn='string',\n ReturnPathArn='string',\n Tags=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n ConfigurationSetName='string',\n Template='string',\n TemplateArn='string',\n TemplateData='string'\n )\n \n \n :type Source: string\n :param Source: [REQUIRED]\n The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .\n If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .\n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n \n\n :type Destination: dict\n :param Destination: [REQUIRED]\n The destination for this email, composed of To:, CC:, and BCC: fields. 
A Destination can include up to 50 recipients across these three fields.\n ToAddresses (list) --The To: field(s) of the message.\n (string) --\n CcAddresses (list) --The CC: field(s) of the message.\n (string) --\n BccAddresses (list) --The BCC: field(s) of the message.\n (string) --\n \n\n :type ReplyToAddresses: list\n :param ReplyToAddresses: The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.\n (string) --\n \n\n :type ReturnPath: string\n :param ReturnPath: The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.\n\n :type SourceArn: string\n :param SourceArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n\n :type ReturnPathArn: string\n :param ReturnPathArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n\n :type Tags: list\n :param Tags: A list of tags, in the form of name/value pairs, to apply to an email that you send using SendTemplatedEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n Name (string) -- [REQUIRED]The name of the tag. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n Value (string) -- [REQUIRED]The value of the tag. 
The value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set to use when you send an email using SendTemplatedEmail .\n\n :type Template: string\n :param Template: [REQUIRED]\n The template to use when sending this email.\n \n\n :type TemplateArn: string\n :param TemplateArn: The ARN of the template to use when sending this email.\n\n :type TemplateData: string\n :param TemplateData: [REQUIRED]\n A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n :returns: \n Source (string) -- [REQUIRED]\n The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .\n If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .\n \n Note\n Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 . For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters . If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492 . The sender name (also known as the friendly name ) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047 . MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= .\n \n \n Destination (dict) -- [REQUIRED]\n The destination for this email, composed of To:, CC:, and BCC: fields. A Destination can include up to 50 recipients across these three fields.\n \n ToAddresses (list) --The To: field(s) of the message.\n \n (string) --\n \n \n CcAddresses (list) --The CC: field(s) of the message.\n \n (string) --\n \n \n BccAddresses (list) --The BCC: field(s) of the message.\n \n (string) --\n \n \n \n \n ReplyToAddresses (list) -- The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.\n \n (string) --\n \n \n ReturnPath (string) -- The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.\n SourceArn (string) -- This parameter is used only for sending authorization. 
It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n ReturnPathArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.\n For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .\n For more information about sending authorization, see the Amazon SES Developer Guide .\n \n Tags (list) -- A list of tags, in the form of name/value pairs, to apply to an email that you send using SendTemplatedEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n \n (dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.\n Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .\n \n Name (string) -- [REQUIRED]The name of the tag. The name must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n Value (string) -- [REQUIRED]The value of the tag. The value must:\n \n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n \n \n \n \n ConfigurationSetName (string) -- The name of the configuration set to use when you send an email using SendTemplatedEmail .\n Template (string) -- [REQUIRED]\n The template to use when sending this email.\n \n TemplateArn (string) -- The ARN of the template to use when sending this email.\n TemplateData (string) -- [REQUIRED]\n A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n \n \"\"\"\n pass\n\ndef set_active_receipt_rule_set(RuleSetName=None):\n \"\"\"\n Sets the specified receipt rule set as the active receipt rule set.\n For information about managing receipt rule sets, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example sets the active receipt rule set:\n Expected Output:\n \n :example: response = client.set_active_receipt_rule_set(\n RuleSetName='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: The name of the receipt rule set to make active. 
Setting this value to null disables all email receiving.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef set_identity_dkim_enabled(Identity=None, DkimEnabled=None):\n \"\"\"\n Enables or disables Easy DKIM signing of email sent from an identity:\n For email addresses (for example, [email protected] ), you can only enable Easy DKIM signing if the corresponding domain (in this case, example.com ) has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim operation.\n You can execute this operation no more than once per second.\n For more information about Easy DKIM signing, go to the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example configures Amazon SES to Easy DKIM-sign the email sent from an identity:\n Expected Output:\n \n :example: response = client.set_identity_dkim_enabled(\n Identity='string',\n DkimEnabled=True|False\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity for which DKIM signing should be enabled or disabled.\n \n\n :type DkimEnabled: boolean\n :param DkimEnabled: [REQUIRED]\n Sets whether DKIM signing is enabled for an identity. Set to true to enable DKIM signing for this identity; false to disable it.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n Identity (string) -- [REQUIRED]\n The identity for which DKIM signing should be enabled or disabled.\n \n DkimEnabled (boolean) -- [REQUIRED]\n Sets whether DKIM signing is enabled for an identity. Set to true to enable DKIM signing for this identity; false to disable it.\n \n \n \"\"\"\n pass\n\ndef set_identity_feedback_forwarding_enabled(Identity=None, ForwardingEnabled=None):\n \"\"\"\n Given an identity (an email address or a domain), enables or disables whether Amazon SES forwards bounce and complaint notifications as email. Feedback forwarding can only be disabled when Amazon Simple Notification Service (Amazon SNS) topics are specified for both bounces and complaints.\n You can execute this operation no more than once per second.\n For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example configures Amazon SES to forward an identity's bounces and complaints via email:\n Expected Output:\n \n :example: response = client.set_identity_feedback_forwarding_enabled(\n Identity='string',\n ForwardingEnabled=True|False\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity for which to set bounce and complaint notification forwarding. Examples: [email protected] , example.com .\n \n\n :type ForwardingEnabled: boolean\n :param ForwardingEnabled: [REQUIRED]\n Sets whether Amazon SES will forward bounce and complaint notifications as email. true specifies that Amazon SES will forward bounce and complaint notifications as email, in addition to any Amazon SNS topic publishing otherwise specified. false specifies that Amazon SES will publish bounce and complaint notifications only through Amazon SNS. 
This value can only be set to false when Amazon SNS topics are set for both Bounce and Complaint notification types.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef set_identity_headers_in_notifications_enabled(Identity=None, NotificationType=None, Enabled=None):\n \"\"\"\n Given an identity (an email address or a domain), sets whether Amazon SES includes the original email headers in the Amazon Simple Notification Service (Amazon SNS) notifications of a specified type.\n You can execute this operation no more than once per second.\n For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example configures Amazon SES to include the original email headers in the Amazon SNS bounce notifications for an identity:\n Expected Output:\n \n :example: response = client.set_identity_headers_in_notifications_enabled(\n Identity='string',\n NotificationType='Bounce'|'Complaint'|'Delivery',\n Enabled=True|False\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity for which to enable or disable headers in notifications. Examples: [email protected] , example.com .\n \n\n :type NotificationType: string\n :param NotificationType: [REQUIRED]\n The notification type for which to enable or disable headers in notifications.\n \n\n :type Enabled: boolean\n :param Enabled: [REQUIRED]\n Sets whether Amazon SES includes the original email headers in Amazon SNS notifications of the specified notification type. A value of true specifies that Amazon SES will include headers in notifications, and a value of false specifies that Amazon SES will not include headers in notifications.\n This value can only be set when NotificationType is already set to use a particular Amazon SNS topic.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef set_identity_mail_from_domain(Identity=None, MailFromDomain=None, BehaviorOnMXFailure=None):\n \"\"\"\n Enables or disables the custom MAIL FROM domain setup for a verified identity (an email address or a domain).\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example configures Amazon SES to use a custom MAIL FROM domain for an identity:\n Expected Output:\n \n :example: response = client.set_identity_mail_from_domain(\n Identity='string',\n MailFromDomain='string',\n BehaviorOnMXFailure='UseDefaultValue'|'RejectMessage'\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The verified identity for which you want to enable or disable the specified custom MAIL FROM domain.\n \n\n :type MailFromDomain: string\n :param MailFromDomain: The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not be used in a 'From' address if the MAIL FROM domain is the destination of email feedback forwarding (for more information, see the Amazon SES Developer Guide ), and 3) not be used to receive emails. A value of null disables the custom MAIL FROM setting for the identity.\n\n :type BehaviorOnMXFailure: string\n :param BehaviorOnMXFailure: The action that you want Amazon SES to take if it cannot successfully read the required MX record when you send an email. If you choose UseDefaultValue , Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL FROM domain. 
If you choose RejectMessage , Amazon SES will return a MailFromDomainNotVerified error and not send the email.\n The action specified in BehaviorOnMXFailure is taken when the custom MAIL FROM domain setup is in the Pending , Failed , and TemporaryFailure states.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef set_identity_notification_topic(Identity=None, NotificationType=None, SnsTopic=None):\n \"\"\"\n Sets an Amazon Simple Notification Service (Amazon SNS) topic to use when delivering notifications. When you use this operation, you specify a verified identity, such as an email address or domain. When you send an email that uses the chosen identity in the Source field, Amazon SES sends notifications to the topic you specified. You can send bounce, complaint, or delivery notifications (or any combination of the three) to the Amazon SNS topic that you specify.\n You can execute this operation no more than once per second.\n For more information about feedback notification, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example sets the Amazon SNS topic to which Amazon SES will publish bounce, complaint, and/or delivery notifications for emails sent with the specified identity as the Source:\n Expected Output:\n \n :example: response = client.set_identity_notification_topic(\n Identity='string',\n NotificationType='Bounce'|'Complaint'|'Delivery',\n SnsTopic='string'\n )\n \n \n :type Identity: string\n :param Identity: [REQUIRED]\n The identity (email address or domain) that you want to set the Amazon SNS topic for.\n Warning\n You can only specify a verified identity for this parameter.\n You can specify an identity by using its name or by using its Amazon Resource Name (ARN). The following examples are all valid identities: [email protected] , example.com , arn:aws:ses:us-east-1:123456789012:identity/example.com .\n \n\n :type NotificationType: string\n :param NotificationType: [REQUIRED]\n The type of notifications that will be published to the specified Amazon SNS topic.\n \n\n :type SnsTopic: string\n :param SnsTopic: The Amazon Resource Name (ARN) of the Amazon SNS topic. 
If the parameter is omitted from the request or a null value is passed, SnsTopic is cleared and publishing is disabled.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef set_receipt_rule_position(RuleSetName=None, RuleName=None, After=None):\n \"\"\"\n Sets the position of the specified receipt rule in the receipt rule set.\n For information about managing receipt rules, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example sets the position of a receipt rule in a receipt rule set:\n Expected Output:\n \n :example: response = client.set_receipt_rule_position(\n RuleSetName='string',\n RuleName='string',\n After='string'\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set that contains the receipt rule to reposition.\n \n\n :type RuleName: string\n :param RuleName: [REQUIRED]\n The name of the receipt rule to reposition.\n \n\n :type After: string\n :param After: The name of the receipt rule after which to place the specified receipt rule.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef test_render_template(TemplateName=None, TemplateData=None):\n \"\"\"\n Creates a preview of the MIME content of an email when provided with a template and a set of replacement data.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.test_render_template(\n TemplateName='string',\n TemplateData='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the template that you want to render.\n \n\n :type TemplateData: string\n :param TemplateData: [REQUIRED]\n A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.\n \n\n :rtype: dict\n :return: {\n 'RenderedTemplate': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_account_sending_enabled(Enabled=None):\n \"\"\"\n Enables or disables email sending across your entire Amazon SES account in the current AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending across your Amazon SES account in a given AWS Region when reputation metrics (such as your bounce or complaint rates) reach certain thresholds.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.update_account_sending_enabled(\n Enabled=True|False\n )\n \n \n :type Enabled: boolean\n :param Enabled: Describes whether email sending is enabled or disabled for your Amazon SES account in the current AWS Region.\n\n \"\"\"\n pass\n\ndef update_configuration_set_event_destination(ConfigurationSetName=None, EventDestination=None):\n \"\"\"\n Updates the event destination of a configuration set. Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). 
For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestination={\n 'Name': 'string',\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'send'|'reject'|'bounce'|'complaint'|'delivery'|'open'|'click'|'renderingFailure',\n ],\n 'KinesisFirehoseDestination': {\n 'IAMRoleARN': 'string',\n 'DeliveryStreamARN': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'messageTag'|'emailHeader'|'linkTag',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SNSDestination': {\n 'TopicARN': 'string'\n }\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that contains the event destination that you want to update.\n \n\n :type EventDestination: dict\n :param EventDestination: [REQUIRED]\n The event destination object that you want to apply to the specified configuration set.\n Name (string) -- [REQUIRED]The name of the event destination. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 64 characters.\n Enabled (boolean) --Sets whether Amazon SES publishes events to this destination when you send an email with the associated configuration set. Set to true to enable publishing to this destination; set to false to prevent publishing to this destination. The default value is false .\n MatchingEventTypes (list) -- [REQUIRED]The type of email sending events to publish to the event destination.\n (string) --\n KinesisFirehoseDestination (dict) --An object that contains the delivery stream ARN and the IAM role ARN associated with an Amazon Kinesis Firehose event destination.\n IAMRoleARN (string) -- [REQUIRED]The ARN of the IAM role under which Amazon SES publishes email sending events to the Amazon Kinesis Firehose stream.\n DeliveryStreamARN (string) -- [REQUIRED]The ARN of the Amazon Kinesis Firehose stream that email sending events should be published to.\n CloudWatchDestination (dict) --An object that contains the names, default values, and sources of the dimensions associated with an Amazon CloudWatch event destination.\n DimensionConfigurations (list) -- [REQUIRED]A list of dimensions upon which to categorize your emails when you publish email sending events to Amazon CloudWatch.\n (dict) --Contains the dimension configuration to use when you publish email sending events to Amazon CloudWatch.\n For information about publishing email sending events to Amazon CloudWatch, see the Amazon SES Developer Guide .\n DimensionName (string) -- [REQUIRED]The name of an Amazon CloudWatch dimension associated with an email sending metric. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n DimensionValueSource (string) -- [REQUIRED]The place where Amazon SES finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon SES to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail /SendRawEmail API, choose messageTag . 
If you want Amazon SES to use your own email headers, choose emailHeader .\n DefaultDimensionValue (string) -- [REQUIRED]The default value of the dimension that is published to Amazon CloudWatch if you do not provide the value of the dimension when you send an email. The default value must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Contain less than 256 characters.\n \n \n SNSDestination (dict) --An object that contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.\n TopicARN (string) -- [REQUIRED]The ARN of the Amazon SNS topic that email sending events will be published to. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_configuration_set_reputation_metrics_enabled(ConfigurationSetName=None, Enabled=None):\n \"\"\"\n Enables or disables the publishing of reputation metrics for emails sent using a specific configuration set in a given AWS Region. Reputation metrics include bounce and complaint rates. These metrics are published to Amazon CloudWatch. By using CloudWatch, you can create alarms when bounce or complaint rates exceed certain thresholds.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration_set_reputation_metrics_enabled(\n ConfigurationSetName='string',\n Enabled=True|False\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to update.\n \n\n :type Enabled: boolean\n :param Enabled: [REQUIRED]\n Describes whether or not Amazon SES will publish reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch.\n \n\n \"\"\"\n pass\n\ndef update_configuration_set_sending_enabled(ConfigurationSetName=None, Enabled=None):\n \"\"\"\n Enables or disables email sending for messages sent using a specific configuration set in a given AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending for a configuration set when the reputation metrics for that configuration set (such as your bounce or complaint rate) exceed certain thresholds.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration_set_sending_enabled(\n ConfigurationSetName='string',\n Enabled=True|False\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to update.\n \n\n :type Enabled: boolean\n :param Enabled: [REQUIRED]\n Describes whether email sending is enabled or disabled for the configuration set.\n \n\n \"\"\"\n pass\n\ndef update_configuration_set_tracking_options(ConfigurationSetName=None, TrackingOptions=None):\n \"\"\"\n Modifies an association between a configuration set and a custom domain for open and click event tracking.\n By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. 
For information about using custom domains, see the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration_set_tracking_options(\n ConfigurationSetName='string',\n TrackingOptions={\n 'CustomRedirectDomain': 'string'\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set for which you want to update the custom tracking domain.\n \n\n :type TrackingOptions: dict\n :param TrackingOptions: [REQUIRED]\n A domain that is used to redirect email recipients to an Amazon SES-operated domain. This domain captures open and click events generated by Amazon SES emails.\n For more information, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide .\n CustomRedirectDomain (string) --The custom subdomain that will be used to redirect email recipients to the Amazon SES event tracking domain.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_custom_verification_email_template(TemplateName=None, FromEmailAddress=None, TemplateSubject=None, TemplateContent=None, SuccessRedirectionURL=None, FailureRedirectionURL=None):\n \"\"\"\n Updates an existing custom verification email template.\n For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.update_custom_verification_email_template(\n TemplateName='string',\n FromEmailAddress='string',\n TemplateSubject='string',\n TemplateContent='string',\n SuccessRedirectionURL='string',\n FailureRedirectionURL='string'\n )\n \n \n :type TemplateName: string\n :param TemplateName: [REQUIRED]\n The name of the custom verification email template that you want to update.\n \n\n :type FromEmailAddress: string\n :param FromEmailAddress: The email address that the custom verification email is sent from.\n\n :type TemplateSubject: string\n :param TemplateSubject: The subject line of the custom verification email.\n\n :type TemplateContent: string\n :param TemplateContent: The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. 
For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide .\n\n :type SuccessRedirectionURL: string\n :param SuccessRedirectionURL: The URL that the recipient of the verification email is sent to if his or her address is successfully verified.\n\n :type FailureRedirectionURL: string\n :param FailureRedirectionURL: The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.\n\n \"\"\"\n pass\n\ndef update_receipt_rule(RuleSetName=None, Rule=None):\n \"\"\"\n Updates a receipt rule.\n For information about managing receipt rules, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example updates a receipt rule to use an Amazon S3 action:\n Expected Output:\n \n :example: response = client.update_receipt_rule(\n RuleSetName='string',\n Rule={\n 'Name': 'string',\n 'Enabled': True|False,\n 'TlsPolicy': 'Require'|'Optional',\n 'Recipients': [\n 'string',\n ],\n 'Actions': [\n {\n 'S3Action': {\n 'TopicArn': 'string',\n 'BucketName': 'string',\n 'ObjectKeyPrefix': 'string',\n 'KmsKeyArn': 'string'\n },\n 'BounceAction': {\n 'TopicArn': 'string',\n 'SmtpReplyCode': 'string',\n 'StatusCode': 'string',\n 'Message': 'string',\n 'Sender': 'string'\n },\n 'WorkmailAction': {\n 'TopicArn': 'string',\n 'OrganizationArn': 'string'\n },\n 'LambdaAction': {\n 'TopicArn': 'string',\n 'FunctionArn': 'string',\n 'InvocationType': 'Event'|'RequestResponse'\n },\n 'StopAction': {\n 'Scope': 'RuleSet',\n 'TopicArn': 'string'\n },\n 'AddHeaderAction': {\n 'HeaderName': 'string',\n 'HeaderValue': 'string'\n },\n 'SNSAction': {\n 'TopicArn': 'string',\n 'Encoding': 'UTF-8'|'Base64'\n }\n },\n ],\n 'ScanEnabled': True|False\n }\n )\n \n \n :type RuleSetName: string\n :param RuleSetName: [REQUIRED]\n The name of the receipt rule set that the receipt rule belongs to.\n \n\n :type Rule: dict\n :param Rule: [REQUIRED]\n A data structure that contains the updated receipt rule information.\n Name (string) -- [REQUIRED]The name of the receipt rule. The name must:\n This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n Start and end with a letter or number.\n Contain less than 64 characters.\n Enabled (boolean) --If true , the receipt rule is active. The default value is false .\n TlsPolicy (string) --Specifies whether Amazon SES should require that incoming email is delivered over a connection encrypted with Transport Layer Security (TLS). If this parameter is set to Require , Amazon SES will bounce emails that are not received over TLS. The default is Optional .\n Recipients (list) --The recipient domains and email addresses that the receipt rule applies to. If this field is not specified, this rule will match all recipients under all verified domains.\n (string) --\n Actions (list) --An ordered list of actions to perform on messages that match at least one of the recipient email addresses or domains specified in the receipt rule.\n (dict) --An action that Amazon SES can take when it receives an email on behalf of one or more email addresses or domains that you own. 
An instance of this data type can represent only one action.\n For information about setting up receipt rules, see the Amazon SES Developer Guide .\n S3Action (dict) --Saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon SNS.\n TopicArn (string) --The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n BucketName (string) -- [REQUIRED]The name of the Amazon S3 bucket that incoming email will be saved to.\n ObjectKeyPrefix (string) --The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory name that enables you to store similar data under the same directory in a bucket.\n KmsKeyArn (string) --The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:\n To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide .\n For more information about key policies, see the AWS KMS Developer Guide . If you do not specify a master key, Amazon SES will not encrypt your emails.\n Warning\n Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS SDK for Java and AWS SDK for Ruby only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide .\n \n BounceAction (dict) --Rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n SmtpReplyCode (string) -- [REQUIRED]The SMTP reply code, as defined by RFC 5321 .\n StatusCode (string) --The SMTP enhanced status code, as defined by RFC 3463 .\n Message (string) -- [REQUIRED]Human-readable text to include in the bounce message.\n Sender (string) -- [REQUIRED]The email address of the sender of the bounced email. 
This is the address from which the bounce message will be sent.\n WorkmailAction (dict) --Calls Amazon WorkMail and, optionally, publishes a notification to Amazon SNS.\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n OrganizationArn (string) -- [REQUIRED]The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7 . For information about Amazon WorkMail organizations, see the Amazon WorkMail Administrator Guide .\n LambdaAction (dict) --Calls an AWS Lambda function, and optionally, publishes a notification to Amazon SNS.\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n FunctionArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS Lambda function. An example of an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction . For more information about AWS Lambda, see the AWS Lambda Developer Guide .\n InvocationType (string) --The invocation type of the AWS Lambda function. An invocation type of RequestResponse means that the execution of the function will immediately result in a response, and a value of Event means that the function will be invoked asynchronously. The default value is Event . For information about AWS Lambda invocation types, see the AWS Lambda Developer Guide .\n Warning\n There is a 30-second timeout on RequestResponse invocations. You should use Event invocation in most cases. Use RequestResponse only when you want to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set.\n \n StopAction (dict) --Terminates the evaluation of the receipt rule set and optionally publishes a notification to Amazon SNS.\n Scope (string) -- [REQUIRED]The name of the RuleSet that is being stopped.\n TopicArn (string) --The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n AddHeaderAction (dict) --Adds a header to the received email.\n HeaderName (string) -- [REQUIRED]The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.\n HeaderValue (string) -- [REQUIRED]Must be less than 2048 characters, and must not contain newline characters ('\r' or '\n').\n SNSAction (dict) --Publishes the email content within a notification to Amazon SNS.\n TopicArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic . For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n Encoding (string) --The encoding to use for the email within the Amazon SNS notification. UTF-8 is easier to use, but may not preserve all special characters when a message was encoded with a different encoding format. 
Base64 preserves all special characters. The default value is UTF-8.\n \n ScanEnabled (boolean) --If true , then messages that this receipt rule applies to are scanned for spam and viruses. The default value is false .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_template(Template=None):\n \"\"\"\n Updates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide .\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n \n :example: response = client.update_template(\n Template={\n 'TemplateName': 'string',\n 'SubjectPart': 'string',\n 'TextPart': 'string',\n 'HtmlPart': 'string'\n }\n )\n \n \n :type Template: dict\n :param Template: [REQUIRED]\n The content of the email, composed of a subject line, an HTML part, and a text-only part.\n TemplateName (string) -- [REQUIRED]The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.\n SubjectPart (string) --The subject line of the email.\n TextPart (string) --The email body that will be visible to recipients whose email clients do not display HTML.\n HtmlPart (string) --The HTML body of the email.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef verify_domain_dkim(Domain=None):\n \"\"\"\n Returns a set of DKIM tokens for a domain. DKIM tokens are character strings that represent your domain's identity. Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign email originating from that domain.\n You can execute this operation no more than once per second.\n To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled operation.\n For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide .\n See also: AWS API Documentation\n \n Examples\n The following example generates DKIM tokens for a domain that has been verified with Amazon SES:\n Expected Output:\n \n :example: response = client.verify_domain_dkim(\n Domain='string'\n )\n \n \n :type Domain: string\n :param Domain: [REQUIRED]\n The name of the domain to be verified for Easy DKIM signing.\n \n\n :rtype: dict\n :return: {\n 'DkimTokens': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef verify_domain_identity(Domain=None):\n \"\"\"\n Adds a domain to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. For more information about verifying domains, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example starts the domain verification process with Amazon SES:\n Expected Output:\n \n :example: response = client.verify_domain_identity(\n Domain='string'\n )\n \n \n :type Domain: string\n :param Domain: [REQUIRED]\n The domain to be verified.\n \n\n :rtype: dict\n :return: {\n 'VerificationToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef verify_email_address(EmailAddress=None):\n \"\"\"\n Deprecated. 
Use the VerifyEmailIdentity operation to verify a new email address.\n See also: AWS API Documentation\n \n Examples\n The following example starts the email address verification process with Amazon SES:\n Expected Output:\n \n :example: response = client.verify_email_address(\n EmailAddress='string'\n )\n \n \n :type EmailAddress: string\n :param EmailAddress: [REQUIRED]\n The email address to be verified.\n \n\n :return: response = client.verify_email_address(\n EmailAddress='[email protected]',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef verify_email_identity(EmailAddress=None):\n \"\"\"\n Adds an email address to the list of identities for your Amazon SES account in the current AWS region and attempts to verify it. As a result of executing this operation, a verification email is sent to the specified address.\n You can execute this operation no more than once per second.\n See also: AWS API Documentation\n \n Examples\n The following example starts the email address verification process with Amazon SES:\n Expected Output:\n \n :example: response = client.verify_email_identity(\n EmailAddress='string'\n )\n \n \n :type EmailAddress: string\n :param EmailAddress: [REQUIRED]\n The email address to be verified.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6472139358520508, "alphanum_fraction": 0.6544735431671143, "avg_line_length": 66.24468231201172, "blob_id": "7795bd41e92933c0377ea213fb1494b670bdc631", "content_id": "b7cbea8873930aad0704d5d2c819325128093dee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56890, "license_type": "permissive", "max_line_length": 601, "num_lines": 846, "path": "/pyboto3/autoscalingplans.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
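# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not generated from the service model).
# The stubs above document request/response shapes, but at runtime the same
# calls are made on a real client created by boto3. This assumes boto3 is
# installed and AWS credentials are configured; the region, addresses, and
# template name below are hypothetical placeholders.

def _example_send_templated_email():
    import boto3

    client = boto3.client('ses', region_name='us-east-1')

    # Start verification for a sender address (see verify_email_identity
    # above); Amazon SES emails a confirmation link to the address.
    client.verify_email_identity(EmailAddress='[email protected]')

    # Once the address is verified, send a templated email (see
    # send_templated_email above for the full parameter reference).
    response = client.send_templated_email(
        Source='[email protected]',
        Destination={'ToAddresses': ['[email protected]']},
        Template='MyTemplate',
        TemplateData='{"name": "Alice"}',
    )
    print(response['MessageId'])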
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_scaling_plan(ScalingPlanName=None, ApplicationSource=None, ScalingInstructions=None):\n \"\"\"\n Creates a scaling plan.\n See also: AWS API Documentation\n \n \n :example: response = client.create_scaling_plan(\n ScalingPlanName='string',\n ApplicationSource={\n 'CloudFormationStackARN': 'string',\n 'TagFilters': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n ScalingInstructions=[\n {\n 'ServiceNamespace': 'autoscaling'|'ecs'|'ec2'|'rds'|'dynamodb',\n 'ResourceId': 'string',\n 'ScalableDimension': 'autoscaling:autoScalingGroup:DesiredCapacity'|'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'rds:cluster:ReadReplicaCount'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits',\n 'MinCapacity': 123,\n 'MaxCapacity': 123,\n 'TargetTrackingConfigurations': [\n {\n 'PredefinedScalingMetricSpecification': {\n 'PredefinedScalingMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut',\n 'ResourceLabel': 'string'\n },\n 'CustomizedScalingMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'TargetValue': 123.0,\n 'DisableScaleIn': True|False,\n 'ScaleOutCooldown': 123,\n 'ScaleInCooldown': 123,\n 'EstimatedInstanceWarmup': 123\n },\n ],\n 'PredefinedLoadMetricSpecification': {\n 'PredefinedLoadMetricType': 'ASGTotalCPUUtilization'|'ASGTotalNetworkIn'|'ASGTotalNetworkOut'|'ALBTargetGroupRequestCount',\n 'ResourceLabel': 'string'\n },\n 'CustomizedLoadMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'ScheduledActionBufferTime': 123,\n 'PredictiveScalingMaxCapacityBehavior': 'SetForecastCapacityToMaxCapacity'|'SetMaxCapacityToForecastCapacity'|'SetMaxCapacityAboveForecastCapacity',\n 'PredictiveScalingMaxCapacityBuffer': 123,\n 'PredictiveScalingMode': 'ForecastAndScale'|'ForecastOnly',\n 'ScalingPolicyUpdateBehavior': 'KeepExternalPolicies'|'ReplaceExternalPolicies',\n 'DisableDynamicScaling': True|False\n },\n ]\n )\n \n \n :type ScalingPlanName: string\n :param ScalingPlanName: [REQUIRED]\n The name of the scaling plan. Names cannot contain vertical bars, colons, or forward slashes.\n \n\n :type ApplicationSource: dict\n :param ApplicationSource: [REQUIRED]\n A CloudFormation stack or set of tags. 
You can create one scaling plan per application source.\n CloudFormationStackARN (string) --The Amazon Resource Name (ARN) of an AWS CloudFormation stack.\n TagFilters (list) --A set of tags (up to 50).\n (dict) --Represents a tag.\n Key (string) --The tag key.\n Values (list) --The tag values (0 to 20).\n (string) --\n \n \n\n :type ScalingInstructions: list\n :param ScalingInstructions: [REQUIRED]\n The scaling instructions.\n (dict) --Describes a scaling instruction for a scalable resource.\n The scaling instruction is used in combination with a scaling plan, which is a set of instructions for configuring dynamic scaling and predictive scaling for the scalable resources in your application. Each scaling instruction applies to one resource.\n AWS Auto Scaling creates target tracking scaling policies based on the scaling instructions. Target tracking scaling policies adjust the capacity of your scalable resource as required to maintain resource utilization at the target value that you specified.\n AWS Auto Scaling also configures predictive scaling for your Amazon EC2 Auto Scaling groups using a subset of parameters, including the load metric, the scaling metric, the target value for the scaling metric, the predictive scaling mode (forecast and scale or forecast only), and the desired behavior when the forecast capacity exceeds the maximum capacity of the resource. With predictive scaling, AWS Auto Scaling generates forecasts with traffic predictions for the two days ahead and schedules scaling actions that proactively add and remove resource capacity to match the forecast.\n For more information, see the AWS Auto Scaling User Guide .\n ServiceNamespace (string) -- [REQUIRED]The namespace of the AWS service.\n ResourceId (string) -- [REQUIRED]The ID of the resource. This string consists of the resource type and unique identifier.\n Auto Scaling group - The resource type is autoScalingGroup and the unique identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg .\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. 
Example: cluster:my-db-cluster .\n ScalableDimension (string) -- [REQUIRED]The scalable dimension associated with the resource.\n autoscaling:autoScalingGroup:DesiredCapacity - The desired capacity of an Auto Scaling group.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n MinCapacity (integer) -- [REQUIRED]The minimum capacity of the resource.\n MaxCapacity (integer) -- [REQUIRED]The maximum capacity of the resource. The exception to this upper limit is if you specify a non-default setting for PredictiveScalingMaxCapacityBehavior .\n TargetTrackingConfigurations (list) -- [REQUIRED]The structure that defines new target tracking configurations (up to 10). Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling.\n With predictive scaling and dynamic scaling, the resource scales based on the target tracking configuration that provides the largest capacity for both scale in and scale out.\n Condition: The scaling metric must be unique across target tracking configurations.\n (dict) --Describes a target tracking configuration. Used with ScalingInstruction and ScalingPolicy .\n PredefinedScalingMetricSpecification (dict) --A predefined metric.\n PredefinedScalingMetricType (string) -- [REQUIRED]The metric type. The ALBRequestCountPerTarget metric type applies only to Auto Scaling groups, Spot Fleet requests, and ECS services.\n ResourceLabel (string) --Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group for an Application Load Balancer attached to the Auto Scaling group, Spot Fleet request, or ECS service.\n The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:\n app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN.\n targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.\n \n CustomizedScalingMetricSpecification (dict) --A customized metric.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Namespace (string) -- [REQUIRED]The namespace of the metric.\n Dimensions (list) --The dimensions of the metric.\n (dict) --Represents a dimension for a customized metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value of the dimension.\n \n Statistic (string) -- [REQUIRED]The statistic of the metric.\n Unit (string) --The unit of the metric.\n TargetValue (float) -- [REQUIRED]The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking scaling policy is disabled. 
If the value is true , scale in is disabled and the target tracking scaling policy doesn't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable resource.\n The default value is false .\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. This value is not used if the scalable resource is an Auto Scaling group.\n While the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. This value is not used if the scalable resource is an Auto Scaling group.\n The cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, AWS Auto Scaling scales out your scalable target immediately.\n EstimatedInstanceWarmup (integer) --The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. This value is used only if the resource is an Auto Scaling group.\n \n PredefinedLoadMetricSpecification (dict) --The predefined load metric to use for predictive scaling. This parameter or a CustomizedLoadMetricSpecification is required when configuring predictive scaling, and cannot be used otherwise.\n PredefinedLoadMetricType (string) -- [REQUIRED]The metric type.\n ResourceLabel (string) --Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group for an Application Load Balancer attached to the Auto Scaling group.\n The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:\n app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN.\n targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.\n \n CustomizedLoadMetricSpecification (dict) --The customized load metric to use for predictive scaling. This parameter or a PredefinedLoadMetricSpecification is required when configuring predictive scaling, and cannot be used otherwise.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Namespace (string) -- [REQUIRED]The namespace of the metric.\n Dimensions (list) --The dimensions of the metric.\n (dict) --Represents a dimension for a customized metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value of the dimension.\n \n Statistic (string) -- [REQUIRED]The statistic of the metric. Currently, the value must always be Sum .\n Unit (string) --The unit of the metric.\n ScheduledActionBufferTime (integer) --The amount of time, in seconds, to buffer the run time of scheduled scaling actions when scaling out. For example, if the forecast says to add capacity at 10:00 AM, and the buffer time is 5 minutes, then the run time of the corresponding scheduled scaling action will be 9:55 AM. The intention is to give resources time to be provisioned. 
For example, it can take a few minutes to launch an EC2 instance. The actual amount of time required depends on several factors, such as the size of the instance and whether there are startup scripts to complete.\n The value must be less than the forecast interval duration of 3600 seconds (60 minutes). The default is 300 seconds.\n Only valid when configuring predictive scaling.\n PredictiveScalingMaxCapacityBehavior (string) --Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. The default value is SetForecastCapacityToMaxCapacity .\n The following are possible values:\n SetForecastCapacityToMaxCapacity - AWS Auto Scaling cannot scale resource capacity higher than the maximum capacity. The maximum capacity is enforced as a hard limit.\n SetMaxCapacityToForecastCapacity - AWS Auto Scaling may scale resource capacity higher than the maximum capacity to equal but not exceed forecast capacity.\n SetMaxCapacityAboveForecastCapacity - AWS Auto Scaling may scale resource capacity higher than the maximum capacity by a specified buffer value. The intention is to give the target tracking scaling policy extra capacity if unexpected traffic occurs.\n Only valid when configuring predictive scaling.\n PredictiveScalingMaxCapacityBuffer (integer) --The size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. For example, if the buffer is 10, this means a 10 percent buffer, such that if the forecast capacity is 50, and the maximum capacity is 40, then the effective maximum capacity is 55.\n Only valid when configuring predictive scaling. Required if the PredictiveScalingMaxCapacityBehavior is set to SetMaxCapacityAboveForecastCapacity , and cannot be used otherwise.\n The range is 1-100.\n PredictiveScalingMode (string) --The predictive scaling mode. The default value is ForecastAndScale . Otherwise, AWS Auto Scaling forecasts capacity but does not create any scheduled scaling actions based on the capacity forecast.\n ScalingPolicyUpdateBehavior (string) --Controls whether a resource's externally created scaling policies are kept or replaced.\n The default value is KeepExternalPolicies . If the parameter is set to ReplaceExternalPolicies , any scaling policies that are external to AWS Auto Scaling are deleted and new target tracking scaling policies created.\n Only valid when configuring dynamic scaling.\n Condition: The number of existing policies to be replaced must be less than or equal to 50. If there are more than 50 policies to be replaced, AWS Auto Scaling keeps all existing policies and does not create new ones.\n DisableDynamicScaling (boolean) --Controls whether dynamic scaling by AWS Auto Scaling is disabled. 
When dynamic scaling is enabled, AWS Auto Scaling creates target tracking scaling policies based on the specified target tracking configurations.\n The default is enabled (false ).\n \n \n\n :rtype: dict\n :return: {\n 'ScalingPlanVersion': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_scaling_plan(ScalingPlanName=None, ScalingPlanVersion=None):\n \"\"\"\n Deletes the specified scaling plan.\n Deleting a scaling plan deletes the underlying ScalingInstruction for all of the scalable resources that are covered by the plan.\n If the plan has launched resources or has scaling activities in progress, you must delete those resources separately.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_scaling_plan(\n ScalingPlanName='string',\n ScalingPlanVersion=123\n )\n \n \n :type ScalingPlanName: string\n :param ScalingPlanName: [REQUIRED]\n The name of the scaling plan.\n \n\n :type ScalingPlanVersion: integer\n :param ScalingPlanVersion: [REQUIRED]\n The version number of the scaling plan.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_scaling_plan_resources(ScalingPlanName=None, ScalingPlanVersion=None, MaxResults=None, NextToken=None):\n \"\"\"\n Describes the scalable resources in the specified scaling plan.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_scaling_plan_resources(\n ScalingPlanName='string',\n ScalingPlanVersion=123,\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ScalingPlanName: string\n :param ScalingPlanName: [REQUIRED]\n The name of the scaling plan.\n \n\n :type ScalingPlanVersion: integer\n :param ScalingPlanVersion: [REQUIRED]\n The version number of the scaling plan.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of scalable resources to return. The value must be between 1 and 50. 
The default value is 50.\n\n :type NextToken: string\n :param NextToken: The token for the next set of results.\n\n :rtype: dict\n :return: {\n 'ScalingPlanResources': [\n {\n 'ScalingPlanName': 'string',\n 'ScalingPlanVersion': 123,\n 'ServiceNamespace': 'autoscaling'|'ecs'|'ec2'|'rds'|'dynamodb',\n 'ResourceId': 'string',\n 'ScalableDimension': 'autoscaling:autoScalingGroup:DesiredCapacity'|'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'rds:cluster:ReadReplicaCount'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits',\n 'ScalingPolicies': [\n {\n 'PolicyName': 'string',\n 'PolicyType': 'TargetTrackingScaling',\n 'TargetTrackingConfiguration': {\n 'PredefinedScalingMetricSpecification': {\n 'PredefinedScalingMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut',\n 'ResourceLabel': 'string'\n },\n 'CustomizedScalingMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'TargetValue': 123.0,\n 'DisableScaleIn': True|False,\n 'ScaleOutCooldown': 123,\n 'ScaleInCooldown': 123,\n 'EstimatedInstanceWarmup': 123\n }\n },\n ],\n 'ScalingStatusCode': 'Inactive'|'PartiallyActive'|'Active',\n 'ScalingStatusMessage': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Auto Scaling group - The resource type is autoScalingGroup and the unique identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg .\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n \n \"\"\"\n pass\n\ndef describe_scaling_plans(ScalingPlanNames=None, ScalingPlanVersion=None, ApplicationSources=None, MaxResults=None, NextToken=None):\n \"\"\"\n Describes one or more of your scaling plans.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_scaling_plans(\n ScalingPlanNames=[\n 'string',\n ],\n ScalingPlanVersion=123,\n ApplicationSources=[\n {\n 'CloudFormationStackARN': 'string',\n 'TagFilters': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ScalingPlanNames: list\n :param ScalingPlanNames: The names of the scaling plans (up to 10). 
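A hedged pagination sketch for the NextToken/MaxResults fields documented above ('my-scaling-plan' is a placeholder plan name, not part of the API):\n import boto3\n client = boto3.client('autoscaling-plans')\n kwargs = {'ScalingPlanName': 'my-scaling-plan', 'ScalingPlanVersion': 1}\n resources = []\n while True:\n page = client.describe_scaling_plan_resources(**kwargs)\n resources.extend(page.get('ScalingPlanResources', []))\n if not page.get('NextToken'):\n break # no further pages\n kwargs['NextToken'] = page['NextToken']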
If you specify application sources, you cannot specify scaling plan names.\n (string) --\n \n\n :type ScalingPlanVersion: integer\n :param ScalingPlanVersion: The version number of the scaling plan. If you specify a scaling plan version, you must also specify a scaling plan name.\n\n :type ApplicationSources: list\n :param ApplicationSources: The sources for the applications (up to 10). If you specify scaling plan names, you cannot specify application sources.\n (dict) --Represents an application source.\n CloudFormationStackARN (string) --The Amazon Resource Name (ARN) of a AWS CloudFormation stack.\n TagFilters (list) --A set of tags (up to 50).\n (dict) --Represents a tag.\n Key (string) --The tag key.\n Values (list) --The tag values (0 to 20).\n (string) --\n \n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of scalable resources to return. This value can be between 1 and 50. The default value is 50.\n\n :type NextToken: string\n :param NextToken: The token for the next set of results.\n\n :rtype: dict\n :return: {\n 'ScalingPlans': [\n {\n 'ScalingPlanName': 'string',\n 'ScalingPlanVersion': 123,\n 'ApplicationSource': {\n 'CloudFormationStackARN': 'string',\n 'TagFilters': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'ScalingInstructions': [\n {\n 'ServiceNamespace': 'autoscaling'|'ecs'|'ec2'|'rds'|'dynamodb',\n 'ResourceId': 'string',\n 'ScalableDimension': 'autoscaling:autoScalingGroup:DesiredCapacity'|'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'rds:cluster:ReadReplicaCount'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits',\n 'MinCapacity': 123,\n 'MaxCapacity': 123,\n 'TargetTrackingConfigurations': [\n {\n 'PredefinedScalingMetricSpecification': {\n 'PredefinedScalingMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut',\n 'ResourceLabel': 'string'\n },\n 'CustomizedScalingMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'TargetValue': 123.0,\n 'DisableScaleIn': True|False,\n 'ScaleOutCooldown': 123,\n 'ScaleInCooldown': 123,\n 'EstimatedInstanceWarmup': 123\n },\n ],\n 'PredefinedLoadMetricSpecification': {\n 'PredefinedLoadMetricType': 'ASGTotalCPUUtilization'|'ASGTotalNetworkIn'|'ASGTotalNetworkOut'|'ALBTargetGroupRequestCount',\n 'ResourceLabel': 'string'\n },\n 'CustomizedLoadMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'ScheduledActionBufferTime': 123,\n 'PredictiveScalingMaxCapacityBehavior': 'SetForecastCapacityToMaxCapacity'|'SetMaxCapacityToForecastCapacity'|'SetMaxCapacityAboveForecastCapacity',\n 'PredictiveScalingMaxCapacityBuffer': 123,\n 'PredictiveScalingMode': 'ForecastAndScale'|'ForecastOnly',\n 'ScalingPolicyUpdateBehavior': 
'KeepExternalPolicies'|'ReplaceExternalPolicies',\n 'DisableDynamicScaling': True|False\n },\n ],\n 'StatusCode': 'Active'|'ActiveWithProblems'|'CreationInProgress'|'CreationFailed'|'DeletionInProgress'|'DeletionFailed'|'UpdateInProgress'|'UpdateFailed',\n 'StatusMessage': 'string',\n 'StatusStartTime': datetime(2015, 1, 1),\n 'CreationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_scaling_plan_resource_forecast_data(ScalingPlanName=None, ScalingPlanVersion=None, ServiceNamespace=None, ResourceId=None, ScalableDimension=None, ForecastDataType=None, StartTime=None, EndTime=None):\n \"\"\"\n Retrieves the forecast data for a scalable resource.\n Capacity forecasts are represented as predicted values, or data points, that are calculated using historical data points from a specified CloudWatch load metric. Data points are available for up to 56 days.\n See also: AWS API Documentation\n \n \n :example: response = client.get_scaling_plan_resource_forecast_data(\n ScalingPlanName='string',\n ScalingPlanVersion=123,\n ServiceNamespace='autoscaling'|'ecs'|'ec2'|'rds'|'dynamodb',\n ResourceId='string',\n ScalableDimension='autoscaling:autoScalingGroup:DesiredCapacity'|'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'rds:cluster:ReadReplicaCount'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits',\n ForecastDataType='CapacityForecast'|'LoadForecast'|'ScheduledActionMinCapacity'|'ScheduledActionMaxCapacity',\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1)\n )\n \n \n :type ScalingPlanName: string\n :param ScalingPlanName: [REQUIRED]\n The name of the scaling plan.\n \n\n :type ScalingPlanVersion: integer\n :param ScalingPlanVersion: [REQUIRED]\n The version number of the scaling plan.\n \n\n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The ID of the resource. This string consists of the resource type and unique identifier.\n Auto Scaling group - The resource type is autoScalingGroup and the unique identifier is the name of the Auto Scaling group. 
Example: autoScalingGroup/my-asg .\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n \n\n :type ScalableDimension: string\n :param ScalableDimension: [REQUIRED]\n The scalable dimension for the resource.\n \n\n :type ForecastDataType: string\n :param ForecastDataType: [REQUIRED]\n The type of forecast data to get.\n LoadForecast : The load metric forecast.\n CapacityForecast : The capacity forecast.\n ScheduledActionMinCapacity : The minimum capacity for each scheduled scaling action. This data is calculated as the larger of two values: the capacity forecast or the minimum capacity in the scaling instruction.\n ScheduledActionMaxCapacity : The maximum capacity for each scheduled scaling action. The calculation used is determined by the predictive scaling maximum capacity behavior setting in the scaling instruction.\n \n\n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The inclusive start time of the time range for the forecast data to get. The date and time can be at most 56 days before the current date and time.\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The exclusive end time of the time range for the forecast data to get. The maximum time duration between the start and end time is seven days.\n Although this parameter can accept a date and time that is more than two days in the future, the availability of forecast data has limits. AWS Auto Scaling only issues forecasts for periods of two days in advance.\n \n\n :rtype: dict\n :return: {\n 'Datapoints': [\n {\n 'Timestamp': datetime(2015, 1, 1),\n 'Value': 123.0\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
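A hedged sketch of requesting the capacity forecast described above (the plan name and Auto Scaling group are placeholders):\n from datetime import datetime, timedelta\n import boto3\n client = boto3.client('autoscaling-plans')\n now = datetime.utcnow()\n forecast = client.get_scaling_plan_resource_forecast_data(\n ScalingPlanName='my-scaling-plan',\n ScalingPlanVersion=1,\n ServiceNamespace='autoscaling',\n ResourceId='autoScalingGroup/my-asg',\n ScalableDimension='autoscaling:autoScalingGroup:DesiredCapacity',\n ForecastDataType='CapacityForecast',\n StartTime=now,\n EndTime=now + timedelta(days=2), # forecasts cover two days ahead\n )\n for point in forecast['Datapoints']:\n print(point['Timestamp'], point['Value'])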
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef update_scaling_plan(ScalingPlanName=None, ScalingPlanVersion=None, ApplicationSource=None, ScalingInstructions=None):\n \"\"\"\n Updates the specified scaling plan.\n You cannot update a scaling plan if it is in the process of being created, updated, or deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.update_scaling_plan(\n ScalingPlanName='string',\n ScalingPlanVersion=123,\n ApplicationSource={\n 'CloudFormationStackARN': 'string',\n 'TagFilters': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n ScalingInstructions=[\n {\n 'ServiceNamespace': 'autoscaling'|'ecs'|'ec2'|'rds'|'dynamodb',\n 'ResourceId': 'string',\n 'ScalableDimension': 'autoscaling:autoScalingGroup:DesiredCapacity'|'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'rds:cluster:ReadReplicaCount'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits',\n 'MinCapacity': 123,\n 'MaxCapacity': 123,\n 'TargetTrackingConfigurations': [\n {\n 'PredefinedScalingMetricSpecification': {\n 'PredefinedScalingMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut',\n 'ResourceLabel': 'string'\n },\n 'CustomizedScalingMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'TargetValue': 123.0,\n 'DisableScaleIn': True|False,\n 'ScaleOutCooldown': 123,\n 'ScaleInCooldown': 123,\n 'EstimatedInstanceWarmup': 123\n },\n ],\n 'PredefinedLoadMetricSpecification': {\n 'PredefinedLoadMetricType': 'ASGTotalCPUUtilization'|'ASGTotalNetworkIn'|'ASGTotalNetworkOut'|'ALBTargetGroupRequestCount',\n 'ResourceLabel': 'string'\n },\n 'CustomizedLoadMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'ScheduledActionBufferTime': 123,\n 'PredictiveScalingMaxCapacityBehavior': 'SetForecastCapacityToMaxCapacity'|'SetMaxCapacityToForecastCapacity'|'SetMaxCapacityAboveForecastCapacity',\n 'PredictiveScalingMaxCapacityBuffer': 123,\n 'PredictiveScalingMode': 'ForecastAndScale'|'ForecastOnly',\n 'ScalingPolicyUpdateBehavior': 'KeepExternalPolicies'|'ReplaceExternalPolicies',\n 'DisableDynamicScaling': True|False\n },\n ]\n )\n \n \n :type ScalingPlanName: string\n :param ScalingPlanName: [REQUIRED]\n The name of the scaling plan.\n \n\n :type ScalingPlanVersion: integer\n :param ScalingPlanVersion: [REQUIRED]\n The version number of the scaling plan.\n \n\n :type ApplicationSource: dict\n :param ApplicationSource: A CloudFormation stack or set of tags.\n CloudFormationStackARN (string) --The Amazon Resource Name (ARN) of a AWS CloudFormation stack.\n TagFilters (list) --A set of tags (up to 50).\n (dict) --Represents a tag.\n Key 
(string) --The tag key.\n Values (list) --The tag values (0 to 20).\n (string) --\n \n \n\n :type ScalingInstructions: list\n :param ScalingInstructions: The scaling instructions.\n (dict) --Describes a scaling instruction for a scalable resource.\n The scaling instruction is used in combination with a scaling plan, which is a set of instructions for configuring dynamic scaling and predictive scaling for the scalable resources in your application. Each scaling instruction applies to one resource.\n AWS Auto Scaling creates target tracking scaling policies based on the scaling instructions. Target tracking scaling policies adjust the capacity of your scalable resource as required to maintain resource utilization at the target value that you specified.\n AWS Auto Scaling also configures predictive scaling for your Amazon EC2 Auto Scaling groups using a subset of parameters, including the load metric, the scaling metric, the target value for the scaling metric, the predictive scaling mode (forecast and scale or forecast only), and the desired behavior when the forecast capacity exceeds the maximum capacity of the resource. With predictive scaling, AWS Auto Scaling generates forecasts with traffic predictions for the two days ahead and schedules scaling actions that proactively add and remove resource capacity to match the forecast.\n For more information, see the AWS Auto Scaling User Guide .\n ServiceNamespace (string) -- [REQUIRED]The namespace of the AWS service.\n ResourceId (string) -- [REQUIRED]The ID of the resource. This string consists of the resource type and unique identifier.\n Auto Scaling group - The resource type is autoScalingGroup and the unique identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg .\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n ScalableDimension (string) -- [REQUIRED]The scalable dimension associated with the resource.\n autoscaling:autoScalingGroup:DesiredCapacity - The desired capacity of an Auto Scaling group.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n MinCapacity (integer) -- [REQUIRED]The minimum capacity of the resource.\n MaxCapacity (integer) -- [REQUIRED]The maximum capacity of the resource. 
The exception to this upper limit is if you specify a non-default setting for PredictiveScalingMaxCapacityBehavior .\n TargetTrackingConfigurations (list) -- [REQUIRED]The structure that defines new target tracking configurations (up to 10). Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling.\n With predictive scaling and dynamic scaling, the resource scales based on the target tracking configuration that provides the largest capacity for both scale in and scale out.\n Condition: The scaling metric must be unique across target tracking configurations.\n (dict) --Describes a target tracking configuration. Used with ScalingInstruction and ScalingPolicy .\n PredefinedScalingMetricSpecification (dict) --A predefined metric.\n PredefinedScalingMetricType (string) -- [REQUIRED]The metric type. The ALBRequestCountPerTarget metric type applies only to Auto Scaling groups, Spot Fleet requests, and ECS services.\n ResourceLabel (string) --Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group for an Application Load Balancer attached to the Auto Scaling group, Spot Fleet request, or ECS service.\n The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:\n app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN.\n targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.\n \n CustomizedScalingMetricSpecification (dict) --A customized metric.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Namespace (string) -- [REQUIRED]The namespace of the metric.\n Dimensions (list) --The dimensions of the metric.\n (dict) --Represents a dimension for a customized metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value of the dimension.\n \n Statistic (string) -- [REQUIRED]The statistic of the metric.\n Unit (string) --The unit of the metric.\n TargetValue (float) -- [REQUIRED]The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true , scale in is disabled and the target tracking scaling policy doesn't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable resource.\n The default value is false .\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. This value is not used if the scalable resource is an Auto Scaling group.\n While the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. This value is not used if the scalable resource is an Auto Scaling group.\n The cooldown period is used to block subsequent scale in requests until it has expired. 
The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, AWS Auto Scaling scales out your scalable target immediately.\n EstimatedInstanceWarmup (integer) --The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. This value is used only if the resource is an Auto Scaling group.\n \n PredefinedLoadMetricSpecification (dict) --The predefined load metric to use for predictive scaling. This parameter or a CustomizedLoadMetricSpecification is required when configuring predictive scaling, and cannot be used otherwise.\n PredefinedLoadMetricType (string) -- [REQUIRED]The metric type.\n ResourceLabel (string) --Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group for an Application Load Balancer attached to the Auto Scaling group.\n The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:\n app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN.\n targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.\n \n CustomizedLoadMetricSpecification (dict) --The customized load metric to use for predictive scaling. This parameter or a PredefinedLoadMetricSpecification is required when configuring predictive scaling, and cannot be used otherwise.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Namespace (string) -- [REQUIRED]The namespace of the metric.\n Dimensions (list) --The dimensions of the metric.\n (dict) --Represents a dimension for a customized metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value of the dimension.\n \n Statistic (string) -- [REQUIRED]The statistic of the metric. Currently, the value must always be Sum .\n Unit (string) --The unit of the metric.\n ScheduledActionBufferTime (integer) --The amount of time, in seconds, to buffer the run time of scheduled scaling actions when scaling out. For example, if the forecast says to add capacity at 10:00 AM, and the buffer time is 5 minutes, then the run time of the corresponding scheduled scaling action will be 9:55 AM. The intention is to give resources time to be provisioned. For example, it can take a few minutes to launch an EC2 instance. The actual amount of time required depends on several factors, such as the size of the instance and whether there are startup scripts to complete.\n The value must be less than the forecast interval duration of 3600 seconds (60 minutes). The default is 300 seconds.\n Only valid when configuring predictive scaling.\n PredictiveScalingMaxCapacityBehavior (string) --Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. The default value is SetForecastCapacityToMaxCapacity .\n The following are possible values:\n SetForecastCapacityToMaxCapacity - AWS Auto Scaling cannot scale resource capacity higher than the maximum capacity. 
The maximum capacity is enforced as a hard limit.\n SetMaxCapacityToForecastCapacity - AWS Auto Scaling may scale resource capacity higher than the maximum capacity to equal but not exceed forecast capacity.\n SetMaxCapacityAboveForecastCapacity - AWS Auto Scaling may scale resource capacity higher than the maximum capacity by a specified buffer value. The intention is to give the target tracking scaling policy extra capacity if unexpected traffic occurs.\n Only valid when configuring predictive scaling.\n PredictiveScalingMaxCapacityBuffer (integer) --The size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. For example, if the buffer is 10, this means a 10 percent buffer, such that if the forecast capacity is 50, and the maximum capacity is 40, then the effective maximum capacity is 55.\n Only valid when configuring predictive scaling. Required if the PredictiveScalingMaxCapacityBehavior is set to SetMaxCapacityAboveForecastCapacity , and cannot be used otherwise.\n The range is 1-100.\n PredictiveScalingMode (string) --The predictive scaling mode. The default value is ForecastAndScale . Otherwise, AWS Auto Scaling forecasts capacity but does not create any scheduled scaling actions based on the capacity forecast.\n ScalingPolicyUpdateBehavior (string) --Controls whether a resource's externally created scaling policies are kept or replaced.\n The default value is KeepExternalPolicies . If the parameter is set to ReplaceExternalPolicies , any scaling policies that are external to AWS Auto Scaling are deleted and new target tracking scaling policies created.\n Only valid when configuring dynamic scaling.\n Condition: The number of existing policies to be replaced must be less than or equal to 50. If there are more than 50 policies to be replaced, AWS Auto Scaling keeps all existing policies and does not create new ones.\n DisableDynamicScaling (boolean) --Controls whether dynamic scaling by AWS Auto Scaling is disabled. 
When dynamic scaling is enabled, AWS Auto Scaling creates target tracking scaling policies based on the specified target tracking configurations.\n The default is enabled (false ).\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6867934465408325, "alphanum_fraction": 0.6888489127159119, "avg_line_length": 32.834781646728516, "blob_id": "53ad9558528270eb69b23ee5d4453d9b517714cb", "content_id": "c932d0eaacd7a4d6d8f7705ef9f3bb8c4da07ddd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3892, "license_type": "permissive", "max_line_length": 92, "num_lines": 115, "path": "/pyboto3/apigatewaymanagementapi.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
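A hedged sketch of the post_to_connection operation documented below; this client is typically constructed with an endpoint_url pointing at the deployed WebSocket stage, and the API id, region, stage, and connection id shown are placeholders:\n import boto3\n client = boto3.client(\n 'apigatewaymanagementapi',\n endpoint_url='https://abc123.execute-api.us-east-1.amazonaws.com/production',\n )\n client.post_to_connection(\n ConnectionId='L0SM9cOFvHcCIhw=', # placeholder id captured on $connect\n Data=b'{\"message\": \"hello\"}',\n )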
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef post_to_connection(Data=None, ConnectionId=None):\n \"\"\"\n Sends the provided data to the specified connection.\n See also: AWS API Documentation\n \n \n :example: response = client.post_to_connection(\n Data=b'bytes'|file,\n ConnectionId='string'\n )\n \n \n :type Data: bytes or seekable file-like object\n :param Data: [REQUIRED]\n The data to be sent to the client specified by its connection id.\n \n\n :type ConnectionId: string\n :param ConnectionId: [REQUIRED]\n The identifier of the connection that a specific client is using.\n \n\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6079664826393127, "alphanum_fraction": 0.6145614981651306, "avg_line_length": 33.63691329956055, "blob_id": "74bac05c9a6162d9cec796a346f6129d12393acd", "content_id": "ce054335d71f624554756d5e7321a793a316113b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22896, "license_type": "permissive", "max_line_length": 454, "num_lines": 661, "path": "/pyboto3/shield.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_drt_log_bucket(LogBucket=None):\n \"\"\"\n Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your flow logs. 
You can associate up to 10 Amazon S3 buckets with your subscription.\n To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan .\n See also: AWS API Documentation\n \n \n :example: response = client.associate_drt_log_bucket(\n LogBucket='string'\n )\n \n \n :type LogBucket: string\n :param LogBucket: [REQUIRED]\n The Amazon S3 bucket that contains your flow logs.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef associate_drt_role(RoleArn=None):\n \"\"\"\n Authorizes the DDoS Response team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.\n You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn .\n Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see `Attaching and Detaching IAM Policies < https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html>`__ . The role must also trust the service principal drt.shield.amazonaws.com . For more information, see IAM JSON Policy Elements: Principal .\n The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.\n You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service .\n To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan .\n See also: AWS API Documentation\n \n \n :example: response = client.associate_drt_role(\n RoleArn='string'\n )\n \n \n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the role the DRT will use to access your AWS account.\n Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to this role. For more information see `Attaching and Detaching IAM Policies < https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html>`__ .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_protection(Name=None, ResourceArn=None):\n \"\"\"\n Enables AWS Shield Advanced for a specific AWS resource. 
The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, Elastic IP Address, or an Amazon Route 53 hosted zone.\n You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the AWS WAF console . For more information see Getting Started with AWS Shield Advanced and Add AWS Shield Advanced Protection to more AWS Resources .\n See also: AWS API Documentation\n \n \n :example: response = client.create_protection(\n Name='string',\n ResourceArn='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Friendly name for the Protection you are creating.\n \n\n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The ARN (Amazon Resource Name) of the resource to be protected.\n The ARN should be in one of the following formats:\n For an Application Load Balancer: ``arn:aws:elasticloadbalancing:region :account-id :loadbalancer/app/load-balancer-name /load-balancer-id ``\n For an Elastic Load Balancer (Classic Load Balancer): ``arn:aws:elasticloadbalancing:region :account-id :loadbalancer/load-balancer-name ``\n For AWS CloudFront distribution: ``arn:aws:cloudfront::account-id :distribution/distribution-id ``\n For Amazon Route 53: ``arn:aws:route53:::hostedzone/hosted-zone-id ``\n For an Elastic IP address: ``arn:aws:ec2:region :account-id :eip-allocation/allocation-id ``\n \n\n :rtype: dict\n :return: {\n 'ProtectionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_subscription():\n \"\"\"\n Activates AWS Shield Advanced for an account.\n As part of this request you can specify EmergencySettings that automatically grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf .\n When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.\n See also: AWS API Documentation\n \n \n :example: response = client.create_subscription()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_protection(ProtectionId=None):\n \"\"\"\n Deletes an AWS Shield Advanced Protection .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_protection(\n ProtectionId='string'\n )\n \n \n :type ProtectionId: string\n :param ProtectionId: [REQUIRED]\n The unique identifier (ID) for the Protection object to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_subscription():\n \"\"\"\n Removes AWS Shield Advanced from an account. AWS Shield Advanced requires a 1-year subscription commitment. 
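A hedged sketch of the create_protection operation documented above, using the CloudFront ARN format it lists (the account id and distribution id are placeholders):\n import boto3\n client = boto3.client('shield')\n response = client.create_protection(\n Name='my-cloudfront-protection',\n ResourceArn='arn:aws:cloudfront::123456789012:distribution/EDFDVBD6EXAMPLE',\n )\n protection_id = response['ProtectionId'] # needed later for delete_protection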
You cannot delete a subscription prior to the completion of that commitment.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_subscription()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_attack(AttackId=None):\n \"\"\"\n Describes the details of a DDoS attack.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_attack(\n AttackId='string'\n )\n \n \n :type AttackId: string\n :param AttackId: [REQUIRED]\n The unique identifier (ID) for the attack that to be described.\n \n\n :rtype: dict\n :return: {\n 'Attack': {\n 'AttackId': 'string',\n 'ResourceArn': 'string',\n 'SubResources': [\n {\n 'Type': 'IP'|'URL',\n 'Id': 'string',\n 'AttackVectors': [\n {\n 'VectorType': 'string',\n 'VectorCounters': [\n {\n 'Name': 'string',\n 'Max': 123.0,\n 'Average': 123.0,\n 'Sum': 123.0,\n 'N': 123,\n 'Unit': 'string'\n },\n ]\n },\n ],\n 'Counters': [\n {\n 'Name': 'string',\n 'Max': 123.0,\n 'Average': 123.0,\n 'Sum': 123.0,\n 'N': 123,\n 'Unit': 'string'\n },\n ]\n },\n ],\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'AttackCounters': [\n {\n 'Name': 'string',\n 'Max': 123.0,\n 'Average': 123.0,\n 'Sum': 123.0,\n 'N': 123,\n 'Unit': 'string'\n },\n ],\n 'AttackProperties': [\n {\n 'AttackLayer': 'NETWORK'|'APPLICATION',\n 'AttackPropertyIdentifier': 'DESTINATION_URL'|'REFERRER'|'SOURCE_ASN'|'SOURCE_COUNTRY'|'SOURCE_IP_ADDRESS'|'SOURCE_USER_AGENT',\n 'TopContributors': [\n {\n 'Name': 'string',\n 'Value': 123\n },\n ],\n 'Unit': 'BITS'|'BYTES'|'PACKETS'|'REQUESTS',\n 'Total': 123\n },\n ],\n 'Mitigations': [\n {\n 'MitigationName': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_drt_access():\n \"\"\"\n Returns the current role and list of Amazon S3 log buckets used by the DDoS Response team (DRT) to access your AWS account while assisting with attack mitigation.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_drt_access()\n \n \n :rtype: dict\n :return: {\n 'RoleArn': 'string',\n 'LogBucketList': [\n 'string',\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_emergency_contact_settings():\n \"\"\"\n Lists the email addresses that the DRT can use to contact you during a suspected attack.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_emergency_contact_settings()\n \n \n :rtype: dict\n :return: {\n 'EmergencyContactList': [\n {\n 'EmailAddress': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_protection(ProtectionId=None):\n \"\"\"\n Lists the details of a Protection object.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_protection(\n ProtectionId='string'\n )\n \n \n :type ProtectionId: string\n :param ProtectionId: [REQUIRED]\n The unique identifier (ID) for the Protection object that is described.\n \n\n :rtype: dict\n :return: {\n 'Protection': {\n 'Id': 'string',\n 'Name': 'string',\n 'ResourceArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_subscription():\n \"\"\"\n Provides details about the AWS Shield Advanced subscription for an account.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_subscription()\n \n \n :rtype: dict\n :return: {\n 'Subscription': {\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'TimeCommitmentInSeconds': 123,\n 'AutoRenew': 'ENABLED'|'DISABLED',\n 'Limits': [\n {\n 'Type': 'string',\n 'Max': 123\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef 
disassociate_drt_log_bucket(LogBucket=None):\n \"\"\"\n Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your flow logs.\n To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan . However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_drt_log_bucket(\n LogBucket='string'\n )\n \n \n :type LogBucket: string\n :param LogBucket: [REQUIRED]\n The Amazon S3 bucket that contains your flow logs.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disassociate_drt_role():\n \"\"\"\n Removes the DDoS Response team's (DRT) access to your AWS account.\n To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan . However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_drt_role()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_subscription_state():\n \"\"\"\n Returns the SubscriptionState , either Active or Inactive .\n See also: AWS API Documentation\n \n \n :example: response = client.get_subscription_state()\n \n \n :rtype: dict\n :return: {\n 'SubscriptionState': 'ACTIVE'|'INACTIVE'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_attacks(ResourceArns=None, StartTime=None, EndTime=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns all ongoing DDoS attacks or all DDoS attacks during a specified time period.\n See also: AWS API Documentation\n \n \n :example: response = client.list_attacks(\n ResourceArns=[\n 'string',\n ],\n StartTime={\n 'FromInclusive': datetime(2015, 1, 1),\n 'ToExclusive': datetime(2015, 1, 1)\n },\n EndTime={\n 'FromInclusive': datetime(2015, 1, 1),\n 'ToExclusive': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ResourceArns: list\n :param ResourceArns: The ARN (Amazon Resource Name) of the resource that was attacked. If this is left blank, all applicable resources for this account will be included.\n (string) --\n \n\n :type StartTime: dict\n :param StartTime: The start of the time period for the attacks. This is a timestamp type. The sample request above indicates a number type because the default used by WAF is Unix time in seconds. However any valid timestamp format is allowed.\n FromInclusive (datetime) --The start time, in Unix time in seconds. For more information see timestamp .\n ToExclusive (datetime) --The end time, in Unix time in seconds. For more information see timestamp .\n \n\n :type EndTime: dict\n :param EndTime: The end of the time period for the attacks. This is a timestamp type. The sample request above indicates a number type because the default used by WAF is Unix time in seconds. However any valid timestamp format is allowed.\n FromInclusive (datetime) --The start time, in Unix time in seconds. For more information see timestamp .\n ToExclusive (datetime) --The end time, in Unix time in seconds. For more information see timestamp .\n \n\n :type NextToken: string\n :param NextToken: The ListAttacksRequest.NextMarker value from a previous call to ListAttacksRequest . Pass null if this is the first call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of AttackSummary objects to be returned. If this is left blank, the first 20 results will be returned.\n This is a maximum value; it is possible that AWS WAF will return the results in smaller batches. That is, the number of AttackSummary objects returned could be less than MaxResults , even if there are still more AttackSummary objects yet to return. If there are more AttackSummary objects to return, AWS WAF will always also return a NextToken .\n \n\n :rtype: dict\n :return: {\n 'AttackSummaries': [\n {\n 'AttackId': 'string',\n 'ResourceArn': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'AttackVectors': [\n {\n 'VectorType': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n UDP_TRAFFIC\n UDP_FRAGMENT\n GENERIC_UDP_REFLECTION\n DNS_REFLECTION\n NTP_REFLECTION\n CHARGEN_REFLECTION\n SSDP_REFLECTION\n PORT_MAPPER\n RIP_REFLECTION\n SNMP_REFLECTION\n MSSQL_REFLECTION\n NET_BIOS_REFLECTION\n SYN_FLOOD\n ACK_FLOOD\n REQUEST_FLOOD\n \n \"\"\"\n pass\n\ndef list_protections(NextToken=None, MaxResults=None):\n \"\"\"\n Lists all Protection objects for the account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_protections(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: The ListProtectionsRequest.NextToken value from a previous call to ListProtections . 
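A hedged sketch combining the list_attacks and describe_attack operations documented above to review the last seven days of activity:\n from datetime import datetime, timedelta\n import boto3\n client = boto3.client('shield')\n now = datetime.utcnow()\n attacks = client.list_attacks(\n StartTime={'FromInclusive': now - timedelta(days=7)},\n EndTime={'ToExclusive': now},\n MaxResults=20,\n )\n for summary in attacks['AttackSummaries']:\n detail = client.describe_attack(AttackId=summary['AttackId'])\n print(summary['ResourceArn'], detail['Attack']['StartTime'])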
Pass null if this is the first call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of Protection objects to be returned. If this is left blank, the first 20 results will be returned.\n This is a maximum value; it is possible that AWS WAF will return the results in smaller batches. That is, the number of Protection objects returned could be less than MaxResults , even if there are still more Protection objects yet to return. If there are more Protection objects to return, AWS WAF will always also return a NextToken .\n \n\n :rtype: dict\n :return: {\n 'Protections': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'ResourceArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_emergency_contact_settings(EmergencyContactList=None):\n \"\"\"\n Updates the details of the list of email addresses that the DRT can use to contact you during a suspected attack.\n See also: AWS API Documentation\n \n \n :example: response = client.update_emergency_contact_settings(\n EmergencyContactList=[\n {\n 'EmailAddress': 'string'\n },\n ]\n )\n \n \n :type EmergencyContactList: list\n :param EmergencyContactList: A list of email addresses that the DRT can use to contact you during a suspected attack.\n (dict) --Contact information that the DRT can use to contact you during a suspected attack.\n EmailAddress (string) -- [REQUIRED]An email address that the DRT can use to contact you during a suspected attack.\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_subscription(AutoRenew=None):\n \"\"\"\n Updates the details of an existing subscription. Only enter values for parameters you want to change. Empty parameters are not updated.\n See also: AWS API Documentation\n \n \n :example: response = client.update_subscription(\n AutoRenew='ENABLED'|'DISABLED'\n )\n \n \n :type AutoRenew: string\n :param AutoRenew: When you initially create a subscription, AutoRenew is set to ENABLED . If ENABLED , the subscription will be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request. 
If the UpdateSubscription request does not include a value for AutoRenew , the existing value for AutoRenew remains unchanged.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6150544285774231, "alphanum_fraction": 0.6238097548484802, "avg_line_length": 45.40790557861328, "blob_id": "22b86fb2fa32f5e8167b58c333f3e2fea65aae77", "content_id": "34f33a5f7cdc9a730f7aad113f9715a3a80281a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58707, "license_type": "permissive", "max_line_length": 530, "num_lines": 1265, "path": "/pyboto3/mediaconnect.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_flow_outputs(FlowArn=None, Outputs=None):\n    \"\"\"\n    Adds outputs to an existing flow. You can create up to 20 outputs per flow.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.add_flow_outputs(\n        FlowArn='string',\n        Outputs=[\n            {\n                'Description': 'string',\n                'Destination': 'string',\n                'Encryption': {\n                    'Algorithm': 'aes128'|'aes192'|'aes256',\n                    'KeyType': 'static-key',\n                    'RoleArn': 'string',\n                    'SecretArn': 'string'\n                },\n                'MaxLatency': 123,\n                'Name': 'string',\n                'Port': 123,\n                'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n                'SmoothingLatency': 123,\n                'StreamId': 'string'\n            },\n        ]\n    )\n    \n    \n    :type FlowArn: string\n    :param FlowArn: [REQUIRED] The flow that you want to add outputs to.\n\n    :type Outputs: list\n    :param Outputs: [REQUIRED] A list of outputs that you want to add.\n    (dict) -- The output that you want to add to this flow.\n    Description (string) -- A description of the output. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the end user.\n    Destination (string) -- [REQUIRED] The IP address from which video will be sent to output destinations.\n    Encryption (dict) -- The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n    Algorithm (string) -- [REQUIRED] The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n    KeyType (string) -- The type of key that is used for the encryption. 
If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- [REQUIRED] The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- [REQUIRED] The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n Name (string) -- The name of the output. This value must be unique within the current flow.\n Port (integer) -- [REQUIRED] The port to use when content is distributed to this output.\n Protocol (string) -- [REQUIRED] The protocol to use for the output.\n SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n \n\n :rtype: dict\n :return: {\n 'FlowArn': 'string',\n 'Outputs': [\n {\n 'Description': 'string',\n 'Destination': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'EntitlementArn': 'string',\n 'MediaLiveInputArn': 'string',\n 'Name': 'string',\n 'OutputArn': 'string',\n 'Port': 123,\n 'Transport': {\n 'MaxBitrate': 123,\n 'MaxLatency': 123,\n 'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n 'SmoothingLatency': 123,\n 'StreamId': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n (dict) -- AWS Elemental MediaConnect added the outputs successfully.\n FlowArn (string) -- The ARN of the flow that these outputs were added to.\n Outputs (list) -- The details of the newly added outputs.\n (dict) -- The settings for an output.\n Description (string) -- A description of the output.\n Destination (string) -- The address where you want to send the output.\n Encryption (dict) -- The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n \n EntitlementArn (string) -- The ARN of the entitlement on the originator''s flow. This value is relevant only on entitled flows.\n MediaLiveInputArn (string) -- The input ARN of the AWS Elemental MediaLive channel. This parameter is relevant only for outputs that were added by creating a MediaLive input.\n Name (string) -- The name of the output. 
This value must be unique within the current flow.\n OutputArn (string) -- The ARN of the output.\n Port (integer) -- The port to use when content is distributed to this output.\n Transport (dict) -- Attributes related to the transport stream that are used in the output.\n MaxBitrate (integer) -- The smoothing max bitrate for RTP and RTP-FEC streams.\n MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n Protocol (string) -- The protocol that is used by the source or output.\n SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n \n \n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_flow(AvailabilityZone=None, Entitlements=None, Name=None, Outputs=None, Source=None):\n \"\"\"\n Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50).\n See also: AWS API Documentation\n \n \n :example: response = client.create_flow(\n AvailabilityZone='string',\n Entitlements=[\n {\n 'Description': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'Name': 'string',\n 'Subscribers': [\n 'string',\n ]\n },\n ],\n Name='string',\n Outputs=[\n {\n 'Description': 'string',\n 'Destination': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'MaxLatency': 123,\n 'Name': 'string',\n 'Port': 123,\n 'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n 'SmoothingLatency': 123,\n 'StreamId': 'string'\n },\n ],\n Source={\n 'Decryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'Description': 'string',\n 'EntitlementArn': 'string',\n 'IngestPort': 123,\n 'MaxBitrate': 123,\n 'MaxLatency': 123,\n 'Name': 'string',\n 'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n 'StreamId': 'string',\n 'WhitelistCidr': 'string'\n }\n )\n \n \n :type AvailabilityZone: string\n :param AvailabilityZone: The Availability Zone that you want to create the flow in. These options are limited to the Availability Zones within the current AWS Region.\n\n :type Entitlements: list\n :param Entitlements: The entitlements that you want to grant on a flow.\n (dict) -- The entitlements that you want to grant on a flow.\n Description (string) -- A description of the entitlement. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the subscriber or end user.\n Encryption (dict) -- The type of encryption that will be used on the output that is associated with this entitlement.\n Algorithm (string) -- [REQUIRED] The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. 
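A hedged sketch of the can_paginate helper above (its companion get_paginator appears later in this module): probe before requesting a paginator, since whether list_flows is pageable depends on the installed botocore data:

import boto3

client = boto3.client('mediaconnect')

# Only ask for a paginator when the operation supports one.
if client.can_paginate('list_flows'):
    for page in client.get_paginator('list_flows').paginate():
        for flow in page['Flows']:
            print(flow['Name'], flow['Status'])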
If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- [REQUIRED] The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- [REQUIRED] The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n Name (string) -- The name of the entitlement. This value must be unique within the current flow.\n Subscribers (list) -- [REQUIRED] The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flows using your content as the source.\n (string) --\n \n \n\n :type Name: string\n :param Name: [REQUIRED] The name of the flow.\n\n :type Outputs: list\n :param Outputs: The outputs that you want to add to this flow.\n (dict) -- The output that you want to add to this flow.\n Description (string) -- A description of the output. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the end user.\n Destination (string) -- [REQUIRED] The IP address from which video will be sent to output destinations.\n Encryption (dict) -- The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n Algorithm (string) -- [REQUIRED] The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- [REQUIRED] The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- [REQUIRED] The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n Name (string) -- The name of the output. This value must be unique within the current flow.\n Port (integer) -- [REQUIRED] The port to use when content is distributed to this output.\n Protocol (string) -- [REQUIRED] The protocol to use for the output.\n SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n \n\n :type Source: dict\n :param Source: [REQUIRED] The settings for the source of the flow.\n Decryption (dict) -- The type of encryption that is used on the content ingested from this source.\n Algorithm (string) -- [REQUIRED] The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- [REQUIRED] The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- [REQUIRED] The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n Description (string) -- A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.\n EntitlementArn (string) -- The ARN of the entitlement that allows you to subscribe to this flow. 
The entitlement is set by the flow originator, and the ARN is generated as part of the originator's flow.\n            IngestPort (integer) -- The port that the flow will be listening on for incoming content.\n            MaxBitrate (integer) -- The smoothing max bitrate for RTP and RTP-FEC streams.\n            MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n            Name (string) -- The name of the source.\n            Protocol (string) -- The protocol that is used by the source.\n            StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n            WhitelistCidr (string) -- The range of IP addresses that should be allowed to contribute content to your source. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.\n            \n\n    :rtype: dict\n    :return: {\n        'Flow': {\n            'AvailabilityZone': 'string',\n            'Description': 'string',\n            'EgressIp': 'string',\n            'Entitlements': [\n                {\n                    'Description': 'string',\n                    'Encryption': {\n                        'Algorithm': 'aes128'|'aes192'|'aes256',\n                        'KeyType': 'static-key',\n                        'RoleArn': 'string',\n                        'SecretArn': 'string'\n                    },\n                    'EntitlementArn': 'string',\n                    'Name': 'string',\n                    'Subscribers': [\n                        'string',\n                    ]\n                },\n            ],\n            'FlowArn': 'string',\n            'Name': 'string',\n            'Outputs': [\n                {\n                    'Description': 'string',\n                    'Destination': 'string',\n                    'Encryption': {\n                        'Algorithm': 'aes128'|'aes192'|'aes256',\n                        'KeyType': 'static-key',\n                        'RoleArn': 'string',\n                        'SecretArn': 'string'\n                    },\n                    'EntitlementArn': 'string',\n                    'MediaLiveInputArn': 'string',\n                    'Name': 'string',\n                    'OutputArn': 'string',\n                    'Port': 123,\n                    'Transport': {\n                        'MaxBitrate': 123,\n                        'MaxLatency': 123,\n                        'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n                        'SmoothingLatency': 123,\n                        'StreamId': 'string'\n                    }\n                },\n            ],\n            'Source': {\n                'Decryption': {\n                    'Algorithm': 'aes128'|'aes192'|'aes256',\n                    'KeyType': 'static-key',\n                    'RoleArn': 'string',\n                    'SecretArn': 'string'\n                },\n                'Description': 'string',\n                'EntitlementArn': 'string',\n                'IngestIp': 'string',\n                'IngestPort': 123,\n                'Name': 'string',\n                'SourceArn': 'string',\n                'Transport': {\n                    'MaxBitrate': 123,\n                    'MaxLatency': 123,\n                    'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n                    'SmoothingLatency': 123,\n                    'StreamId': 'string'\n                },\n                'WhitelistCidr': 'string'\n            },\n            'Status': 'STANDBY'|'ACTIVE'|'UPDATING'|'DELETING'|'STARTING'|'STOPPING'|'ERROR'\n        }\n    }\n    \n    \n    :returns: \n    (dict) -- AWS Elemental MediaConnect created the new flow successfully.\n    Flow (dict) -- The settings for a flow, including its source, outputs, and entitlements.\n    AvailabilityZone (string) -- The Availability Zone that you want to create the flow in. These options are limited to the Availability Zones within the current AWS Region.\n    Description (string) -- A description of the flow. This value is not used or seen outside of the current AWS Elemental MediaConnect account.\n    EgressIp (string) -- The IP address from which video will be sent to output destinations.\n    Entitlements (list) -- The entitlements in this flow.\n    (dict) -- The settings for a flow entitlement.\n    Description (string) -- A description of the entitlement.\n    Encryption (dict) -- The type of encryption that will be used on the output that is associated with this entitlement.\n    Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n    KeyType (string) -- The type of key that is used for the encryption. 
If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n \n EntitlementArn (string) -- The ARN of the entitlement.\n Name (string) -- The name of the entitlement.\n Subscribers (list) -- The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flow using your content as the source.\n (string) --\n \n \n \n \n \n \n FlowArn (string) -- The Amazon Resource Name (ARN), a unique identifier for any AWS resource, of the flow.\n Name (string) -- The name of the flow.\n Outputs (list) -- The outputs in this flow.\n (dict) -- The settings for an output.\n Description (string) -- A description of the output.\n Destination (string) -- The address where you want to send the output.\n Encryption (dict) -- The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n \n EntitlementArn (string) -- The ARN of the entitlement on the originator''s flow. This value is relevant only on entitled flows.\n MediaLiveInputArn (string) -- The input ARN of the AWS Elemental MediaLive channel. This parameter is relevant only for outputs that were added by creating a MediaLive input.\n Name (string) -- The name of the output. This value must be unique within the current flow.\n OutputArn (string) -- The ARN of the output.\n Port (integer) -- The port to use when content is distributed to this output.\n Transport (dict) -- Attributes related to the transport stream that are used in the output.\n MaxBitrate (integer) -- The smoothing max bitrate for RTP and RTP-FEC streams.\n MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n Protocol (string) -- The protocol that is used by the source or output.\n SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n \n \n \n \n \n \n Source (dict) -- The settings for the source of the flow.\n Decryption (dict) -- The type of encryption that is used on the content ingested from this source.\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. 
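A minimal create_flow sketch built only from the parameters documented above; all names and network values are hypothetical placeholders:

import boto3

client = boto3.client('mediaconnect')

response = client.create_flow(
    Name='example-broadcast',
    Source={
        'Name': 'primary-contribution',
        'Protocol': 'zixi-push',
        'IngestPort': 2088,
        'WhitelistCidr': '203.0.113.0/24',  # contributors allowed to push content
    },
)
flow_arn = response['Flow']['FlowArn']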
If no keyType is provided, the service will use the default setting (static-key).\n    RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n    SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n    \n    \n    Description (string) -- A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.\n    EntitlementArn (string) -- The ARN of the entitlement that allows you to subscribe to content that comes from another AWS account. The entitlement is set by the content originator and the ARN is generated as part of the originator's flow.\n    IngestIp (string) -- The IP address that the flow will be listening on for incoming content.\n    IngestPort (integer) -- The port that the flow will be listening on for incoming content.\n    Name (string) -- The name of the source.\n    SourceArn (string) -- The ARN of the source.\n    Transport (dict) -- Attributes related to the transport stream that are used in the source.\n    MaxBitrate (integer) -- The smoothing max bitrate for RTP and RTP-FEC streams.\n    MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n    Protocol (string) -- The protocol that is used by the source or output.\n    SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n    StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n    \n    \n    WhitelistCidr (string) -- The range of IP addresses that should be allowed to contribute content to your source. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.\n    \n    \n    Status (string) -- The current status of the flow.\n    \n    \n    \n    \n    \n    \"\"\"\n    pass\n\ndef delete_flow(FlowArn=None):\n    \"\"\"\n    Deletes a flow. Before you can delete a flow, you must stop the flow.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_flow(\n        FlowArn='string'\n    )\n    \n    \n    :type FlowArn: string\n    :param FlowArn: [REQUIRED] The ARN of the flow that you want to delete.\n\n    :rtype: dict\n    :return: {\n        'FlowArn': 'string',\n        'Status': 'STANDBY'|'ACTIVE'|'UPDATING'|'DELETING'|'STARTING'|'STOPPING'|'ERROR'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef describe_flow(FlowArn=None):\n    \"\"\"\n    Displays the details of a flow. 
The response includes the flow ARN, name, and Availability Zone, as well as details about the source, outputs, and entitlements.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_flow(\n FlowArn='string'\n )\n \n \n :type FlowArn: string\n :param FlowArn: [REQUIRED] The ARN of the flow that you want to describe.\n\n :rtype: dict\n :return: {\n 'Flow': {\n 'AvailabilityZone': 'string',\n 'Description': 'string',\n 'EgressIp': 'string',\n 'Entitlements': [\n {\n 'Description': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'EntitlementArn': 'string',\n 'Name': 'string',\n 'Subscribers': [\n 'string',\n ]\n },\n ],\n 'FlowArn': 'string',\n 'Name': 'string',\n 'Outputs': [\n {\n 'Description': 'string',\n 'Destination': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'EntitlementArn': 'string',\n 'MediaLiveInputArn': 'string',\n 'Name': 'string',\n 'OutputArn': 'string',\n 'Port': 123,\n 'Transport': {\n 'MaxBitrate': 123,\n 'MaxLatency': 123,\n 'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n 'SmoothingLatency': 123,\n 'StreamId': 'string'\n }\n },\n ],\n 'Source': {\n 'Decryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'Description': 'string',\n 'EntitlementArn': 'string',\n 'IngestIp': 'string',\n 'IngestPort': 123,\n 'Name': 'string',\n 'SourceArn': 'string',\n 'Transport': {\n 'MaxBitrate': 123,\n 'MaxLatency': 123,\n 'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n 'SmoothingLatency': 123,\n 'StreamId': 'string'\n },\n 'WhitelistCidr': 'string'\n },\n 'Status': 'STANDBY'|'ACTIVE'|'UPDATING'|'DELETING'|'STARTING'|'STOPPING'|'ERROR'\n },\n 'Messages': {\n 'Errors': [\n 'string',\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef grant_flow_entitlements(Entitlements=None, FlowArn=None):\n \"\"\"\n Grants entitlements to an existing flow.\n See also: AWS API Documentation\n \n \n :example: response = client.grant_flow_entitlements(\n Entitlements=[\n {\n 'Description': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'Name': 'string',\n 'Subscribers': [\n 'string',\n ]\n },\n ],\n FlowArn='string'\n )\n \n \n :type Entitlements: list\n :param Entitlements: [REQUIRED] The list of entitlements that you want to grant.\n (dict) -- The entitlements that you want to grant on a flow.\n Description (string) -- A description of the entitlement. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the subscriber or end user.\n Encryption (dict) -- The type of encryption that will be used on the output that is associated with this entitlement.\n Algorithm (string) -- [REQUIRED] The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- [REQUIRED] The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- [REQUIRED] The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n Name (string) -- The name of the entitlement. This value must be unique within the current flow.\n Subscribers (list) -- [REQUIRED] The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flows using your content as the source.\n (string) --\n \n \n\n :type FlowArn: string\n :param FlowArn: [REQUIRED] The flow that you want to grant entitlements on.\n\n :rtype: dict\n :return: {\n 'Entitlements': [\n {\n 'Description': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'EntitlementArn': 'string',\n 'Name': 'string',\n 'Subscribers': [\n 'string',\n ]\n },\n ],\n 'FlowArn': 'string'\n }\n \n \n :returns: \n (dict) -- AWS Elemental MediaConnect granted the entitlements successfully.\n Entitlements (list) -- The entitlements that were just granted.\n (dict) -- The settings for a flow entitlement.\n Description (string) -- A description of the entitlement.\n Encryption (dict) -- The type of encryption that will be used on the output that is associated with this entitlement.\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. 
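A sketch of granting a single entitlement on an existing flow with grant_flow_entitlements; the flow ARN and subscriber account ID are hypothetical placeholders:

import boto3

client = boto3.client('mediaconnect')

granted = client.grant_flow_entitlements(
    FlowArn='arn:aws:mediaconnect:us-east-1:111122223333:flow:example',  # placeholder
    Entitlements=[{
        'Name': 'affiliate-feed',
        'Subscribers': ['444455556666'],  # AWS account allowed to use this content
    }],
)
entitlement_arn = granted['Entitlements'][0]['EntitlementArn']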
If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n \n EntitlementArn (string) -- The ARN of the entitlement.\n Name (string) -- The name of the entitlement.\n Subscribers (list) -- The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flow using your content as the source.\n (string) --\n \n \n \n \n \n \n FlowArn (string) -- The ARN of the flow that these entitlements were granted to.\n \n \n \n \"\"\"\n pass\n\ndef list_entitlements(MaxResults=None, NextToken=None):\n \"\"\"\n Displays a list of all entitlements that have been granted to this account. This request returns 20 results per page.\n See also: AWS API Documentation\n \n \n :example: response = client.list_entitlements(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return per API request. For example, you submit a ListEntitlements request with MaxResults set at 5. Although 20 items match your request, the service returns no more than the first 5 items. (The service also returns a NextToken value that you can use to fetch the next batch of results.) The service might return fewer results than the MaxResults value. If MaxResults is not included in the request, the service defaults to pagination with a maximum of 20 results per page.\n\n :type NextToken: string\n :param NextToken: The token that identifies which batch of results that you want to see. For example, you submit a ListEntitlements request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListEntitlements request a second time and specify the NextToken value.\n\n :rtype: dict\n :return: {\n 'Entitlements': [\n {\n 'EntitlementArn': 'string',\n 'EntitlementName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- AWS Elemental MediaConnect returned the list of entitlements successfully.\n Entitlements (list) -- A list of entitlements that have been granted to you from other AWS accounts.\n (dict) -- An entitlement that has been granted to you from other AWS accounts.\n EntitlementArn (string) -- The ARN of the entitlement.\n EntitlementName (string) -- The name of the entitlement.\n \n \n \n \n NextToken (string) -- The token that identifies which batch of results that you want to see. For example, you submit a ListEntitlements request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListEntitlements request a second time and specify the NextToken value.\n \n \n \n \"\"\"\n pass\n\ndef list_flows(MaxResults=None, NextToken=None):\n \"\"\"\n Displays a list of flows that are associated with this account. This request returns a paginated result.\n See also: AWS API Documentation\n \n \n :example: response = client.list_flows(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return per API request. For example, you submit a ListFlows request with MaxResults set at 5. 
Although 20 items match your request, the service returns no more than the first 5 items. (The service also returns a NextToken value that you can use to fetch the next batch of results.) The service might return fewer results than the MaxResults value. If MaxResults is not included in the request, the service defaults to pagination with a maximum of 10 results per page.\n\n :type NextToken: string\n :param NextToken: The token that identifies which batch of results that you want to see. For example, you submit a ListFlows request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListFlows request a second time and specify the NextToken value.\n\n :rtype: dict\n :return: {\n 'Flows': [\n {\n 'AvailabilityZone': 'string',\n 'Description': 'string',\n 'FlowArn': 'string',\n 'Name': 'string',\n 'SourceType': 'OWNED'|'ENTITLED',\n 'Status': 'STANDBY'|'ACTIVE'|'UPDATING'|'DELETING'|'STARTING'|'STOPPING'|'ERROR'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- AWS Elemental MediaConnect returned the list of flows successfully.\n Flows (list) -- A list of flow summaries.\n (dict) -- Provides a summary of a flow, including its ARN, Availability Zone, and source type.\n AvailabilityZone (string) -- The Availability Zone that the flow was created in.\n Description (string) -- A description of the flow.\n FlowArn (string) -- The ARN of the flow.\n Name (string) -- The name of the flow.\n SourceType (string) -- The type of source. This value is either owned (originated somewhere other than an AWS Elemental MediaConnect flow owned by another AWS account) or entitled (originated at an AWS Elemental MediaConnect flow owned by another AWS account).\n Status (string) -- The current status of the flow.\n \n \n \n \n NextToken (string) -- The token that identifies which batch of results that you want to see. For example, you submit a ListFlows request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListFlows request a second time and specify the NextToken value.\n \n \n \n \"\"\"\n pass\n\ndef remove_flow_output(FlowArn=None, OutputArn=None):\n \"\"\"\n Removes an output from an existing flow. This request can be made only on an output that does not have an entitlement associated with it. If the output has an entitlement, you must revoke the entitlement instead. When an entitlement is revoked from a flow, the service automatically removes the associated output.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_flow_output(\n FlowArn='string',\n OutputArn='string'\n )\n \n \n :type FlowArn: string\n :param FlowArn: [REQUIRED] The flow that you want to remove an output from.\n\n :type OutputArn: string\n :param OutputArn: [REQUIRED] The ARN of the output that you want to remove.\n\n :rtype: dict\n :return: {\n 'FlowArn': 'string',\n 'OutputArn': 'string'\n }\n \n \n :returns: \n (dict) -- output successfully removed from flow configuration.\n FlowArn (string) -- The ARN of the flow that is associated with the output you removed.\n OutputArn (string) -- The ARN of the output that was removed.\n \n \n \n \"\"\"\n pass\n\ndef revoke_flow_entitlement(EntitlementArn=None, FlowArn=None):\n \"\"\"\n Revokes an entitlement from a flow. 
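A sketch of walking list_flows by hand with NextToken, following the paging behavior described above:

import boto3

client = boto3.client('mediaconnect')

# Collect every flow summary, requesting up to 10 per page.
flows, token = [], None
while True:
    kwargs = {'MaxResults': 10}
    if token:
        kwargs['NextToken'] = token
    page = client.list_flows(**kwargs)
    flows.extend(page['Flows'])
    token = page.get('NextToken')
    if token is None:
        break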
Once an entitlement is revoked, the content becomes unavailable to the subscriber and the associated output is removed.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.revoke_flow_entitlement(\n        EntitlementArn='string',\n        FlowArn='string'\n    )\n    \n    \n    :type EntitlementArn: string\n    :param EntitlementArn: [REQUIRED] The ARN of the entitlement that you want to revoke.\n\n    :type FlowArn: string\n    :param FlowArn: [REQUIRED] The flow that you want to revoke an entitlement from.\n\n    :rtype: dict\n    :return: {\n        'EntitlementArn': 'string',\n        'FlowArn': 'string'\n    }\n    \n    \n    :returns: \n    (dict) -- AWS Elemental MediaConnect revoked the entitlement successfully.\n    EntitlementArn (string) -- The ARN of the entitlement that was revoked.\n    FlowArn (string) -- The ARN of the flow that the entitlement was revoked from.\n    \n    \n    \n    \"\"\"\n    pass\n\ndef start_flow(FlowArn=None):\n    \"\"\"\n    Starts a flow.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.start_flow(\n        FlowArn='string'\n    )\n    \n    \n    :type FlowArn: string\n    :param FlowArn: [REQUIRED] The ARN of the flow that you want to start.\n\n    :rtype: dict\n    :return: {\n        'FlowArn': 'string',\n        'Status': 'STANDBY'|'ACTIVE'|'UPDATING'|'DELETING'|'STARTING'|'STOPPING'|'ERROR'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef stop_flow(FlowArn=None):\n    \"\"\"\n    Stops a flow.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_flow(\n        FlowArn='string'\n    )\n    \n    \n    :type FlowArn: string\n    :param FlowArn: [REQUIRED] The ARN of the flow that you want to stop.\n\n    :rtype: dict\n    :return: {\n        'FlowArn': 'string',\n        'Status': 'STANDBY'|'ACTIVE'|'UPDATING'|'DELETING'|'STARTING'|'STOPPING'|'ERROR'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef update_flow_entitlement(Description=None, Encryption=None, EntitlementArn=None, FlowArn=None, Subscribers=None):\n    \"\"\"\n    You can change an entitlement's description, subscribers, and encryption. If you change the subscribers, the service will remove the outputs that are used by the subscribers that are removed.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_flow_entitlement(\n        Description='string',\n        Encryption={\n            'Algorithm': 'aes128'|'aes192'|'aes256',\n            'KeyType': 'static-key',\n            'RoleArn': 'string',\n            'SecretArn': 'string'\n        },\n        EntitlementArn='string',\n        FlowArn='string',\n        Subscribers=[\n            'string',\n        ]\n    )\n    \n    \n    :type Description: string\n    :param Description: A description of the entitlement. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the subscriber or end user.\n\n    :type Encryption: dict\n    :param Encryption: The type of encryption that will be used on the output associated with this entitlement.\n            Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n            KeyType (string) -- The type of key that is used for the encryption. 
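A lifecycle sketch tying start_flow, describe_flow, and stop_flow together; the flow ARN is a hypothetical placeholder and the polling interval is arbitrary:

import time
import boto3

client = boto3.client('mediaconnect')
flow_arn = 'arn:aws:mediaconnect:us-east-1:111122223333:flow:example'  # placeholder

client.start_flow(FlowArn=flow_arn)

# Wait for the flow to leave the STARTING state before relying on it.
while client.describe_flow(FlowArn=flow_arn)['Flow']['Status'] == 'STARTING':
    time.sleep(5)

client.stop_flow(FlowArn=flow_arn)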
If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n\n :type EntitlementArn: string\n :param EntitlementArn: [REQUIRED] The ARN of the entitlement that you want to update.\n\n :type FlowArn: string\n :param FlowArn: [REQUIRED] The flow that is associated with the entitlement that you want to update.\n\n :type Subscribers: list\n :param Subscribers: The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flow using your content as the source.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Entitlement': {\n 'Description': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'EntitlementArn': 'string',\n 'Name': 'string',\n 'Subscribers': [\n 'string',\n ]\n },\n 'FlowArn': 'string'\n }\n \n \n :returns: \n (dict) -- AWS Elemental MediaConnect updated the entitlement successfully.\n Entitlement (dict) -- The settings for a flow entitlement.\n Description (string) -- A description of the entitlement.\n Encryption (dict) -- The type of encryption that will be used on the output that is associated with this entitlement.\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n \n EntitlementArn (string) -- The ARN of the entitlement.\n Name (string) -- The name of the entitlement.\n Subscribers (list) -- The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flow using your content as the source.\n (string) --\n \n \n \n \n FlowArn (string) -- The ARN of the flow that this entitlement was granted on.\n \n \n \n \"\"\"\n pass\n\ndef update_flow_output(Description=None, Destination=None, Encryption=None, FlowArn=None, MaxLatency=None, OutputArn=None, Port=None, Protocol=None, SmoothingLatency=None, StreamId=None):\n \"\"\"\n Updates an existing flow output.\n See also: AWS API Documentation\n \n \n :example: response = client.update_flow_output(\n Description='string',\n Destination='string',\n Encryption={\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n FlowArn='string',\n MaxLatency=123,\n OutputArn='string',\n Port=123,\n Protocol='zixi-push'|'rtp-fec'|'rtp',\n SmoothingLatency=123,\n StreamId='string'\n )\n \n \n :type Description: string\n :param Description: A description of the output. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the end user.\n\n :type Destination: string\n :param Destination: The IP address where you want to send the output.\n\n :type Encryption: dict\n :param Encryption: The type of key used for the encryption. 
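A sketch of narrowing an entitlement to a new subscriber list with update_flow_entitlement; per the description above, outputs used by removed subscribers are deleted by the service. The ARNs and account ID are hypothetical placeholders:

import boto3

client = boto3.client('mediaconnect')

client.update_flow_entitlement(
    FlowArn='arn:aws:mediaconnect:us-east-1:111122223333:flow:example',                # placeholder
    EntitlementArn='arn:aws:mediaconnect:us-east-1:111122223333:entitlement:example',  # placeholder
    Subscribers=['777788889999'],  # replaces the previous subscriber list
)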
If no keyType is provided, the service will use the default setting (static-key).\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n\n :type FlowArn: string\n :param FlowArn: [REQUIRED] The flow that is associated with the output that you want to update.\n\n :type MaxLatency: integer\n :param MaxLatency: The maximum latency in milliseconds for Zixi-based streams.\n\n :type OutputArn: string\n :param OutputArn: [REQUIRED] The ARN of the output that you want to update.\n\n :type Port: integer\n :param Port: The port to use when content is distributed to this output.\n\n :type Protocol: string\n :param Protocol: The protocol to use for the output.\n\n :type SmoothingLatency: integer\n :param SmoothingLatency: The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n\n :type StreamId: string\n :param StreamId: The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n\n :rtype: dict\n :return: {\n 'FlowArn': 'string',\n 'Output': {\n 'Description': 'string',\n 'Destination': 'string',\n 'Encryption': {\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n 'EntitlementArn': 'string',\n 'MediaLiveInputArn': 'string',\n 'Name': 'string',\n 'OutputArn': 'string',\n 'Port': 123,\n 'Transport': {\n 'MaxBitrate': 123,\n 'MaxLatency': 123,\n 'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n 'SmoothingLatency': 123,\n 'StreamId': 'string'\n }\n }\n }\n \n \n :returns: \n (dict) -- AWS Elemental MediaConnect updated the output successfully.\n FlowArn (string) -- The ARN of the flow that is associated with the updated output.\n Output (dict) -- The settings for an output.\n Description (string) -- A description of the output.\n Destination (string) -- The address where you want to send the output.\n Encryption (dict) -- The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n \n EntitlementArn (string) -- The ARN of the entitlement on the originator''s flow. This value is relevant only on entitled flows.\n MediaLiveInputArn (string) -- The input ARN of the AWS Elemental MediaLive channel. This parameter is relevant only for outputs that were added by creating a MediaLive input.\n Name (string) -- The name of the output. 
This value must be unique within the current flow.\n OutputArn (string) -- The ARN of the output.\n Port (integer) -- The port to use when content is distributed to this output.\n Transport (dict) -- Attributes related to the transport stream that are used in the output.\n MaxBitrate (integer) -- The smoothing max bitrate for RTP and RTP-FEC streams.\n MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n Protocol (string) -- The protocol that is used by the source or output.\n SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef update_flow_source(Decryption=None, Description=None, EntitlementArn=None, FlowArn=None, IngestPort=None, MaxBitrate=None, MaxLatency=None, Protocol=None, SourceArn=None, StreamId=None, WhitelistCidr=None):\n \"\"\"\n Updates the source of a flow.\n See also: AWS API Documentation\n \n \n :example: response = client.update_flow_source(\n Decryption={\n 'Algorithm': 'aes128'|'aes192'|'aes256',\n 'KeyType': 'static-key',\n 'RoleArn': 'string',\n 'SecretArn': 'string'\n },\n Description='string',\n EntitlementArn='string',\n FlowArn='string',\n IngestPort=123,\n MaxBitrate=123,\n MaxLatency=123,\n Protocol='zixi-push'|'rtp-fec'|'rtp',\n SourceArn='string',\n StreamId='string',\n WhitelistCidr='string'\n )\n \n \n :type Decryption: dict\n :param Decryption: The type of encryption used on the content ingested from this source.\n Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n \n\n :type Description: string\n :param Description: A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.\n\n :type EntitlementArn: string\n :param EntitlementArn: The ARN of the entitlement that allows you to subscribe to this flow. The entitlement is set by the flow originator, and the ARN is generated as part of the originator's flow.\n\n :type FlowArn: string\n :param FlowArn: [REQUIRED] The flow that is associated with the source that you want to update.\n\n :type IngestPort: integer\n :param IngestPort: The port that the flow will be listening on for incoming content.\n\n :type MaxBitrate: integer\n :param MaxBitrate: The smoothing max bitrate for RTP and RTP-FEC streams.\n\n :type MaxLatency: integer\n :param MaxLatency: The maximum latency in milliseconds for Zixi-based streams.\n\n :type Protocol: string\n :param Protocol: The protocol that is used by the source.\n\n :type SourceArn: string\n :param SourceArn: [REQUIRED] The ARN of the source that you want to update.\n\n :type StreamId: string\n :param StreamId: The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n\n :type WhitelistCidr: string\n :param WhitelistCidr: The range of IP addresses that should be allowed to contribute content to your source. 
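A sketch of retuning an output's transport settings with update_flow_output; both ARNs are hypothetical placeholders:

import boto3

client = boto3.client('mediaconnect')

client.update_flow_output(
    FlowArn='arn:aws:mediaconnect:us-east-1:111122223333:flow:example',      # placeholder
    OutputArn='arn:aws:mediaconnect:us-east-1:111122223333:output:example',  # placeholder
    Protocol='rtp-fec',
    Port=5001,
    SmoothingLatency=100,  # milliseconds, for RTP and RTP-FEC streams
)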
These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.\n\n    :rtype: dict\n    :return: {\n        'FlowArn': 'string',\n        'Source': {\n            'Decryption': {\n                'Algorithm': 'aes128'|'aes192'|'aes256',\n                'KeyType': 'static-key',\n                'RoleArn': 'string',\n                'SecretArn': 'string'\n            },\n            'Description': 'string',\n            'EntitlementArn': 'string',\n            'IngestIp': 'string',\n            'IngestPort': 123,\n            'Name': 'string',\n            'SourceArn': 'string',\n            'Transport': {\n                'MaxBitrate': 123,\n                'MaxLatency': 123,\n                'Protocol': 'zixi-push'|'rtp-fec'|'rtp',\n                'SmoothingLatency': 123,\n                'StreamId': 'string'\n            },\n            'WhitelistCidr': 'string'\n        }\n    }\n    \n    \n    :returns: \n    (dict) -- AWS Elemental MediaConnect updated the flow successfully.\n    FlowArn (string) -- The ARN of the flow that you want to update.\n    Source (dict) -- The settings for the source of the flow.\n    Decryption (dict) -- The type of encryption that is used on the content ingested from this source.\n    Algorithm (string) -- The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).\n    KeyType (string) -- The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).\n    RoleArn (string) -- The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).\n    SecretArn (string) -- The ARN that was assigned to the secret that you created in AWS Secrets Manager to store the encryption key.\n    \n    \n    Description (string) -- A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.\n    EntitlementArn (string) -- The ARN of the entitlement that allows you to subscribe to content that comes from another AWS account. The entitlement is set by the content originator and the ARN is generated as part of the originator's flow.\n    IngestIp (string) -- The IP address that the flow will be listening on for incoming content.\n    IngestPort (integer) -- The port that the flow will be listening on for incoming content.\n    Name (string) -- The name of the source.\n    SourceArn (string) -- The ARN of the source.\n    Transport (dict) -- Attributes related to the transport stream that are used in the source.\n    MaxBitrate (integer) -- The smoothing max bitrate for RTP and RTP-FEC streams.\n    MaxLatency (integer) -- The maximum latency in milliseconds for Zixi-based streams.\n    Protocol (string) -- The protocol that is used by the source or output.\n    SmoothingLatency (integer) -- The smoothing latency in milliseconds for RTP and RTP-FEC streams.\n    StreamId (string) -- The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.\n    \n    \n    WhitelistCidr (string) -- The range of IP addresses that should be allowed to contribute content to your source. 
These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.\n    \n    \n    \n    \n    \n    \"\"\"\n    pass\n\n" }, { "alpha_fraction": 0.6350076198577881, "alphanum_fraction": 0.6399182081222534, "avg_line_length": 50.04020690917969, "blob_id": "6dffd9296337b200c1cec43b8d3de3f65267dcb9", "content_id": "04d2f7281c6fb54f344e0b60696b6c2164c41940", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59667, "license_type": "permissive", "max_line_length": 705, "num_lines": 1169, "path": "/pyboto3/fsx.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n    \"\"\"\n    Check if an operation can be paginated.\n    \n    :type operation_name: string\n    :param operation_name: The operation name. This is the same name\n            as the method name on the client. For example, if the\n            method name is create_foo, and you'd normally invoke the\n            operation as client.create_foo(**kwargs), if the\n            create_foo operation can be paginated, you can use the\n            call client.get_paginator('create_foo').\n\n    \"\"\"\n    pass\n\ndef create_backup(FileSystemId=None, ClientRequestToken=None, Tags=None):\n    \"\"\"\n    Creates a backup of an existing Amazon FSx for Windows File Server file system. Creating regular backups for your file system is a best practice that complements the replication that Amazon FSx for Windows File Server performs for your file system. It also enables you to restore from user modification of data.\n    If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError . If a backup with the specified client request token doesn't exist, CreateBackup does the following:\n    By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.\n    The CreateBackup operation returns while the backup's lifecycle state is still CREATING . 
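A create_backup sketch that supplies its own client request token so retries stay idempotent, then reads back the lifecycle state; the file system ID and token are hypothetical placeholders:

import boto3

fsx = boto3.client('fsx')

backup = fsx.create_backup(
    FileSystemId='fs-0123456789abcdef0',      # placeholder
    ClientRequestToken='nightly-2019-01-15',  # placeholder; reuse on retry
    Tags=[{'Key': 'Name', 'Value': 'nightly'}],
)['Backup']

# The call returns while the backup is still CREATING; poll until AVAILABLE.
state = fsx.describe_backups(BackupIds=[backup['BackupId']])['Backups'][0]['Lifecycle']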
You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_backup(\n        FileSystemId='string',\n        ClientRequestToken='string',\n        Tags=[\n            {\n                'Key': 'string',\n                'Value': 'string'\n            },\n        ]\n    )\n    \n    \n    :type FileSystemId: string\n    :param FileSystemId: [REQUIRED]\n            The ID of the file system to back up.\n            \n\n    :type ClientRequestToken: string\n    :param ClientRequestToken: (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n            This field is autopopulated if not provided.\n            \n\n    :type Tags: list\n    :param Tags: The tags to apply to the backup at backup creation. The key value of the Name tag appears in the console as the backup name.\n            (dict) --Specifies a key-value pair for a resource tag.\n            Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n            Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n            \n            \n\n    :rtype: dict\n    :return: {\n        'Backup': {\n            'BackupId': 'string',\n            'Lifecycle': 'AVAILABLE'|'CREATING'|'DELETED'|'FAILED',\n            'FailureDetails': {\n                'Message': 'string'\n            },\n            'Type': 'AUTOMATIC'|'USER_INITIATED',\n            'ProgressPercent': 123,\n            'CreationTime': datetime(2015, 1, 1),\n            'KmsKeyId': 'string',\n            'ResourceARN': 'string',\n            'Tags': [\n                {\n                    'Key': 'string',\n                    'Value': 'string'\n                },\n            ],\n            'FileSystem': {\n                'OwnerId': 'string',\n                'CreationTime': datetime(2015, 1, 1),\n                'FileSystemId': 'string',\n                'FileSystemType': 'WINDOWS'|'LUSTRE',\n                'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n                'FailureDetails': {\n                    'Message': 'string'\n                },\n                'StorageCapacity': 123,\n                'VpcId': 'string',\n                'SubnetIds': [\n                    'string',\n                ],\n                'NetworkInterfaceIds': [\n                    'string',\n                ],\n                'DNSName': 'string',\n                'KmsKeyId': 'string',\n                'ResourceARN': 'string',\n                'Tags': [\n                    {\n                        'Key': 'string',\n                        'Value': 'string'\n                    },\n                ],\n                'WindowsConfiguration': {\n                    'ActiveDirectoryId': 'string',\n                    'ThroughputCapacity': 123,\n                    'MaintenanceOperationsInProgress': [\n                        'PATCHING'|'BACKING_UP',\n                    ],\n                    'WeeklyMaintenanceStartTime': 'string',\n                    'DailyAutomaticBackupStartTime': 'string',\n                    'AutomaticBackupRetentionDays': 123,\n                    'CopyTagsToBackups': True|False\n                },\n                'LustreConfiguration': {\n                    'WeeklyMaintenanceStartTime': 'string',\n                    'DataRepositoryConfiguration': {\n                        'ImportPath': 'string',\n                        'ExportPath': 'string',\n                        'ImportedFileChunkSize': 123\n                    }\n                }\n            }\n        }\n    }\n    \n    \n    :returns: \n    FileSystemId (string) -- [REQUIRED]\n    The ID of the file system to back up.\n    \n    ClientRequestToken (string) -- (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n    This field is autopopulated if not provided.\n    \n    Tags (list) -- The tags to apply to the backup at backup creation. 
The key value of the Name tag appears in the console as the backup name.\n    \n    (dict) --Specifies a key-value pair for a resource tag.\n    \n    Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n    \n    Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n    \n    \n    \n    \n    \n    \"\"\"\n    pass\n\ndef create_file_system(ClientRequestToken=None, FileSystemType=None, StorageCapacity=None, SubnetIds=None, SecurityGroupIds=None, Tags=None, KmsKeyId=None, WindowsConfiguration=None, LustreConfiguration=None):\n    \"\"\"\n    Creates a new, empty Amazon FSx file system.\n    If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError . If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:\n    This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_file_system(\n        ClientRequestToken='string',\n        FileSystemType='WINDOWS'|'LUSTRE',\n        StorageCapacity=123,\n        SubnetIds=[\n            'string',\n        ],\n        SecurityGroupIds=[\n            'string',\n        ],\n        Tags=[\n            {\n                'Key': 'string',\n                'Value': 'string'\n            },\n        ],\n        KmsKeyId='string',\n        WindowsConfiguration={\n            'ActiveDirectoryId': 'string',\n            'ThroughputCapacity': 123,\n            'WeeklyMaintenanceStartTime': 'string',\n            'DailyAutomaticBackupStartTime': 'string',\n            'AutomaticBackupRetentionDays': 123,\n            'CopyTagsToBackups': True|False\n        },\n        LustreConfiguration={\n            'WeeklyMaintenanceStartTime': 'string',\n            'ImportPath': 'string',\n            'ImportedFileChunkSize': 123\n        }\n    )\n    \n    \n    :type ClientRequestToken: string\n    :param ClientRequestToken: (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n            This field is autopopulated if not provided.\n            \n\n    :type FileSystemType: string\n    :param FileSystemType: [REQUIRED]\n            The type of file system.\n            \n\n    :type StorageCapacity: integer\n    :param StorageCapacity: [REQUIRED]\n            The storage capacity of the file system.\n            For Windows file systems, the storage capacity has a minimum of 300 GiB, and a maximum of 65,536 GiB.\n            For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. 
Storage capacity is provisioned in increments of 3,600 GiB.\n \n\n :type SubnetIds: list\n :param SubnetIds: [REQUIRED]\n A list of IDs for the subnets that the file system will be accessible from. File systems support only one subnet. The file server is also launched in that subnet's Availability Zone.\n (string) --The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. This list isn't returned in later describe requests.\n (string) --The ID of your Amazon EC2 security group. This ID is used to control network access to the endpoint that Amazon FSx creates on your behalf in each subnet. For more information, see Amazon EC2 Security Groups for Linux Instances in the Amazon EC2 User Guide .\n \n\n :type Tags: list\n :param Tags: The tags to be applied to the file system at file system creation. The key value of the Name tag appears in the console as the file system name.\n (dict) --Specifies a key-value pair for a resource tag.\n Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n \n \n\n :type KmsKeyId: string\n :param KmsKeyId: The ID of your AWS Key Management Service (AWS KMS) key. This ID is used to encrypt the data in your file system at rest. For more information, see Encrypt in the AWS Key Management Service API Reference .\n\n :type WindowsConfiguration: dict\n :param WindowsConfiguration: The configuration for this Microsoft Windows file system.\n ActiveDirectoryId (string) --The ID for an existing Microsoft Active Directory instance that the file system should join when it's created.\n ThroughputCapacity (integer) -- [REQUIRED]The throughput of an Amazon FSx file system, measured in megabytes per second.\n WeeklyMaintenanceStartTime (string) --The preferred start time to perform weekly maintenance, in the UTC time zone.\n DailyAutomaticBackupStartTime (string) --The preferred time to take daily automatic backups, in the UTC time zone.\n AutomaticBackupRetentionDays (integer) --The number of days to retain automatic backups. The default is to retain backups for 7 days. Setting this value to 0 disables the creation of automatic backups. The maximum retention period for backups is 35 days.\n CopyTagsToBackups (boolean) --A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. 
If this value is true, and you specify one or more tags, only the specified tags are copied to backups.\n \n\n :type LustreConfiguration: dict\n :param LustreConfiguration: The configuration object for Lustre file systems used in the CreateFileSystem operation.\n WeeklyMaintenanceStartTime (string) --The preferred time to perform weekly maintenance, in the UTC time zone.\n ImportPath (string) --(Optional) The path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example s3://import-bucket/optional-prefix . If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.\n ImportedFileChunkSize (integer) --(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.\n The chunk size default is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.\n \n\n :rtype: dict\n :return: {\n 'FileSystem': {\n 'OwnerId': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'FileSystemId': 'string',\n 'FileSystemType': 'WINDOWS'|'LUSTRE',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n 'FailureDetails': {\n 'Message': 'string'\n },\n 'StorageCapacity': 123,\n 'VpcId': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'NetworkInterfaceIds': [\n 'string',\n ],\n 'DNSName': 'string',\n 'KmsKeyId': 'string',\n 'ResourceARN': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'WindowsConfiguration': {\n 'ActiveDirectoryId': 'string',\n 'ThroughputCapacity': 123,\n 'MaintenanceOperationsInProgress': [\n 'PATCHING'|'BACKING_UP',\n ],\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123,\n 'CopyTagsToBackups': True|False\n },\n 'LustreConfiguration': {\n 'WeeklyMaintenanceStartTime': 'string',\n 'DataRepositoryConfiguration': {\n 'ImportPath': 'string',\n 'ExportPath': 'string',\n 'ImportedFileChunkSize': 123\n }\n }\n }\n }\n \n \n :returns: \n ClientRequestToken (string) -- (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n This field is autopopulated if not provided.\n \n FileSystemType (string) -- [REQUIRED]\n The type of file system.\n \n StorageCapacity (integer) -- [REQUIRED]\n The storage capacity of the file system.\n For Windows file systems, the storage capacity has a minimum of 300 GiB, and a maximum of 65,536 GiB.\n For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. Storage capacity is provisioned in increments of 3,600 GiB.\n \n SubnetIds (list) -- [REQUIRED]\n A list of IDs for the subnets that the file system will be accessible from. File systems support only one subnet. The file server is also launched in that subnet's Availability Zone.\n \n (string) --The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). 
For more information, see VPC and Subnets in the Amazon VPC User Guide.\n \n \n \n SecurityGroupIds (list) -- A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. This list isn't returned in later describe requests.\n \n (string) --The ID of your Amazon EC2 security group. This ID is used to control network access to the endpoint that Amazon FSx creates on your behalf in each subnet. For more information, see Amazon EC2 Security Groups for Linux Instances in the Amazon EC2 User Guide .\n \n \n \n Tags (list) -- The tags to be applied to the file system at file system creation. The key value of the Name tag appears in the console as the file system name.\n \n (dict) --Specifies a key-value pair for a resource tag.\n \n Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n \n Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n \n \n \n \n \n KmsKeyId (string) -- The ID of your AWS Key Management Service (AWS KMS) key. This ID is used to encrypt the data in your file system at rest. For more information, see Encrypt in the AWS Key Management Service API Reference .\n WindowsConfiguration (dict) -- The configuration for this Microsoft Windows file system.\n \n ActiveDirectoryId (string) --The ID for an existing Microsoft Active Directory instance that the file system should join when it's created.\n \n ThroughputCapacity (integer) -- [REQUIRED]The throughput of an Amazon FSx file system, measured in megabytes per second.\n \n WeeklyMaintenanceStartTime (string) --The preferred start time to perform weekly maintenance, in the UTC time zone.\n \n DailyAutomaticBackupStartTime (string) --The preferred time to take daily automatic backups, in the UTC time zone.\n \n AutomaticBackupRetentionDays (integer) --The number of days to retain automatic backups. The default is to retain backups for 7 days. Setting this value to 0 disables the creation of automatic backups. The maximum retention period for backups is 35 days.\n \n CopyTagsToBackups (boolean) --A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups.\n \n \n \n LustreConfiguration (dict) -- The configuration object for Lustre file systems used in the CreateFileSystem operation.\n \n WeeklyMaintenanceStartTime (string) --The preferred time to perform weekly maintenance, in the UTC time zone.\n \n ImportPath (string) --(Optional) The path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example s3://import-bucket/optional-prefix . 
If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.\n \n ImportedFileChunkSize (integer) --(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.\n The chunk size default is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.\n \n \n \n \n \"\"\"\n pass\n\ndef create_file_system_from_backup(BackupId=None, ClientRequestToken=None, SubnetIds=None, SecurityGroupIds=None, Tags=None, WindowsConfiguration=None):\n \"\"\"\n Creates a new Amazon FSx file system from an existing Amazon FSx for Windows File Server backup.\n If a file system with the specified client request token exists and the parameters match, this call returns the description of the existing file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError . If a file system with the specified client request token doesn't exist, this operation does the following:\n Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.\n By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.\n See also: AWS API Documentation\n \n \n :example: response = client.create_file_system_from_backup(\n BackupId='string',\n ClientRequestToken='string',\n SubnetIds=[\n 'string',\n ],\n SecurityGroupIds=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n WindowsConfiguration={\n 'ActiveDirectoryId': 'string',\n 'ThroughputCapacity': 123,\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123,\n 'CopyTagsToBackups': True|False\n }\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n This field is autopopulated if not provided.\n \n\n :type SubnetIds: list\n :param SubnetIds: [REQUIRED]\n A list of IDs for the subnets that the file system will be accessible from. Currently, you can specify only one subnet. The file server is also launched in that subnet's Availability Zone.\n (string) --The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). 
For more information, see VPC and Subnets in the Amazon VPC User Guide.\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups apply to all network interfaces. This value isn't returned in later describe requests.\n (string) --The ID of your Amazon EC2 security group. This ID is used to control network access to the endpoint that Amazon FSx creates on your behalf in each subnet. For more information, see Amazon EC2 Security Groups for Linux Instances in the Amazon EC2 User Guide .\n \n\n :type Tags: list\n :param Tags: The tags to be applied to the file system at file system creation. The key value of the Name tag appears in the console as the file system name.\n (dict) --Specifies a key-value pair for a resource tag.\n Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n \n \n\n :type WindowsConfiguration: dict\n :param WindowsConfiguration: The configuration for this Microsoft Windows file system.\n ActiveDirectoryId (string) --The ID for an existing Microsoft Active Directory instance that the file system should join when it's created.\n ThroughputCapacity (integer) -- [REQUIRED]The throughput of an Amazon FSx file system, measured in megabytes per second.\n WeeklyMaintenanceStartTime (string) --The preferred start time to perform weekly maintenance, in the UTC time zone.\n DailyAutomaticBackupStartTime (string) --The preferred time to take daily automatic backups, in the UTC time zone.\n AutomaticBackupRetentionDays (integer) --The number of days to retain automatic backups. The default is to retain backups for 7 days. Setting this value to 0 disables the creation of automatic backups. The maximum retention period for backups is 35 days.\n CopyTagsToBackups (boolean) --A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. 
If this value is true, and you specify one or more tags, only the specified tags are copied to backups.\n \n\n :rtype: dict\n :return: {\n 'FileSystem': {\n 'OwnerId': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'FileSystemId': 'string',\n 'FileSystemType': 'WINDOWS'|'LUSTRE',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n 'FailureDetails': {\n 'Message': 'string'\n },\n 'StorageCapacity': 123,\n 'VpcId': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'NetworkInterfaceIds': [\n 'string',\n ],\n 'DNSName': 'string',\n 'KmsKeyId': 'string',\n 'ResourceARN': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'WindowsConfiguration': {\n 'ActiveDirectoryId': 'string',\n 'ThroughputCapacity': 123,\n 'MaintenanceOperationsInProgress': [\n 'PATCHING'|'BACKING_UP',\n ],\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123,\n 'CopyTagsToBackups': True|False\n },\n 'LustreConfiguration': {\n 'WeeklyMaintenanceStartTime': 'string',\n 'DataRepositoryConfiguration': {\n 'ImportPath': 'string',\n 'ExportPath': 'string',\n 'ImportedFileChunkSize': 123\n }\n }\n }\n }\n \n \n :returns: \n BackupId (string) -- [REQUIRED]\n The ID of the backup.\n \n ClientRequestToken (string) -- (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n This field is autopopulated if not provided.\n \n SubnetIds (list) -- [REQUIRED]\n A list of IDs for the subnets that the file system will be accessible from. Currently, you can specify only one subnet. The file server is also launched in that subnet's Availability Zone.\n \n (string) --The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.\n \n \n \n SecurityGroupIds (list) -- A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups apply to all network interfaces. This value isn't returned in later describe requests.\n \n (string) --The ID of your Amazon EC2 security group. This ID is used to control network access to the endpoint that Amazon FSx creates on your behalf in each subnet. For more information, see Amazon EC2 Security Groups for Linux Instances in the Amazon EC2 User Guide .\n \n \n \n Tags (list) -- The tags to be applied to the file system at file system creation. The key value of the Name tag appears in the console as the file system name.\n \n (dict) --Specifies a key-value pair for a resource tag.\n \n Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n \n Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. 
For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n \n \n \n \n \n WindowsConfiguration (dict) -- The configuration for this Microsoft Windows file system.\n \n ActiveDirectoryId (string) --The ID for an existing Microsoft Active Directory instance that the file system should join when it's created.\n \n ThroughputCapacity (integer) -- [REQUIRED]The throughput of an Amazon FSx file system, measured in megabytes per second.\n \n WeeklyMaintenanceStartTime (string) --The preferred start time to perform weekly maintenance, in the UTC time zone.\n \n DailyAutomaticBackupStartTime (string) --The preferred time to take daily automatic backups, in the UTC time zone.\n \n AutomaticBackupRetentionDays (integer) --The number of days to retain automatic backups. The default is to retain backups for 7 days. Setting this value to 0 disables the creation of automatic backups. The maximum retention period for backups is 35 days.\n \n CopyTagsToBackups (boolean) --A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups.\n \n \n \n \n \"\"\"\n pass\n\ndef delete_backup(BackupId=None, ClientRequestToken=None):\n \"\"\"\n Deletes an Amazon FSx for Windows File Server backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.\n The DeleteBackup call returns instantly. The backup will not show up in later DescribeBackups calls.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string',\n ClientRequestToken='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup you want to delete.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'BackupId': 'string',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'DELETED'|'FAILED'\n }\n \n \n \"\"\"\n pass\n\ndef delete_file_system(FileSystemId=None, ClientRequestToken=None, WindowsConfiguration=None):\n \"\"\"\n Deletes a file system, deleting its contents. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups will also be deleted.\n By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup is not subject to the file system's retention policy, and must be manually deleted.\n The DeleteFileSystem action returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems action, which returns a list of file systems in your account. 
If you pass the file system ID for a deleted file system, the DescribeFileSystems returns a FileSystemNotFound error.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_file_system(\n FileSystemId='string',\n ClientRequestToken='string',\n WindowsConfiguration={\n 'SkipFinalBackup': True|False,\n 'FinalBackupTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n )\n \n \n :type FileSystemId: string\n :param FileSystemId: [REQUIRED]\n The ID of the file system you want to delete.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.\n This field is autopopulated if not provided.\n \n\n :type WindowsConfiguration: dict\n :param WindowsConfiguration: The configuration object for the Microsoft Windows file system used in the DeleteFileSystem operation.\n SkipFinalBackup (boolean) --By default, Amazon FSx for Windows takes a final backup on your behalf when the DeleteFileSystem operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.\n FinalBackupTags (list) --A set of tags for your final backup.\n (dict) --Specifies a key-value pair for a resource tag.\n Key (string) --A value that specifies the TagKey , the name of the tag. Tag keys must be unique for the resource to which they are attached.\n Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n \n \n\n :rtype: dict\n :return: {\n 'FileSystemId': 'string',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n 'WindowsResponse': {\n 'FinalBackupId': 'string',\n 'FinalBackupTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_backups(BackupIds=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns the description of specific Amazon FSx for Windows File Server backups, if a BackupIds value is provided for that backup. Otherwise, it returns all backups owned by your AWS account in the AWS Region of the endpoint that you're calling.\n When retrieving all backups, you can optionally specify the MaxResults parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.\n This action is used in an iterative process to retrieve a list of your backups. DescribeBackups is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken .\n When using this action, keep the following in mind:\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n BackupIds=[\n 'string',\n ],\n Filters=[\n {\n 'Name': 'file-system-id'|'backup-type',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type BackupIds: list\n :param BackupIds: (Optional) IDs of the backups you want to retrieve (String). This overrides any filters. 
If any IDs are not found, BackupNotFound will be thrown.\n (string) --The ID of the backup.\n \n\n :type Filters: list\n :param Filters: (Optional) Filters structure. Supported names are file-system-id and backup-type.\n (dict) --A filter used to restrict the results of describe calls. You can use multiple filters to return results that meet all applied filter requirements.\n Name (string) --The name for this filter.\n Values (list) --The values of the filter. These are all the values for any of the applied filters.\n (string) --The value for a filter.\n \n \n\n :type MaxResults: integer\n :param MaxResults: (Optional) Maximum number of backups to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.\n\n :type NextToken: string\n :param NextToken: (Optional) Opaque pagination token returned from a previous DescribeBackups operation (String). If a token present, the action continues the list from where the returning call left off.\n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupId': 'string',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'DELETED'|'FAILED',\n 'FailureDetails': {\n 'Message': 'string'\n },\n 'Type': 'AUTOMATIC'|'USER_INITIATED',\n 'ProgressPercent': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'KmsKeyId': 'string',\n 'ResourceARN': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'FileSystem': {\n 'OwnerId': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'FileSystemId': 'string',\n 'FileSystemType': 'WINDOWS'|'LUSTRE',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n 'FailureDetails': {\n 'Message': 'string'\n },\n 'StorageCapacity': 123,\n 'VpcId': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'NetworkInterfaceIds': [\n 'string',\n ],\n 'DNSName': 'string',\n 'KmsKeyId': 'string',\n 'ResourceARN': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'WindowsConfiguration': {\n 'ActiveDirectoryId': 'string',\n 'ThroughputCapacity': 123,\n 'MaintenanceOperationsInProgress': [\n 'PATCHING'|'BACKING_UP',\n ],\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123,\n 'CopyTagsToBackups': True|False\n },\n 'LustreConfiguration': {\n 'WeeklyMaintenanceStartTime': 'string',\n 'DataRepositoryConfiguration': {\n 'ImportPath': 'string',\n 'ExportPath': 'string',\n 'ImportedFileChunkSize': 123\n }\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n BackupIds (list) -- (Optional) IDs of the backups you want to retrieve (String). This overrides any filters. If any IDs are not found, BackupNotFound will be thrown.\n \n (string) --The ID of the backup.\n \n \n \n Filters (list) -- (Optional) Filters structure. Supported names are file-system-id and backup-type.\n \n (dict) --A filter used to restrict the results of describe calls. You can use multiple filters to return results that meet all applied filter requirements.\n \n Name (string) --The name for this filter.\n \n Values (list) --The values of the filter. These are all the values for any of the applied filters.\n \n (string) --The value for a filter.\n \n \n \n \n \n \n \n MaxResults (integer) -- (Optional) Maximum number of backups to return in the response (integer). This parameter value must be greater than 0. 
The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.\n NextToken (string) -- (Optional) Opaque pagination token returned from a previous DescribeBackups operation (String). If a token present, the action continues the list from where the returning call left off.\n \n \"\"\"\n pass\n\ndef describe_file_systems(FileSystemIds=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns the description of specific Amazon FSx file systems, if a FileSystemIds value is provided for that file system. Otherwise, it returns descriptions of all file systems owned by your AWS account in the AWS Region of the endpoint that you're calling.\n When retrieving all file system descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.\n This action is used in an iterative process to retrieve a list of your file system descriptions. DescribeFileSystems is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken .\n When using this action, keep the following in mind:\n See also: AWS API Documentation\n \n \n :example: response = client.describe_file_systems(\n FileSystemIds=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type FileSystemIds: list\n :param FileSystemIds: (Optional) IDs of the file systems whose descriptions you want to retrieve (String).\n (string) --The globally unique ID of the file system, assigned by Amazon FSx.\n \n\n :type MaxResults: integer\n :param MaxResults: (Optional) Maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.\n\n :type NextToken: string\n :param NextToken: (Optional) Opaque pagination token returned from a previous DescribeFileSystems operation (String). 
If a token is present, the operation continues the list from where the previous call left off.\n\n :rtype: dict\n :return: {\n 'FileSystems': [\n {\n 'OwnerId': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'FileSystemId': 'string',\n 'FileSystemType': 'WINDOWS'|'LUSTRE',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n 'FailureDetails': {\n 'Message': 'string'\n },\n 'StorageCapacity': 123,\n 'VpcId': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'NetworkInterfaceIds': [\n 'string',\n ],\n 'DNSName': 'string',\n 'KmsKeyId': 'string',\n 'ResourceARN': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'WindowsConfiguration': {\n 'ActiveDirectoryId': 'string',\n 'ThroughputCapacity': 123,\n 'MaintenanceOperationsInProgress': [\n 'PATCHING'|'BACKING_UP',\n ],\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123,\n 'CopyTagsToBackups': True|False\n },\n 'LustreConfiguration': {\n 'WeeklyMaintenanceStartTime': 'string',\n 'DataRepositoryConfiguration': {\n 'ImportPath': 'string',\n 'ExportPath': 'string',\n 'ImportedFileChunkSize': 123\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n FileSystemIds (list) -- (Optional) IDs of the file systems whose descriptions you want to retrieve (String).\n \n (string) --The globally unique ID of the file system, assigned by Amazon FSx.\n \n \n \n MaxResults (integer) -- (Optional) Maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.\n NextToken (string) -- (Optional) Opaque pagination token returned from a previous DescribeFileSystems operation (String). If a token is present, the operation continues the list from where the previous call left off.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_tags_for_resource(ResourceARN=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists tags for Amazon FSx file systems and backups in the case of Amazon FSx for Windows File Server.\n When retrieving all tags, you can optionally specify the MaxResults parameter to limit the number of tags in a response. If more tags remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.\n This action is used in an iterative process to retrieve a list of your tags. ListTagsForResource is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken .\n When using this action, keep the following in mind:\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n ResourceARN='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ResourceARN: string\n :param ResourceARN: [REQUIRED]\n The ARN of the Amazon FSx resource that will have its tags listed.\n \n\n :type MaxResults: integer\n :param MaxResults: (Optional) Maximum number of tags to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.\n\n :type NextToken: string\n :param NextToken: (Optional) Opaque pagination token returned from a previous ListTagsForResource operation (String). If a token is present, the operation continues the list from where the previous call left off.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ResourceARN (string) -- [REQUIRED]\n The ARN of the Amazon FSx resource that will have its tags listed.\n \n MaxResults (integer) -- (Optional) Maximum number of tags to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.\n NextToken (string) -- (Optional) Opaque pagination token returned from a previous ListTagsForResource operation (String). If a token is present, the operation continues the list from where the previous call left off.\n \n \"\"\"\n pass\n\ndef tag_resource(ResourceARN=None, Tags=None):\n \"\"\"\n Tags an Amazon FSx resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n ResourceARN='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceARN: string\n :param ResourceARN: [REQUIRED]\n The Amazon Resource Name (ARN) of the Amazon FSx resource that you want to tag.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n A list of tags for the resource. If a tag with a given key already exists, the value is replaced by the one specified in this parameter.\n (dict) --Specifies a key-value pair for a resource tag.\n Key (string) --A value that specifies the TagKey , the name of the tag. 
Tag keys must be unique for the resource to which they are attached.\n Value (string) --A value that specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April .\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef untag_resource(ResourceARN=None, TagKeys=None):\n \"\"\"\n This action removes a tag from an Amazon FSx resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n ResourceARN='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceARN: string\n :param ResourceARN: [REQUIRED]\n The ARN of the Amazon FSx resource to untag.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n A list of keys of tags on the resource to untag. In case the tag key doesn't exist, the call will still succeed to be idempotent.\n (string) --A string of 1 to 128 characters that specifies the key for a tag. Tag keys must be unique for the resource to which they are attached.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_file_system(FileSystemId=None, ClientRequestToken=None, WindowsConfiguration=None, LustreConfiguration=None):\n \"\"\"\n Updates a file system configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.update_file_system(\n FileSystemId='string',\n ClientRequestToken='string',\n WindowsConfiguration={\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123\n },\n LustreConfiguration={\n 'WeeklyMaintenanceStartTime': 'string'\n }\n )\n \n \n :type FileSystemId: string\n :param FileSystemId: [REQUIRED]\n The globally unique ID of the file system, assigned by Amazon FSx.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.\n This field is autopopulated if not provided.\n \n\n :type WindowsConfiguration: dict\n :param WindowsConfiguration: The configuration for this Microsoft Windows file system. The only supported options are for backup and maintenance.\n WeeklyMaintenanceStartTime (string) --The preferred time to perform weekly maintenance, in the UTC time zone.\n DailyAutomaticBackupStartTime (string) --The preferred time to take daily automatic backups, in the UTC time zone.\n AutomaticBackupRetentionDays (integer) --The number of days to retain automatic backups. Setting this to 0 disables automatic backups. 
You can retain automatic backups for a maximum of 35 days.\n \n\n :type LustreConfiguration: dict\n :param LustreConfiguration: The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.\n WeeklyMaintenanceStartTime (string) --The preferred time to perform weekly maintenance, in the UTC time zone.\n \n\n :rtype: dict\n :return: {\n 'FileSystem': {\n 'OwnerId': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'FileSystemId': 'string',\n 'FileSystemType': 'WINDOWS'|'LUSTRE',\n 'Lifecycle': 'AVAILABLE'|'CREATING'|'FAILED'|'DELETING',\n 'FailureDetails': {\n 'Message': 'string'\n },\n 'StorageCapacity': 123,\n 'VpcId': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'NetworkInterfaceIds': [\n 'string',\n ],\n 'DNSName': 'string',\n 'KmsKeyId': 'string',\n 'ResourceARN': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'WindowsConfiguration': {\n 'ActiveDirectoryId': 'string',\n 'ThroughputCapacity': 123,\n 'MaintenanceOperationsInProgress': [\n 'PATCHING'|'BACKING_UP',\n ],\n 'WeeklyMaintenanceStartTime': 'string',\n 'DailyAutomaticBackupStartTime': 'string',\n 'AutomaticBackupRetentionDays': 123,\n 'CopyTagsToBackups': True|False\n },\n 'LustreConfiguration': {\n 'WeeklyMaintenanceStartTime': 'string',\n 'DataRepositoryConfiguration': {\n 'ImportPath': 'string',\n 'ExportPath': 'string',\n 'ImportedFileChunkSize': 123\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5746402144432068, "alphanum_fraction": 0.5792625546455383, "avg_line_length": 37.071998596191406, "blob_id": "c162ad32c4d17ea8e73a49dff26d7aef185b6778", "content_id": "a35ddddec33df2061c754e6da99040bd9fd14454", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9519, "license_type": "permissive", "max_line_length": 512, "num_lines": 250, "path": "/pyboto3/rdsdataservice.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
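# A minimal usage sketch for the Amazon FSx stubs documented in pyboto3/fsx.py
# above, showing how they map onto the real boto3 client. Only operations and
# parameters from those stubs are used; the subnet ID and tag values are
# hypothetical placeholders, and the 30-second polling interval is an
# arbitrary choice, not anything specified by the API.
import time
import boto3

fsx = boto3.client('fsx')

# Create a Lustre file system at the documented minimum size (3,600 GiB,
# provisioned in 3,600 GiB increments) in a single subnet.
created = fsx.create_file_system(
    FileSystemType='LUSTRE',
    StorageCapacity=3600,
    SubnetIds=['subnet-0123456789abcdef0'],  # hypothetical subnet ID
    Tags=[{'Key': 'Name', 'Value': 'example-lustre'}],
)
file_system_id = created['FileSystem']['FileSystemId']

# Poll describe_file_systems until the Lifecycle value leaves CREATING;
# per the documented enum it ends in AVAILABLE on success, FAILED otherwise.
while True:
    described = fsx.describe_file_systems(FileSystemIds=[file_system_id])
    lifecycle = described['FileSystems'][0]['Lifecycle']
    if lifecycle != 'CREATING':
        print(file_system_id, lifecycle)
        break
    time.sleep(30)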
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef execute_sql(awsSecretStoreArn=None, database=None, dbClusterOrInstanceArn=None, schema=None, sqlStatements=None):\n \"\"\"\n Executes any SQL statement on the target database synchronously\n See also: AWS API Documentation\n \n \n :example: response = client.execute_sql(\n awsSecretStoreArn='string',\n database='string',\n dbClusterOrInstanceArn='string',\n schema='string',\n sqlStatements='string'\n )\n \n \n :type awsSecretStoreArn: string\n :param awsSecretStoreArn: [REQUIRED] ARN of the db credentials in AWS Secret Store or the friendly secret name\n\n :type database: string\n :param database: Target DB name\n\n :type dbClusterOrInstanceArn: string\n :param dbClusterOrInstanceArn: [REQUIRED] ARN of the target db cluster or instance\n\n :type schema: string\n :param schema: Target Schema name\n\n :type sqlStatements: string\n :param sqlStatements: [REQUIRED] SQL statement(s) to be executed. Statements can be chained by using semicolons\n\n :rtype: dict\n :return: {\n 'sqlStatementResults': [\n {\n 'numberOfRecordsUpdated': 123,\n 'resultFrame': {\n 'records': [\n {\n 'values': [\n {\n 'arrayValues': [\n {'... recursive ...'},\n ],\n 'bigIntValue': 123,\n 'bitValue': True|False,\n 'blobValue': b'bytes',\n 'doubleValue': 123.0,\n 'intValue': 123,\n 'isNull': True|False,\n 'realValue': ...,\n 'stringValue': 'string',\n 'structValue': {\n 'attributes': [\n {'... recursive ...'},\n ]\n }\n },\n ]\n },\n ],\n 'resultSetMetadata': {\n 'columnCount': 123,\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 123,\n 'isAutoIncrement': True|False,\n 'isCaseSensitive': True|False,\n 'isCurrency': True|False,\n 'isSigned': True|False,\n 'label': 'string',\n 'name': 'string',\n 'nullable': 123,\n 'precision': 123,\n 'scale': 123,\n 'schemaName': 'string',\n 'tableName': 'string',\n 'type': 123,\n 'typeName': 'string'\n },\n ]\n }\n }\n },\n ]\n }\n \n \n :returns: \n (dict) -- Execute SQL response\n sqlStatementResults (list) -- Results returned by executing the sql statement(s)\n (dict) -- SQL statement execution result\n numberOfRecordsUpdated (integer) -- Number of rows updated.\n resultFrame (dict) -- ResultFrame returned by executing the sql statement\n records (list) -- ResultSet Metadata.\n (dict) -- Row or Record\n values (list) -- Record\n (dict) -- Column value\n arrayValues (list) -- Arbitrarily nested arrays\n (dict) -- Column value\n \n \n bigIntValue (integer) -- Long value\n bitValue (boolean) -- Bit value\n blobValue (bytes) -- Blob value\n doubleValue (float) -- Double value\n intValue (integer) -- Integer value\n isNull (boolean) -- Is column null\n realValue (float) -- Float value\n stringValue (string) -- String value\n structValue (dict) -- Struct or UDT\n attributes (list) -- Struct or UDT\n (dict) -- Column value\n \n \n \n \n \n \n \n \n \n \n \n \n resultSetMetadata (dict) -- ResultSet Metadata.\n columnCount (integer) -- Number of columns\n columnMetadata (list) -- List of columns and their types\n (dict) -- Column Metadata\n arrayBaseColumnType (integer) -- Homogenous array base SQL type from java.sql.Types.\n isAutoIncrement (boolean) -- Whether the designated column is automatically numbered\n isCaseSensitive (boolean) -- Whether values in the designated column's case matters\n isCurrency (boolean) -- Whether values in the 
designated column is a cash value\n isSigned (boolean) -- Whether values in the designated column are signed numbers\n label (string) -- Usually specified by the SQL AS. If not specified, return column name.\n name (string) -- Name of the column.\n nullable (integer) -- Indicates the nullability of values in the designated column. One of columnNoNulls (0), columnNullable (1), columnNullableUnknown (2)\n precision (integer) -- Get the designated column's specified column size.For numeric data, this is the maximum precision. For character data, this is the length in characters. For datetime datatypes, this is the length in characters of the String representation (assuming the maximum allowed precision of the fractional seconds component). For binary data, this is the length in bytes. For the ROWID datatype, this is the length in bytes. 0 is returned for data types where the column size is not applicable.\n scale (integer) -- Designated column's number of digits to right of the decimal point. 0 is returned for data types where the scale is not applicable.\n schemaName (string) -- Designated column's table's schema\n tableName (string) -- Designated column's table name\n type (integer) -- SQL type from java.sql.Types.\n typeName (string) -- Database-specific type name.\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5211542248725891, "alphanum_fraction": 0.5274545550346375, "avg_line_length": 27.774192810058594, "blob_id": "ac582f6c08a5d1d9714ff175284834b94d43e324", "content_id": "8b142d250790a11016f39919711a7d105945b4c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44601, "license_type": "permissive", "max_line_length": 266, "num_lines": 1550, "path": "/pyboto3/amplify.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
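# A minimal sketch of the execute_sql operation documented in
# pyboto3/rdsdataservice.py above, assuming an Aurora cluster with the Data
# API enabled. 'rds-data' is the boto3 service name for this API; the ARNs,
# database name, and SQL text below are hypothetical placeholders.
import boto3

rds_data = boto3.client('rds-data')

response = rds_data.execute_sql(
    dbClusterOrInstanceArn='arn:aws:rds:us-east-1:123456789012:cluster:example',  # hypothetical ARN
    awsSecretStoreArn='arn:aws:secretsmanager:us-east-1:123456789012:secret:example',  # hypothetical ARN
    database='mydb',
    sqlStatements='SELECT id, name FROM users',
)

# Each chained SQL statement produces one entry in sqlStatementResults; row
# values come back as typed Value structures, so read the field that matches
# the column type (stringValue, intValue, and so on).
for result in response['sqlStatementResults']:
    for record in result['resultFrame']['records']:
        print([value.get('stringValue', value.get('intValue')) for value in record['values']])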
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_app(name=None, description=None, repository=None, platform=None, iamServiceRoleArn=None, oauthToken=None, environmentVariables=None, enableBranchAutoBuild=None, enableBasicAuth=None, basicAuthCredentials=None, customRules=None, tags=None, buildSpec=None):\n \"\"\"\n Creates a new Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.create_app(\n name='string',\n description='string',\n repository='string',\n platform='IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n iamServiceRoleArn='string',\n oauthToken='string',\n environmentVariables={\n 'string': 'string'\n },\n enableBranchAutoBuild=True|False,\n enableBasicAuth=True|False,\n basicAuthCredentials='string',\n customRules=[\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n tags={\n 'string': 'string'\n },\n buildSpec='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n Name for the Amplify App\n \n\n :type description: string\n :param description: Description for an Amplify App\n\n :type repository: string\n :param repository: [REQUIRED]\n Repository for an Amplify App\n \n\n :type platform: string\n :param platform: [REQUIRED]\n Platform / framework for an Amplify App\n \n\n :type iamServiceRoleArn: string\n :param iamServiceRoleArn: AWS IAM service role for an Amplify App\n\n :type oauthToken: string\n :param oauthToken: [REQUIRED]\n OAuth token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. OAuth token is not stored.\n \n\n :type environmentVariables: dict\n :param environmentVariables: Environment variables map for an Amplify App.\n (string) --\n (string) --\n \n\n :type enableBranchAutoBuild: boolean\n :param enableBranchAutoBuild: Enable the auto building of branches for an Amplify App.\n\n :type enableBasicAuth: boolean\n :param enableBasicAuth: Enable Basic Authorization for an Amplify App, this will apply to all branches part of this App.\n\n :type basicAuthCredentials: string\n :param basicAuthCredentials: Credentials for Basic Authorization for an Amplify App.\n\n :type customRules: list\n :param customRules: Custom rewrite / redirect rules for an Amplify App.\n (dict) --Custom rewrite / redirect rule.\n source (string) -- [REQUIRED]The source pattern for a URL rewrite or redirect rule.\n target (string) -- [REQUIRED]The target pattern for a URL rewrite or redirect rule.\n status (string) --The status code for a URL rewrite or redirect rule.\n condition (string) --The condition for a URL rewrite or redirect rule, e.g. 
country code.\n \n \n\n :type tags: dict\n :param tags: Tag for an Amplify App\n (string) --\n (string) --\n \n\n :type buildSpec: string\n :param buildSpec: BuildSpec for an Amplify App\n\n :rtype: dict\n :return: {\n 'app': {\n 'appId': 'string',\n 'appArn': 'string',\n 'name': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'description': 'string',\n 'repository': 'string',\n 'platform': 'IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'iamServiceRoleArn': 'string',\n 'environmentVariables': {\n 'string': 'string'\n },\n 'defaultDomain': 'string',\n 'enableBranchAutoBuild': True|False,\n 'enableBasicAuth': True|False,\n 'basicAuthCredentials': 'string',\n 'customRules': [\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n 'productionBranch': {\n 'lastDeployTime': datetime(2015, 1, 1),\n 'status': 'string',\n 'thumbnailUrl': 'string',\n 'branchName': 'string'\n },\n 'buildSpec': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_branch(appId=None, branchName=None, description=None, stage=None, framework=None, enableNotification=None, enableAutoBuild=None, environmentVariables=None, basicAuthCredentials=None, enableBasicAuth=None, tags=None, buildSpec=None, ttl=None):\n \"\"\"\n Creates a new Branch for an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.create_branch(\n appId='string',\n branchName='string',\n description='string',\n stage='PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n framework='string',\n enableNotification=True|False,\n enableAutoBuild=True|False,\n environmentVariables={\n 'string': 'string'\n },\n basicAuthCredentials='string',\n enableBasicAuth=True|False,\n tags={\n 'string': 'string'\n },\n buildSpec='string',\n ttl='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch.\n \n\n :type description: string\n :param description: Description for the branch.\n\n :type stage: string\n :param stage: Stage for the branch.\n\n :type framework: string\n :param framework: Framework for the branch.\n\n :type enableNotification: boolean\n :param enableNotification: Enables notifications for the branch.\n\n :type enableAutoBuild: boolean\n :param enableAutoBuild: Enables auto building for the branch.\n\n :type environmentVariables: dict\n :param environmentVariables: Environment Variables for the branch.\n (string) --\n (string) --\n \n\n :type basicAuthCredentials: string\n :param basicAuthCredentials: Basic Authorization credentials for the branch.\n\n :type enableBasicAuth: boolean\n :param enableBasicAuth: Enables Basic Auth for the branch.\n\n :type tags: dict\n :param tags: Tag for the branch.\n (string) --\n (string) --\n \n\n :type buildSpec: string\n :param buildSpec: BuildSpec for the branch.\n\n :type ttl: string\n :param ttl: The content TTL for the website in seconds.\n\n :rtype: dict\n :return: {\n 'branch': {\n 'branchArn': 'string',\n 'branchName': 'string',\n 'description': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'stage': 'PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n 'displayName': 'string',\n 'enableNotification': True|False,\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'environmentVariables': {\n 'string': 'string'\n },\n 'enableAutoBuild': True|False,\n 'customDomains': 
[\n 'string',\n ],\n 'framework': 'string',\n 'activeJobId': 'string',\n 'totalNumberOfJobs': 'string',\n 'enableBasicAuth': True|False,\n 'thumbnailUrl': 'string',\n 'basicAuthCredentials': 'string',\n 'buildSpec': 'string',\n 'ttl': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_domain_association(appId=None, domainName=None, enableAutoSubDomain=None, subDomainSettings=None):\n \"\"\"\n Create a new DomainAssociation on an App\n See also: AWS API Documentation\n \n \n :example: response = client.create_domain_association(\n appId='string',\n domainName='string',\n enableAutoSubDomain=True|False,\n subDomainSettings=[\n {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n ]\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type domainName: string\n :param domainName: [REQUIRED]\n Domain name for the Domain Association.\n \n\n :type enableAutoSubDomain: boolean\n :param enableAutoSubDomain: Enables automated creation of Subdomains for branches.\n\n :type subDomainSettings: list\n :param subDomainSettings: [REQUIRED]\n Setting structure for the Subdomain.\n (dict) --Setting for the Subdomain.\n prefix (string) -- [REQUIRED]Prefix setting for the Subdomain.\n branchName (string) -- [REQUIRED]Branch name setting for the Subdomain.\n \n \n\n :rtype: dict\n :return: {\n 'domainAssociation': {\n 'domainAssociationArn': 'string',\n 'domainName': 'string',\n 'enableAutoSubDomain': True|False,\n 'domainStatus': 'PENDING_VERIFICATION'|'IN_PROGRESS'|'AVAILABLE'|'PENDING_DEPLOYMENT'|'FAILED',\n 'statusReason': 'string',\n 'certificateVerificationDNSRecord': 'string',\n 'subDomains': [\n {\n 'subDomainSetting': {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n 'verified': True|False,\n 'dnsRecord': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_app(appId=None):\n \"\"\"\n Delete an existing Amplify App by appId.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_app(\n appId='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :rtype: dict\n :return: {\n 'app': {\n 'appId': 'string',\n 'appArn': 'string',\n 'name': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'description': 'string',\n 'repository': 'string',\n 'platform': 'IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'iamServiceRoleArn': 'string',\n 'environmentVariables': {\n 'string': 'string'\n },\n 'defaultDomain': 'string',\n 'enableBranchAutoBuild': True|False,\n 'enableBasicAuth': True|False,\n 'basicAuthCredentials': 'string',\n 'customRules': [\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n 'productionBranch': {\n 'lastDeployTime': datetime(2015, 1, 1),\n 'status': 'string',\n 'thumbnailUrl': 'string',\n 'branchName': 'string'\n },\n 'buildSpec': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef delete_branch(appId=None, branchName=None):\n \"\"\"\n Deletes a branch for an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_branch(\n appId='string',\n branchName='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch.\n \n\n :rtype: dict\n :return: {\n 'branch': {\n 'branchArn': 'string',\n 
'branchName': 'string',\n 'description': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'stage': 'PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n 'displayName': 'string',\n 'enableNotification': True|False,\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'environmentVariables': {\n 'string': 'string'\n },\n 'enableAutoBuild': True|False,\n 'customDomains': [\n 'string',\n ],\n 'framework': 'string',\n 'activeJobId': 'string',\n 'totalNumberOfJobs': 'string',\n 'enableBasicAuth': True|False,\n 'thumbnailUrl': 'string',\n 'basicAuthCredentials': 'string',\n 'buildSpec': 'string',\n 'ttl': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef delete_domain_association(appId=None, domainName=None):\n \"\"\"\n Deletes a DomainAssociation.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_domain_association(\n appId='string',\n domainName='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type domainName: string\n :param domainName: [REQUIRED]\n Name of the domain.\n \n\n :rtype: dict\n :return: {\n 'domainAssociation': {\n 'domainAssociationArn': 'string',\n 'domainName': 'string',\n 'enableAutoSubDomain': True|False,\n 'domainStatus': 'PENDING_VERIFICATION'|'IN_PROGRESS'|'AVAILABLE'|'PENDING_DEPLOYMENT'|'FAILED',\n 'statusReason': 'string',\n 'certificateVerificationDNSRecord': 'string',\n 'subDomains': [\n {\n 'subDomainSetting': {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n 'verified': True|False,\n 'dnsRecord': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_job(appId=None, branchName=None, jobId=None):\n \"\"\"\n Delete a job, for an Amplify branch, part of Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_job(\n appId='string',\n branchName='string',\n jobId='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch, for the Job.\n \n\n :type jobId: string\n :param jobId: [REQUIRED]\n Unique Id for the Job.\n \n\n :rtype: dict\n :return: {\n 'jobSummary': {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'commitId': 'string',\n 'commitMessage': 'string',\n 'commitTime': datetime(2015, 1, 1),\n 'startTime': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PROVISIONING'|'RUNNING'|'FAILED'|'SUCCEED'|'CANCELLING'|'CANCELLED',\n 'endTime': datetime(2015, 1, 1),\n 'jobType': 'RELEASE'|'RETRY'|'WEB_HOOK'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_app(appId=None):\n \"\"\"\n Retrieves an existing Amplify App by appId.\n See also: AWS API Documentation\n \n \n :example: response = client.get_app(\n appId='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :rtype: dict\n :return: {\n 'app': {\n 'appId': 'string',\n 'appArn': 'string',\n 'name': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'description': 'string',\n 'repository': 'string',\n 'platform': 'IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'iamServiceRoleArn': 'string',\n 'environmentVariables': {\n 'string': 'string'\n },\n 'defaultDomain': 'string',\n 'enableBranchAutoBuild': True|False,\n 'enableBasicAuth': True|False,\n 'basicAuthCredentials': 'string',\n 'customRules': [\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n 'productionBranch': {\n 'lastDeployTime': datetime(2015, 1, 1),\n 'status': 'string',\n 'thumbnailUrl': 'string',\n 'branchName': 'string'\n },\n 'buildSpec': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_branch(appId=None, branchName=None):\n \"\"\"\n Retrieves a branch for an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.get_branch(\n appId='string',\n branchName='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch.\n \n\n :rtype: dict\n :return: {\n 'branch': {\n 'branchArn': 'string',\n 'branchName': 'string',\n 'description': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'stage': 'PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n 'displayName': 'string',\n 'enableNotification': True|False,\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'environmentVariables': {\n 'string': 'string'\n },\n 'enableAutoBuild': True|False,\n 'customDomains': [\n 'string',\n ],\n 'framework': 'string',\n 'activeJobId': 'string',\n 'totalNumberOfJobs': 'string',\n 'enableBasicAuth': True|False,\n 'thumbnailUrl': 'string',\n 'basicAuthCredentials': 'string',\n 'buildSpec': 'string',\n 'ttl': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_domain_association(appId=None, domainName=None):\n \"\"\"\n Retrieves domain info that corresponds to an appId and domainName.\n See also: AWS API Documentation\n \n \n :example: response = client.get_domain_association(\n appId='string',\n domainName='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type domainName: string\n :param domainName: [REQUIRED]\n Name of the domain.\n \n\n :rtype: dict\n :return: {\n 'domainAssociation': {\n 'domainAssociationArn': 'string',\n 'domainName': 'string',\n 'enableAutoSubDomain': True|False,\n 'domainStatus': 'PENDING_VERIFICATION'|'IN_PROGRESS'|'AVAILABLE'|'PENDING_DEPLOYMENT'|'FAILED',\n 'statusReason': 'string',\n 'certificateVerificationDNSRecord': 'string',\n 'subDomains': [\n {\n 'subDomainSetting': {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n 'verified': True|False,\n 'dnsRecord': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_job(appId=None, branchName=None, jobId=None):\n \"\"\"\n Get a job for a branch, part of an Amplify 
App.\n See also: AWS API Documentation\n \n \n :example: response = client.get_job(\n appId='string',\n branchName='string',\n jobId='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch, for the Job.\n \n\n :type jobId: string\n :param jobId: [REQUIRED]\n Unique Id for the Job.\n \n\n :rtype: dict\n :return: {\n 'job': {\n 'summary': {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'commitId': 'string',\n 'commitMessage': 'string',\n 'commitTime': datetime(2015, 1, 1),\n 'startTime': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PROVISIONING'|'RUNNING'|'FAILED'|'SUCCEED'|'CANCELLING'|'CANCELLED',\n 'endTime': datetime(2015, 1, 1),\n 'jobType': 'RELEASE'|'RETRY'|'WEB_HOOK'\n },\n 'steps': [\n {\n 'stepName': 'string',\n 'startTime': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PROVISIONING'|'RUNNING'|'FAILED'|'SUCCEED'|'CANCELLING'|'CANCELLED',\n 'endTime': datetime(2015, 1, 1),\n 'logUrl': 'string',\n 'artifactsUrl': 'string',\n 'screenshots': {\n 'string': 'string'\n }\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_apps(nextToken=None, maxResults=None):\n \"\"\"\n Lists existing Amplify Apps.\n See also: AWS API Documentation\n \n \n :example: response = client.list_apps(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: Pagination token. 
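A minimal manual-pagination sketch (a sketch only, assuming a client created with client = boto3.client('amplify') ; it uses only the operations documented in this module):\n response = client.list_apps(maxResults=25)\n apps = response['apps']\n # keep fetching while the service returns a nextToken\n while 'nextToken' in response:\n response = client.list_apps(maxResults=25, nextToken=response['nextToken'])\n apps.extend(response['apps'])\n 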
If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.\n\n :type maxResults: integer\n :param maxResults: Maximum number of records to list in a single response.\n\n :rtype: dict\n :return: {\n 'apps': [\n {\n 'appId': 'string',\n 'appArn': 'string',\n 'name': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'description': 'string',\n 'repository': 'string',\n 'platform': 'IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'iamServiceRoleArn': 'string',\n 'environmentVariables': {\n 'string': 'string'\n },\n 'defaultDomain': 'string',\n 'enableBranchAutoBuild': True|False,\n 'enableBasicAuth': True|False,\n 'basicAuthCredentials': 'string',\n 'customRules': [\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n 'productionBranch': {\n 'lastDeployTime': datetime(2015, 1, 1),\n 'status': 'string',\n 'thumbnailUrl': 'string',\n 'branchName': 'string'\n },\n 'buildSpec': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_branches(appId=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists branches for an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.list_branches(\n appId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type nextToken: string\n :param nextToken: Pagination token. Set to null to start listing branches from start. If a non-null pagination token is returned in a result, then pass its value in here to list more branches.\n\n :type maxResults: integer\n :param maxResults: Maximum number of records to list in a single response.\n\n :rtype: dict\n :return: {\n 'branches': [\n {\n 'branchArn': 'string',\n 'branchName': 'string',\n 'description': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'stage': 'PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n 'displayName': 'string',\n 'enableNotification': True|False,\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'environmentVariables': {\n 'string': 'string'\n },\n 'enableAutoBuild': True|False,\n 'customDomains': [\n 'string',\n ],\n 'framework': 'string',\n 'activeJobId': 'string',\n 'totalNumberOfJobs': 'string',\n 'enableBasicAuth': True|False,\n 'thumbnailUrl': 'string',\n 'basicAuthCredentials': 'string',\n 'buildSpec': 'string',\n 'ttl': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_domain_associations(appId=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists domain associations for an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.list_domain_associations(\n appId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type nextToken: string\n :param nextToken: Pagination token. Set to null to start listing domain associations from start. 
If non-null pagination token is returned in a result, then pass its value in here to list more domain associations.\n\n :type maxResults: integer\n :param maxResults: Maximum number of records to list in a single response.\n\n :rtype: dict\n :return: {\n 'domainAssociations': [\n {\n 'domainAssociationArn': 'string',\n 'domainName': 'string',\n 'enableAutoSubDomain': True|False,\n 'domainStatus': 'PENDING_VERIFICATION'|'IN_PROGRESS'|'AVAILABLE'|'PENDING_DEPLOYMENT'|'FAILED',\n 'statusReason': 'string',\n 'certificateVerificationDNSRecord': 'string',\n 'subDomains': [\n {\n 'subDomainSetting': {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n 'verified': True|False,\n 'dnsRecord': 'string'\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_jobs(appId=None, branchName=None, nextToken=None, maxResults=None):\n \"\"\"\n List Jobs for a branch, part of an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.list_jobs(\n appId='string',\n branchName='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for a branch.\n \n\n :type nextToken: string\n :param nextToken: Pagination token. Set to null to start listing jobs from start. If a non-null pagination token is returned in a result, then pass its value in here to list more jobs.\n\n :type maxResults: integer\n :param maxResults: Maximum number of records to list in a single response.\n\n :rtype: dict\n :return: {\n 'jobSummaries': [\n {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'commitId': 'string',\n 'commitMessage': 'string',\n 'commitTime': datetime(2015, 1, 1),\n 'startTime': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PROVISIONING'|'RUNNING'|'FAILED'|'SUCCEED'|'CANCELLING'|'CANCELLED',\n 'endTime': datetime(2015, 1, 1),\n 'jobType': 'RELEASE'|'RETRY'|'WEB_HOOK'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_job(appId=None, branchName=None, jobId=None, jobType=None, jobReason=None, commitId=None, commitMessage=None, commitTime=None):\n \"\"\"\n Starts a new job for a branch, part of an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.start_job(\n appId='string',\n branchName='string',\n jobId='string',\n jobType='RELEASE'|'RETRY'|'WEB_HOOK',\n jobReason='string',\n commitId='string',\n commitMessage='string',\n commitTime=datetime(2015, 1, 1)\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch, for the Job.\n \n\n :type jobId: string\n :param jobId: Unique Id for the Job.\n\n :type jobType: string\n :param jobType: [REQUIRED]\n Type for the Job.\n \n\n :type jobReason: string\n :param jobReason: Reason for the Job.\n\n :type commitId: string\n :param commitId: Commit Id from 3rd party repository provider for the Job.\n\n :type commitMessage: string\n :param commitMessage: Commit message from 3rd party repository provider for the Job.\n\n :type commitTime: datetime\n :param commitTime: Commit date / time for the Job.\n\n :rtype: dict\n :return: {\n 'jobSummary': {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'commitId': 'string',\n 'commitMessage': 'string',\n 'commitTime': datetime(2015, 1, 1),\n 'startTime': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PROVISIONING'|'RUNNING'|'FAILED'|'SUCCEED'|'CANCELLING'|'CANCELLED',\n 'endTime': 
datetime(2015, 1, 1),\n 'jobType': 'RELEASE'|'RETRY'|'WEB_HOOK'\n }\n }\n \n \n \"\"\"\n pass\n\ndef stop_job(appId=None, branchName=None, jobId=None):\n \"\"\"\n Stop a job that is in progress, for an Amplify branch, part of Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_job(\n appId='string',\n branchName='string',\n jobId='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch, for the Job.\n \n\n :type jobId: string\n :param jobId: [REQUIRED]\n Unique Id for the Job.\n \n\n :rtype: dict\n :return: {\n 'jobSummary': {\n 'jobArn': 'string',\n 'jobId': 'string',\n 'commitId': 'string',\n 'commitMessage': 'string',\n 'commitTime': datetime(2015, 1, 1),\n 'startTime': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PROVISIONING'|'RUNNING'|'FAILED'|'SUCCEED'|'CANCELLING'|'CANCELLED',\n 'endTime': datetime(2015, 1, 1),\n 'jobType': 'RELEASE'|'RETRY'|'WEB_HOOK'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_app(appId=None, name=None, description=None, platform=None, iamServiceRoleArn=None, environmentVariables=None, enableBranchAutoBuild=None, enableBasicAuth=None, basicAuthCredentials=None, customRules=None, buildSpec=None):\n \"\"\"\n Updates an existing Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.update_app(\n appId='string',\n name='string',\n description='string',\n platform='IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n iamServiceRoleArn='string',\n environmentVariables={\n 'string': 'string'\n },\n enableBranchAutoBuild=True|False,\n enableBasicAuth=True|False,\n basicAuthCredentials='string',\n customRules=[\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n buildSpec='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type name: string\n :param name: Name for an Amplify App.\n\n :type description: string\n :param description: Description for an Amplify App.\n\n :type platform: string\n :param platform: Platform for an Amplify App.\n\n :type iamServiceRoleArn: string\n :param iamServiceRoleArn: IAM service role for an Amplify App.\n\n :type environmentVariables: dict\n :param environmentVariables: Environment Variables for an Amplify App.\n (string) --\n (string) --\n \n\n :type enableBranchAutoBuild: boolean\n :param enableBranchAutoBuild: Enables branch auto-building for an Amplify App.\n\n :type enableBasicAuth: boolean\n :param enableBasicAuth: Enables Basic Authorization for an Amplify App.\n\n :type basicAuthCredentials: string\n :param basicAuthCredentials: Basic Authorization credentials for an Amplify App.\n\n :type customRules: list\n :param customRules: Custom redirect / rewrite rules for an Amplify App.\n (dict) --Custom rewrite / redirect rule.\n source (string) -- [REQUIRED]The source pattern for a URL rewrite or redirect rule.\n target (string) -- [REQUIRED]The target pattern for a URL rewrite or redirect rule.\n status (string) --The status code for a URL rewrite or redirect rule.\n condition (string) --The condition for a URL rewrite or redirect rule, e.g. 
country code.\n \n \n\n :type buildSpec: string\n :param buildSpec: BuildSpec for an Amplify App.\n\n :rtype: dict\n :return: {\n 'app': {\n 'appId': 'string',\n 'appArn': 'string',\n 'name': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'description': 'string',\n 'repository': 'string',\n 'platform': 'IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'iamServiceRoleArn': 'string',\n 'environmentVariables': {\n 'string': 'string'\n },\n 'defaultDomain': 'string',\n 'enableBranchAutoBuild': True|False,\n 'enableBasicAuth': True|False,\n 'basicAuthCredentials': 'string',\n 'customRules': [\n {\n 'source': 'string',\n 'target': 'string',\n 'status': 'string',\n 'condition': 'string'\n },\n ],\n 'productionBranch': {\n 'lastDeployTime': datetime(2015, 1, 1),\n 'status': 'string',\n 'thumbnailUrl': 'string',\n 'branchName': 'string'\n },\n 'buildSpec': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef update_branch(appId=None, branchName=None, description=None, framework=None, stage=None, enableNotification=None, enableAutoBuild=None, environmentVariables=None, basicAuthCredentials=None, enableBasicAuth=None, buildSpec=None, ttl=None):\n \"\"\"\n Updates a branch for an Amplify App.\n See also: AWS API Documentation\n \n \n :example: response = client.update_branch(\n appId='string',\n branchName='string',\n description='string',\n framework='string',\n stage='PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n enableNotification=True|False,\n enableAutoBuild=True|False,\n environmentVariables={\n 'string': 'string'\n },\n basicAuthCredentials='string',\n enableBasicAuth=True|False,\n buildSpec='string',\n ttl='string'\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n Name for the branch.\n \n\n :type description: string\n :param description: Description for the branch.\n\n :type framework: string\n :param framework: Framework for the branch.\n\n :type stage: string\n :param stage: Stage for the branch.\n\n :type enableNotification: boolean\n :param enableNotification: Enables notifications for the branch.\n\n :type enableAutoBuild: boolean\n :param enableAutoBuild: Enables auto building for the branch.\n\n :type environmentVariables: dict\n :param environmentVariables: Environment Variables for the branch.\n (string) --\n (string) --\n \n\n :type basicAuthCredentials: string\n :param basicAuthCredentials: Basic Authorization credentials for the branch.\n\n :type enableBasicAuth: boolean\n :param enableBasicAuth: Enables Basic Auth for the branch.\n\n :type buildSpec: string\n :param buildSpec: BuildSpec for the branch.\n\n :type ttl: string\n :param ttl: The content TTL for the website in seconds.\n\n :rtype: dict\n :return: {\n 'branch': {\n 'branchArn': 'string',\n 'branchName': 'string',\n 'description': 'string',\n 'tags': {\n 'string': 'string'\n },\n 'stage': 'PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',\n 'displayName': 'string',\n 'enableNotification': True|False,\n 'createTime': datetime(2015, 1, 1),\n 'updateTime': datetime(2015, 1, 1),\n 'environmentVariables': {\n 'string': 'string'\n },\n 'enableAutoBuild': True|False,\n 'customDomains': [\n 'string',\n ],\n 'framework': 'string',\n 'activeJobId': 'string',\n 'totalNumberOfJobs': 'string',\n 'enableBasicAuth': True|False,\n 'thumbnailUrl': 'string',\n 'basicAuthCredentials': 'string',\n 'buildSpec': 
'string',\n 'ttl': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef update_domain_association(appId=None, domainName=None, enableAutoSubDomain=None, subDomainSettings=None):\n \"\"\"\n Create a new DomainAssociation on an App\n See also: AWS API Documentation\n \n \n :example: response = client.update_domain_association(\n appId='string',\n domainName='string',\n enableAutoSubDomain=True|False,\n subDomainSettings=[\n {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n ]\n )\n \n \n :type appId: string\n :param appId: [REQUIRED]\n Unique Id for an Amplify App.\n \n\n :type domainName: string\n :param domainName: [REQUIRED]\n Name of the domain.\n \n\n :type enableAutoSubDomain: boolean\n :param enableAutoSubDomain: Enables automated creation of Subdomains for branches.\n\n :type subDomainSettings: list\n :param subDomainSettings: [REQUIRED]\n Setting structure for the Subdomain.\n (dict) --Setting for the Subdomain.\n prefix (string) -- [REQUIRED]Prefix setting for the Subdomain.\n branchName (string) -- [REQUIRED]Branch name setting for the Subdomain.\n \n \n\n :rtype: dict\n :return: {\n 'domainAssociation': {\n 'domainAssociationArn': 'string',\n 'domainName': 'string',\n 'enableAutoSubDomain': True|False,\n 'domainStatus': 'PENDING_VERIFICATION'|'IN_PROGRESS'|'AVAILABLE'|'PENDING_DEPLOYMENT'|'FAILED',\n 'statusReason': 'string',\n 'certificateVerificationDNSRecord': 'string',\n 'subDomains': [\n {\n 'subDomainSetting': {\n 'prefix': 'string',\n 'branchName': 'string'\n },\n 'verified': True|False,\n 'dnsRecord': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5550896525382996, "alphanum_fraction": 0.5579046607017517, "avg_line_length": 50.44520950317383, "blob_id": "1e5382a0b92c11f382443be4ccb6c718a1cd9b63", "content_id": "8360093acb107f1100c245ea34c8b565e725ce96", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119714, "license_type": "permissive", "max_line_length": 459, "num_lines": 2327, "path": "/pyboto3/kinesisanalyticsv2.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_application_cloud_watch_logging_option(ApplicationName=None, CurrentApplicationVersionId=None, CloudWatchLoggingOption=None):\n \"\"\"\n Adds an Amazon CloudWatch log stream to monitor application configuration errors.\n See also: AWS API Documentation\n \n \n :example: response = client.add_application_cloud_watch_logging_option(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n CloudWatchLoggingOption={\n 'LogStreamARN': 'string'\n }\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The Kinesis Data Analytics application name.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The version ID of the Kinesis Data Analytics application. You can retrieve the application version ID using DescribeApplication .\n \n\n :type CloudWatchLoggingOption: dict\n :param CloudWatchLoggingOption: [REQUIRED]\n Provides the Amazon CloudWatch log stream Amazon Resource Name (ARN).\n LogStreamARN (string) -- [REQUIRED]The ARN of the CloudWatch log to receive application messages.\n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123,\n 'CloudWatchLoggingOptionDescriptions': [\n {\n 'CloudWatchLoggingOptionId': 'string',\n 'LogStreamARN': 'string',\n 'RoleARN': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef add_application_input(ApplicationName=None, CurrentApplicationVersionId=None, Input=None):\n \"\"\"\n Adds a streaming source to your SQL-based Amazon Kinesis Data Analytics application.\n You can add a streaming source when you create an application, or you can use this operation to add a streaming source after you create an application. For more information, see CreateApplication .\n Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.\n See also: AWS API Documentation\n \n \n :example: response = client.add_application_input(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n Input={\n 'NamePrefix': 'string',\n 'InputProcessingConfiguration': {\n 'InputLambdaProcessor': {\n 'ResourceARN': 'string'\n }\n },\n 'KinesisStreamsInput': {\n 'ResourceARN': 'string'\n },\n 'KinesisFirehoseInput': {\n 'ResourceARN': 'string'\n },\n 'InputParallelism': {\n 'Count': 123\n },\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n }\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of your existing application to which you want to add the streaming source.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The current version of your application. 
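A short sketch for looking that version up first (assuming client = boto3.client('kinesisanalyticsv2') ; the application name 'my-app' is a placeholder):\n # DescribeApplication reports the current version under ApplicationDetail\n version = client.describe_application(ApplicationName='my-app')['ApplicationDetail']['ApplicationVersionId']\n 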
You can use the DescribeApplication operation to find the current application version.\n \n\n :type Input: dict\n :param Input: [REQUIRED]\n The Input to add.\n NamePrefix (string) -- [REQUIRED]The name prefix to use when creating an in-application stream. Suppose that you specify a prefix 'MyInApplicationStream'. Kinesis Data Analytics then creates one or more (as per the InputParallelism count you specified) in-application streams with the names 'MyInApplicationStream_001', 'MyInApplicationStream_002', and so on.\n InputProcessingConfiguration (dict) --The InputProcessingConfiguration for the input. An input processor transforms records as they are received from the stream, before the application's SQL code executes. Currently, the only input processing configuration available is InputLambdaProcessor.\n InputLambdaProcessor (dict) -- [REQUIRED]The InputLambdaProcessor that is used to preprocess the records in the stream before being processed by your application code.\n ResourceARN (string) -- [REQUIRED]The ARN of the AWS Lambda function that operates on records in the stream.\n \n KinesisStreamsInput (dict) --If the streaming source is an Amazon Kinesis data stream, identifies the stream's Amazon Resource Name (ARN).\n ResourceARN (string) -- [REQUIRED]The ARN of the input Kinesis data stream to read.\n KinesisFirehoseInput (dict) --If the streaming source is an Amazon Kinesis Data Firehose delivery stream, identifies the delivery stream's ARN.\n ResourceARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the delivery stream.\n InputParallelism (dict) --Describes the number of in-application streams to create.\n Count (integer) --The number of in-application streams to create.\n InputSchema (dict) -- [REQUIRED]Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.\n Also used to describe the format of the reference data source.\n RecordFormat (dict) -- [REQUIRED]Specifies the format of the records on the streaming source.\n RecordFormatType (string) -- [REQUIRED]The type of record format.\n MappingParameters (dict) --When you configure application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.\n JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.\n RecordRowPath (string) -- [REQUIRED]The path to the top-level parent that contains the records.\n CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).\n RecordRowDelimiter (string) -- [REQUIRED]The row delimiter. For example, in a CSV format, '\\n' is the typical row delimiter.\n RecordColumnDelimiter (string) -- [REQUIRED]The column delimiter. For example, in a CSV format, a comma (',') is the typical column delimiter.\n \n RecordEncoding (string) --Specifies the encoding of the records in the streaming source. 
For example, UTF-8.\n RecordColumns (list) -- [REQUIRED]A list of RecordColumn objects.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.\n Also used to describe the format of the reference data source.\n Name (string) -- [REQUIRED]The name of the column that is created in the in-application input stream or reference table.\n Mapping (string) --A reference to the data element in the streaming input of the reference data source.\n SqlType (string) -- [REQUIRED]The type of column created in the in-application input stream or reference table.\n \n \n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123,\n 'InputDescriptions': [\n {\n 'InputId': 'string',\n 'NamePrefix': 'string',\n 'InAppStreamNames': [\n 'string',\n ],\n 'InputProcessingConfigurationDescription': {\n 'InputLambdaProcessorDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n }\n },\n 'KinesisStreamsInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n },\n 'InputParallelism': {\n 'Count': 123\n },\n 'InputStartingPositionConfiguration': {\n 'InputStartingPosition': 'NOW'|'TRIM_HORIZON'|'LAST_STOPPED_POINT'\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef add_application_input_processing_configuration(ApplicationName=None, CurrentApplicationVersionId=None, InputId=None, InputProcessingConfiguration=None):\n \"\"\"\n Adds an InputProcessingConfiguration to an SQL-based Kinesis Data Analytics application. An input processor pre-processes records on the input stream before the application's SQL code executes. Currently, the only input processor available is AWS Lambda .\n See also: AWS API Documentation\n \n \n :example: response = client.add_application_input_processing_configuration(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n InputId='string',\n InputProcessingConfiguration={\n 'InputLambdaProcessor': {\n 'ResourceARN': 'string'\n }\n }\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application to which you want to add the input processing configuration.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The version of the application to which you want to add the input processing configuration. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.\n \n\n :type InputId: string\n :param InputId: [REQUIRED]\n The ID of the input configuration to add the input processing configuration to. 
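One way to look the input IDs up is sketched here (assuming client = boto3.client('kinesisanalyticsv2') ; the application name 'my-app' and the nested response keys are assumptions based on the DescribeApplication response for SQL applications):\n detail = client.describe_application(ApplicationName='my-app')['ApplicationDetail']\n inputs = detail['ApplicationConfigurationDescription']['SqlApplicationConfigurationDescription']['InputDescriptions']\n input_ids = [i['InputId'] for i in inputs]\n 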
You can get a list of the input IDs for an application using the DescribeApplication operation.\n \n\n :type InputProcessingConfiguration: dict\n :param InputProcessingConfiguration: [REQUIRED]\n The InputProcessingConfiguration to add to the application.\n InputLambdaProcessor (dict) -- [REQUIRED]The InputLambdaProcessor that is used to preprocess the records in the stream before being processed by your application code.\n ResourceARN (string) -- [REQUIRED]The ARN of the AWS Lambda function that operates on records in the stream.\n \n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123,\n 'InputId': 'string',\n 'InputProcessingConfigurationDescription': {\n 'InputLambdaProcessorDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef add_application_output(ApplicationName=None, CurrentApplicationVersionId=None, Output=None):\n \"\"\"\n Adds an external destination to your SQL-based Amazon Kinesis Data Analytics application.\n If you want Kinesis Data Analytics to deliver data from an in-application stream within your application to an external destination (such as a Kinesis data stream, a Kinesis Data Firehose delivery stream, or an AWS Lambda function), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination.\n You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors.\n Any configuration update, including adding an output using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.\n See also: AWS API Documentation\n \n \n :example: response = client.add_application_output(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n Output={\n 'Name': 'string',\n 'KinesisStreamsOutput': {\n 'ResourceARN': 'string'\n },\n 'KinesisFirehoseOutput': {\n 'ResourceARN': 'string'\n },\n 'LambdaOutput': {\n 'ResourceARN': 'string'\n },\n 'DestinationSchema': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n }\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application to which you want to add the output configuration.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The version of the application to which you want to add the output configuration. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.\n \n\n :type Output: dict\n :param Output: [REQUIRED]\n An object describing one output configuration. 
In the output configuration, you specify the name of an in-application stream, a destination (that is, a Kinesis data stream, a Kinesis Data Firehose delivery stream, or an AWS Lambda function), and the record format to use when writing to the destination.\n Name (string) -- [REQUIRED]The name of the in-application stream.\n KinesisStreamsOutput (dict) --Identifies an Amazon Kinesis data stream as the destination.\n ResourceARN (string) -- [REQUIRED]The ARN of the destination Kinesis data stream to write to.\n KinesisFirehoseOutput (dict) --Identifies an Amazon Kinesis Data Firehose delivery stream as the destination.\n ResourceARN (string) -- [REQUIRED]The ARN of the destination delivery stream to write to.\n LambdaOutput (dict) --Identifies an AWS Lambda function as the destination.\n ResourceARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the destination Lambda function to write to.\n DestinationSchema (dict) -- [REQUIRED]Describes the data format when records are written to the destination.\n RecordFormatType (string) -- [REQUIRED]Specifies the format of the records on the output stream.\n \n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123,\n 'OutputDescriptions': [\n {\n 'OutputId': 'string',\n 'Name': 'string',\n 'KinesisStreamsOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'LambdaOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'DestinationSchema': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef add_application_reference_data_source(ApplicationName=None, CurrentApplicationVersionId=None, ReferenceDataSource=None):\n \"\"\"\n Adds a reference data source to an existing SQL-based Amazon Kinesis Data Analytics application.\n Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), the name of the in-application table to create, and the necessary mapping information that describes how data in an Amazon S3 object maps to columns in the resulting in-application table.\n See also: AWS API Documentation\n \n \n :example: response = client.add_application_reference_data_source(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n ReferenceDataSource={\n 'TableName': 'string',\n 'S3ReferenceDataSource': {\n 'BucketARN': 'string',\n 'FileKey': 'string'\n },\n 'ReferenceSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n }\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of an existing application.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The version of the application for which you are adding the reference data source. You can use the DescribeApplication operation to get the current application version. 
If the version specified is not the current version, the ConcurrentModificationException is returned.\n \n\n :type ReferenceDataSource: dict\n :param ReferenceDataSource: [REQUIRED]\n The reference data source can be an object in your Amazon S3 bucket. Kinesis Data Analytics reads the object and copies the data into the in-application table that is created. You provide an S3 bucket, an object key name, and the name of the in-application table that is created.\n TableName (string) -- [REQUIRED]The name of the in-application table to create.\n S3ReferenceDataSource (dict) --Identifies the S3 bucket and object that contains the reference data. A Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.\n BucketARN (string) --The Amazon Resource Name (ARN) of the S3 bucket.\n FileKey (string) --The object key name containing the reference data.\n ReferenceSchema (dict) -- [REQUIRED]Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.\n RecordFormat (dict) -- [REQUIRED]Specifies the format of the records on the streaming source.\n RecordFormatType (string) -- [REQUIRED]The type of record format.\n MappingParameters (dict) --When you configure application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.\n JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.\n RecordRowPath (string) -- [REQUIRED]The path to the top-level parent that contains the records.\n CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).\n RecordRowDelimiter (string) -- [REQUIRED]The row delimiter. For example, in a CSV format, '\\n' is the typical row delimiter.\n RecordColumnDelimiter (string) -- [REQUIRED]The column delimiter. For example, in a CSV format, a comma (',') is the typical column delimiter.\n \n RecordEncoding (string) --Specifies the encoding of the records in the streaming source. 
For example, UTF-8.\n RecordColumns (list) -- [REQUIRED]A list of RecordColumn objects.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.\n Also used to describe the format of the reference data source.\n Name (string) -- [REQUIRED]The name of the column that is created in the in-application input stream or reference table.\n Mapping (string) --A reference to the data element in the streaming input of the reference data source.\n SqlType (string) -- [REQUIRED]The type of column created in the in-application input stream or reference table.\n \n \n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123,\n 'ReferenceDataSourceDescriptions': [\n {\n 'ReferenceId': 'string',\n 'TableName': 'string',\n 'S3ReferenceDataSourceDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ReferenceRoleARN': 'string'\n },\n 'ReferenceSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_application(ApplicationName=None, ApplicationDescription=None, RuntimeEnvironment=None, ServiceExecutionRole=None, ApplicationConfiguration=None, CloudWatchLoggingOptions=None):\n \"\"\"\n Creates an Amazon Kinesis Data Analytics application. 
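A bare-bones creation sketch (the application name and role ARN are placeholders, and a real SQL application would normally also supply an ApplicationConfiguration ):\n import boto3\n client = boto3.client('kinesisanalyticsv2')\n response = client.create_application(\n ApplicationName='demo-app',\n RuntimeEnvironment='SQL-1_0',\n ServiceExecutionRole='arn:aws:iam::123456789012:role/demo-kda-role'\n )\n 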
For information about creating a Kinesis Data Analytics application, see Creating an Application .\n See also: AWS API Documentation\n \n \n :example: response = client.create_application(\n ApplicationName='string',\n ApplicationDescription='string',\n RuntimeEnvironment='SQL-1_0'|'FLINK-1_6',\n ServiceExecutionRole='string',\n ApplicationConfiguration={\n 'SqlApplicationConfiguration': {\n 'Inputs': [\n {\n 'NamePrefix': 'string',\n 'InputProcessingConfiguration': {\n 'InputLambdaProcessor': {\n 'ResourceARN': 'string'\n }\n },\n 'KinesisStreamsInput': {\n 'ResourceARN': 'string'\n },\n 'KinesisFirehoseInput': {\n 'ResourceARN': 'string'\n },\n 'InputParallelism': {\n 'Count': 123\n },\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ],\n 'Outputs': [\n {\n 'Name': 'string',\n 'KinesisStreamsOutput': {\n 'ResourceARN': 'string'\n },\n 'KinesisFirehoseOutput': {\n 'ResourceARN': 'string'\n },\n 'LambdaOutput': {\n 'ResourceARN': 'string'\n },\n 'DestinationSchema': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n },\n ],\n 'ReferenceDataSources': [\n {\n 'TableName': 'string',\n 'S3ReferenceDataSource': {\n 'BucketARN': 'string',\n 'FileKey': 'string'\n },\n 'ReferenceSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ]\n },\n 'FlinkApplicationConfiguration': {\n 'CheckpointConfiguration': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'CheckpointingEnabled': True|False,\n 'CheckpointInterval': 123,\n 'MinPauseBetweenCheckpoints': 123\n },\n 'MonitoringConfiguration': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'MetricsLevel': 'APPLICATION'|'TASK'|'OPERATOR'|'PARALLELISM',\n 'LogLevel': 'INFO'|'WARN'|'ERROR'|'DEBUG'\n },\n 'ParallelismConfiguration': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'Parallelism': 123,\n 'ParallelismPerKPU': 123,\n 'AutoScalingEnabled': True|False\n }\n },\n 'EnvironmentProperties': {\n 'PropertyGroups': [\n {\n 'PropertyGroupId': 'string',\n 'PropertyMap': {\n 'string': 'string'\n }\n },\n ]\n },\n 'ApplicationCodeConfiguration': {\n 'CodeContent': {\n 'TextContent': 'string',\n 'ZipFileContent': b'bytes',\n 'S3ContentLocation': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ObjectVersion': 'string'\n }\n },\n 'CodeContentType': 'PLAINTEXT'|'ZIPFILE'\n },\n 'ApplicationSnapshotConfiguration': {\n 'SnapshotsEnabled': True|False\n }\n },\n CloudWatchLoggingOptions=[\n {\n 'LogStreamARN': 'string'\n },\n ]\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of your application (for example, sample-app ).\n \n\n :type ApplicationDescription: string\n :param ApplicationDescription: A summary description of the application.\n\n :type RuntimeEnvironment: string\n :param RuntimeEnvironment: [REQUIRED]\n The runtime environment for the application (SQL-1.0 or JAVA-8-FLINK-1.5 ).\n \n\n :type ServiceExecutionRole: string\n 
:param ServiceExecutionRole: [REQUIRED]\n The IAM role used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources.\n \n\n :type ApplicationConfiguration: dict\n :param ApplicationConfiguration: Use this parameter to configure the application.\n SqlApplicationConfiguration (dict) --The creation and update parameters for an SQL-based Kinesis Data Analytics application.\n Inputs (list) --The array of Input objects describing the input streams used by the application.\n (dict) --When you configure the application input for an SQL-based Amazon Kinesis Data Analytics application, you specify the streaming source, the in-application stream name that is created, and the mapping between the two.\n NamePrefix (string) -- [REQUIRED]The name prefix to use when creating an in-application stream. Suppose that you specify a prefix 'MyInApplicationStream .' Kinesis Data Analytics then creates one or more (as per the InputParallelism count you specified) in-application streams with the names 'MyInApplicationStream_001 ,' 'MyInApplicationStream_002 ,' and so on.\n InputProcessingConfiguration (dict) --The InputProcessingConfiguration for the input. An input processor transforms records as they are received from the stream, before the application's SQL code executes. Currently, the only input processing configuration available is InputLambdaProcessor .\n InputLambdaProcessor (dict) -- [REQUIRED]The InputLambdaProcessor that is used to preprocess the records in the stream before being processed by your application code.\n ResourceARN (string) -- [REQUIRED]The ARN of the AWS Lambda function that operates on records in the stream.\n \n KinesisStreamsInput (dict) --If the streaming source is an Amazon Kinesis data stream, identifies the stream's Amazon Resource Name (ARN).\n ResourceARN (string) -- [REQUIRED]The ARN of the input Kinesis data stream to read.\n KinesisFirehoseInput (dict) --If the streaming source is an Amazon Kinesis Data Firehose delivery stream, identifies the delivery stream's ARN.\n ResourceARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the delivery stream.\n InputParallelism (dict) --Describes the number of in-application streams to create.\n Count (integer) --The number of in-application streams to create.\n InputSchema (dict) -- [REQUIRED]Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.\n Also used to describe the format of the reference data source.\n RecordFormat (dict) -- [REQUIRED]Specifies the format of the records on the streaming source.\n RecordFormatType (string) -- [REQUIRED]The type of record format.\n MappingParameters (dict) --When you configure application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.\n JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.\n RecordRowPath (string) -- [REQUIRED]The path to the top-level parent that contains the records.\n CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).\n RecordRowDelimiter (string) -- [REQUIRED]The row delimiter. 
For example, in a CSV format, 'n' is the typical row delimiter.\n RecordColumnDelimiter (string) -- [REQUIRED]The column delimiter. For example, in a CSV format, a comma (',') is the typical column delimiter.\n \n RecordEncoding (string) --Specifies the encoding of the records in the streaming source. For example, UTF-8.\n RecordColumns (list) -- [REQUIRED]A list of RecordColumn objects.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.\n Also used to describe the format of the reference data source.\n Name (string) -- [REQUIRED]The name of the column that is created in the in-application input stream or reference table.\n Mapping (string) --A reference to the data element in the streaming input of the reference data source.\n SqlType (string) -- [REQUIRED]The type of column created in the in-application input stream or reference table.\n \n \n Outputs (list) --The array of Output objects describing the destination streams used by the application.\n (dict) --Describes an SQL-based Amazon Kinesis Data Analytics application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream.\n Name (string) -- [REQUIRED]The name of the in-application stream.\n KinesisStreamsOutput (dict) --Identifies an Amazon Kinesis data stream as the destination.\n ResourceARN (string) -- [REQUIRED]The ARN of the destination Kinesis data stream to write to.\n KinesisFirehoseOutput (dict) --Identifies an Amazon Kinesis Data Firehose delivery stream as the destination.\n ResourceARN (string) -- [REQUIRED]The ARN of the destination delivery stream to write to.\n LambdaOutput (dict) --Identifies an AWS Lambda function as the destination.\n ResourceARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the destination Lambda function to write to.\n DestinationSchema (dict) -- [REQUIRED]Describes the data format when records are written to the destination.\n RecordFormatType (string) -- [REQUIRED]Specifies the format of the records on the output stream.\n \n ReferenceDataSources (list) --The array of ReferenceDataSource objects describing the reference data sources used by the application.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table.\n TableName (string) -- [REQUIRED]The name of the in-application table to create.\n S3ReferenceDataSource (dict) --Identifies the S3 bucket and object that contains the reference data. A Kinesis Data Analytics application loads reference data only once. 
If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.\n BucketARN (string) --The Amazon Resource Name (ARN) of the S3 bucket.\n FileKey (string) --The object key name containing the reference data.\n ReferenceSchema (dict) -- [REQUIRED]Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.\n RecordFormat (dict) -- [REQUIRED]Specifies the format of the records on the streaming source.\n RecordFormatType (string) -- [REQUIRED]The type of record format.\n MappingParameters (dict) --When you configure application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.\n JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.\n RecordRowPath (string) -- [REQUIRED]The path to the top-level parent that contains the records.\n CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).\n RecordRowDelimiter (string) -- [REQUIRED]The row delimiter. For example, in a CSV format, '\\n' is the typical row delimiter.\n RecordColumnDelimiter (string) -- [REQUIRED]The column delimiter. For example, in a CSV format, a comma (',') is the typical column delimiter.\n \n RecordEncoding (string) --Specifies the encoding of the records in the streaming source. For example, UTF-8.\n RecordColumns (list) -- [REQUIRED]A list of RecordColumn objects.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.\n Also used to describe the format of the reference data source.\n Name (string) -- [REQUIRED]The name of the column that is created in the in-application input stream or reference table.\n Mapping (string) --A reference to the data element in the streaming input of the reference data source.\n SqlType (string) -- [REQUIRED]The type of column created in the in-application input stream or reference table.\n \n \n \n FlinkApplicationConfiguration (dict) --The creation and update parameters for a Java-based Kinesis Data Analytics application.\n CheckpointConfiguration (dict) --Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see Checkpoints for Fault Tolerance in the Apache Flink Documentation .\n ConfigurationType (string) -- [REQUIRED]Describes whether the application uses Amazon Kinesis Data Analytics' default checkpointing behavior.\n CheckpointingEnabled (boolean) --Describes whether checkpointing is enabled for a Java-based Kinesis Data Analytics application.\n CheckpointInterval (integer) --Describes the interval in milliseconds between checkpoint operations.\n MinPauseBetweenCheckpoints (integer) --Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the CheckpointInterval , the application otherwise performs continual checkpoint operations.
For more information, see Tuning Checkpointing in the Apache Flink Documentation .\n MonitoringConfiguration (dict) --Describes configuration parameters for Amazon CloudWatch logging for an application.\n ConfigurationType (string) -- [REQUIRED]Describes whether to use the default CloudWatch logging configuration for an application.\n MetricsLevel (string) --Describes the granularity of the CloudWatch Logs for an application.\n LogLevel (string) --Describes the verbosity of the CloudWatch Logs for an application.\n ParallelismConfiguration (dict) --Describes parameters for how an application executes multiple tasks simultaneously.\n ConfigurationType (string) -- [REQUIRED]Describes whether the application uses the default parallelism for the Kinesis Data Analytics service.\n Parallelism (integer) --Describes the initial number of parallel tasks that a Java-based Kinesis Data Analytics application can perform. The Kinesis Data Analytics service can increase this number automatically if ParallelismConfiguration$AutoScalingEnabled is set to true .\n ParallelismPerKPU (integer) --Describes the number of parallel tasks that a Java-based Kinesis Data Analytics application can perform per Kinesis Processing Unit (KPU) used by the application. For more information about KPUs, see Amazon Kinesis Data Analytics Pricing .\n AutoScalingEnabled (boolean) --Describes whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput.\n \n EnvironmentProperties (dict) --Describes execution properties for a Java-based Kinesis Data Analytics application.\n PropertyGroups (list) -- [REQUIRED]Describes the execution property groups.\n (dict) --Property key-value pairs passed into a Java-based Kinesis Data Analytics application.\n PropertyGroupId (string) -- [REQUIRED]Describes the key of an application execution property key-value pair.\n PropertyMap (dict) -- [REQUIRED]Describes the value of an application execution property key-value pair.\n (string) --\n (string) --\n \n \n ApplicationCodeConfiguration (dict) -- [REQUIRED]The code location and type parameters for a Java-based Kinesis Data Analytics application.\n CodeContent (dict) --The location and type of the application code.\n TextContent (string) --The text-format code for a Java-based Kinesis Data Analytics application.\n ZipFileContent (bytes) --The zip-format code for a Java-based Kinesis Data Analytics application.\n S3ContentLocation (dict) --Information about the Amazon S3 bucket containing the application code.\n BucketARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) for the S3 bucket containing the application code.\n FileKey (string) -- [REQUIRED]The file key for the object containing the application code.\n ObjectVersion (string) --The version of the object containing the application code.\n \n CodeContentType (string) -- [REQUIRED]Specifies whether the code content is in text or zip format.\n ApplicationSnapshotConfiguration (dict) --Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics application.\n SnapshotsEnabled (boolean) -- [REQUIRED]Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics application.\n \n \n\n :type CloudWatchLoggingOptions: list\n :param CloudWatchLoggingOptions: Use this parameter to configure an Amazon CloudWatch log stream to monitor application configuration errors.\n (dict) --Provides a description of Amazon CloudWatch logging options, including the log stream Amazon Resource Name (ARN).\n 
LogStreamARN (string) -- [REQUIRED]The ARN of the CloudWatch log to receive application messages.\n \n \n\n :rtype: dict\n :return: {\n 'ApplicationDetail': {\n 'ApplicationARN': 'string',\n 'ApplicationDescription': 'string',\n 'ApplicationName': 'string',\n 'RuntimeEnvironment': 'SQL-1_0'|'FLINK-1_6',\n 'ServiceExecutionRole': 'string',\n 'ApplicationStatus': 'DELETING'|'STARTING'|'STOPPING'|'READY'|'RUNNING'|'UPDATING',\n 'ApplicationVersionId': 123,\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'LastUpdateTimestamp': datetime(2015, 1, 1),\n 'ApplicationConfigurationDescription': {\n 'SqlApplicationConfigurationDescription': {\n 'InputDescriptions': [\n {\n 'InputId': 'string',\n 'NamePrefix': 'string',\n 'InAppStreamNames': [\n 'string',\n ],\n 'InputProcessingConfigurationDescription': {\n 'InputLambdaProcessorDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n }\n },\n 'KinesisStreamsInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n },\n 'InputParallelism': {\n 'Count': 123\n },\n 'InputStartingPositionConfiguration': {\n 'InputStartingPosition': 'NOW'|'TRIM_HORIZON'|'LAST_STOPPED_POINT'\n }\n },\n ],\n 'OutputDescriptions': [\n {\n 'OutputId': 'string',\n 'Name': 'string',\n 'KinesisStreamsOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'LambdaOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'DestinationSchema': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n },\n ],\n 'ReferenceDataSourceDescriptions': [\n {\n 'ReferenceId': 'string',\n 'TableName': 'string',\n 'S3ReferenceDataSourceDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ReferenceRoleARN': 'string'\n },\n 'ReferenceSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ]\n },\n 'ApplicationCodeConfigurationDescription': {\n 'CodeContentType': 'PLAINTEXT'|'ZIPFILE',\n 'CodeContentDescription': {\n 'TextContent': 'string',\n 'CodeMD5': 'string',\n 'CodeSize': 123,\n 'S3ApplicationCodeLocationDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ObjectVersion': 'string'\n }\n }\n },\n 'RunConfigurationDescription': {\n 'ApplicationRestoreConfigurationDescription': {\n 'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'|'RESTORE_FROM_LATEST_SNAPSHOT'|'RESTORE_FROM_CUSTOM_SNAPSHOT',\n 'SnapshotName': 'string'\n }\n },\n 'FlinkApplicationConfigurationDescription': {\n 'CheckpointConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'CheckpointingEnabled': True|False,\n 'CheckpointInterval': 123,\n 'MinPauseBetweenCheckpoints': 123\n },\n 
'MonitoringConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'MetricsLevel': 'APPLICATION'|'TASK'|'OPERATOR'|'PARALLELISM',\n 'LogLevel': 'INFO'|'WARN'|'ERROR'|'DEBUG'\n },\n 'ParallelismConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'Parallelism': 123,\n 'ParallelismPerKPU': 123,\n 'CurrentParallelism': 123,\n 'AutoScalingEnabled': True|False\n },\n 'JobPlanDescription': 'string'\n },\n 'EnvironmentPropertyDescriptions': {\n 'PropertyGroupDescriptions': [\n {\n 'PropertyGroupId': 'string',\n 'PropertyMap': {\n 'string': 'string'\n }\n },\n ]\n },\n 'ApplicationSnapshotConfigurationDescription': {\n 'SnapshotsEnabled': True|False\n }\n },\n 'CloudWatchLoggingOptionDescriptions': [\n {\n 'CloudWatchLoggingOptionId': 'string',\n 'LogStreamARN': 'string',\n 'RoleARN': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_application_snapshot(ApplicationName=None, SnapshotName=None):\n \"\"\"\n Creates a snapshot of the application's state data.\n See also: AWS API Documentation\n \n \n :example: response = client.create_application_snapshot(\n ApplicationName='string',\n SnapshotName='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of an existing application\n \n\n :type SnapshotName: string\n :param SnapshotName: [REQUIRED]\n An identifier for the application snapshot.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_application(ApplicationName=None, CreateTimestamp=None):\n \"\"\"\n Deletes the specified application. Kinesis Data Analytics halts application execution and deletes the application.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_application(\n ApplicationName='string',\n CreateTimestamp=datetime(2015, 1, 1)\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application to delete.\n \n\n :type CreateTimestamp: datetime\n :param CreateTimestamp: [REQUIRED]\n Use the DescribeApplication operation to get this value.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_application_cloud_watch_logging_option(ApplicationName=None, CurrentApplicationVersionId=None, CloudWatchLoggingOptionId=None):\n \"\"\"\n Deletes an Amazon CloudWatch log stream from an Amazon Kinesis Data Analytics application.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_application_cloud_watch_logging_option(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n CloudWatchLoggingOptionId='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The application name.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The version ID of the application. You can retrieve the application version ID using DescribeApplication .\n \n\n :type CloudWatchLoggingOptionId: string\n :param CloudWatchLoggingOptionId: [REQUIRED]\n The CloudWatchLoggingOptionId of the Amazon CloudWatch logging option to delete. 
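As a minimal sketch (the application name 'my-app' is a placeholder, and the option to delete is assumed to be the first one listed), the ID and version can be read from DescribeApplication output first:
 detail = client.describe_application(ApplicationName='my-app')['ApplicationDetail']  # 'my-app' is a placeholder
 option_id = detail['CloudWatchLoggingOptionDescriptions'][0]['CloudWatchLoggingOptionId']  # assumes the first option is the target
 client.delete_application_cloud_watch_logging_option(
 ApplicationName='my-app',
 CurrentApplicationVersionId=detail['ApplicationVersionId'],
 CloudWatchLoggingOptionId=option_id
 )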
You can get the CloudWatchLoggingOptionId by using the DescribeApplication operation.\n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123,\n 'CloudWatchLoggingOptionDescriptions': [\n {\n 'CloudWatchLoggingOptionId': 'string',\n 'LogStreamARN': 'string',\n 'RoleARN': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef delete_application_input_processing_configuration(ApplicationName=None, CurrentApplicationVersionId=None, InputId=None):\n \"\"\"\n Deletes an InputProcessingConfiguration from an input.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_application_input_processing_configuration(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n InputId='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The application version. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.\n \n\n :type InputId: string\n :param InputId: [REQUIRED]\n The ID of the input configuration from which to delete the input processing configuration. You can get a list of the input IDs for an application by using the DescribeApplication operation.\n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_application_output(ApplicationName=None, CurrentApplicationVersionId=None, OutputId=None):\n \"\"\"\n Deletes the output destination configuration from your SQL-based Amazon Kinesis Data Analytics application's configuration. Kinesis Data Analytics will no longer write data from the corresponding in-application stream to the external output destination.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_application_output(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n OutputId='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The application name.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The application version. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.\n \n\n :type OutputId: string\n :param OutputId: [REQUIRED]\n The ID of the configuration to delete. Each output configuration that is added to the application (either when the application is created or later) using the AddApplicationOutput operation has a unique ID. You need to provide the ID to uniquely identify the output configuration that you want to delete from the application configuration. 
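As a hedged sketch (placeholder application name; assumes the first configured output is the one to remove), the OutputId can be looked up via DescribeApplication:
 detail = client.describe_application(ApplicationName='my-app')['ApplicationDetail']  # 'my-app' is a placeholder
 outputs = detail['ApplicationConfigurationDescription']['SqlApplicationConfigurationDescription']['OutputDescriptions']
 client.delete_application_output(
 ApplicationName='my-app',
 CurrentApplicationVersionId=detail['ApplicationVersionId'],
 OutputId=outputs[0]['OutputId']  # assumes the first output is the target
 )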
You can use the DescribeApplication operation to get the specific OutputId .\n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_application_reference_data_source(ApplicationName=None, CurrentApplicationVersionId=None, ReferenceId=None):\n \"\"\"\n Deletes a reference data source configuration from the specified SQL-based Amazon Kinesis Data Analytics application's configuration.\n If the application is running, Kinesis Data Analytics immediately removes the in-application table that you created using the AddApplicationReferenceDataSource operation.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_application_reference_data_source(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n ReferenceId='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of an existing application.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The current application version. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.\n \n\n :type ReferenceId: string\n :param ReferenceId: [REQUIRED]\n The ID of the reference data source. When you add a reference data source to your application using the AddApplicationReferenceDataSource , Kinesis Data Analytics assigns an ID. You can use the DescribeApplication operation to get the reference ID.\n \n\n :rtype: dict\n :return: {\n 'ApplicationARN': 'string',\n 'ApplicationVersionId': 123\n }\n \n \n \"\"\"\n pass\n\ndef delete_application_snapshot(ApplicationName=None, SnapshotName=None, SnapshotCreationTimestamp=None):\n \"\"\"\n Deletes a snapshot of application state.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_application_snapshot(\n ApplicationName='string',\n SnapshotName='string',\n SnapshotCreationTimestamp=datetime(2015, 1, 1)\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of an existing application.\n \n\n :type SnapshotName: string\n :param SnapshotName: [REQUIRED]\n The identifier for the snapshot delete.\n \n\n :type SnapshotCreationTimestamp: datetime\n :param SnapshotCreationTimestamp: [REQUIRED]\n The creation timestamp of the application snapshot to delete. 
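A minimal sketch (application and snapshot names are placeholders) that reads the timestamp back from DescribeApplicationSnapshot before deleting:
 snap = client.describe_application_snapshot(ApplicationName='my-app', SnapshotName='my-snapshot')  # placeholder names
 client.delete_application_snapshot(
 ApplicationName='my-app',
 SnapshotName='my-snapshot',
 SnapshotCreationTimestamp=snap['SnapshotDetails']['SnapshotCreationTimestamp']
 )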
You can retrieve this value using or .\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_application(ApplicationName=None, IncludeAdditionalDetails=None):\n \"\"\"\n Returns information about a specific Amazon Kinesis Data Analytics application.\n If you want to retrieve a list of all applications in your account, use the ListApplications operation.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_application(\n ApplicationName='string',\n IncludeAdditionalDetails=True|False\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application.\n \n\n :type IncludeAdditionalDetails: boolean\n :param IncludeAdditionalDetails: Displays verbose information about a Kinesis Data Analytics application, including the application's job plan.\n\n :rtype: dict\n :return: {\n 'ApplicationDetail': {\n 'ApplicationARN': 'string',\n 'ApplicationDescription': 'string',\n 'ApplicationName': 'string',\n 'RuntimeEnvironment': 'SQL-1_0'|'FLINK-1_6',\n 'ServiceExecutionRole': 'string',\n 'ApplicationStatus': 'DELETING'|'STARTING'|'STOPPING'|'READY'|'RUNNING'|'UPDATING',\n 'ApplicationVersionId': 123,\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'LastUpdateTimestamp': datetime(2015, 1, 1),\n 'ApplicationConfigurationDescription': {\n 'SqlApplicationConfigurationDescription': {\n 'InputDescriptions': [\n {\n 'InputId': 'string',\n 'NamePrefix': 'string',\n 'InAppStreamNames': [\n 'string',\n ],\n 'InputProcessingConfigurationDescription': {\n 'InputLambdaProcessorDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n }\n },\n 'KinesisStreamsInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n },\n 'InputParallelism': {\n 'Count': 123\n },\n 'InputStartingPositionConfiguration': {\n 'InputStartingPosition': 'NOW'|'TRIM_HORIZON'|'LAST_STOPPED_POINT'\n }\n },\n ],\n 'OutputDescriptions': [\n {\n 'OutputId': 'string',\n 'Name': 'string',\n 'KinesisStreamsOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'LambdaOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'DestinationSchema': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n },\n ],\n 'ReferenceDataSourceDescriptions': [\n {\n 'ReferenceId': 'string',\n 'TableName': 'string',\n 'S3ReferenceDataSourceDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ReferenceRoleARN': 'string'\n },\n 'ReferenceSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ]\n },\n 'ApplicationCodeConfigurationDescription': {\n 'CodeContentType': 
'PLAINTEXT'|'ZIPFILE',\n 'CodeContentDescription': {\n 'TextContent': 'string',\n 'CodeMD5': 'string',\n 'CodeSize': 123,\n 'S3ApplicationCodeLocationDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ObjectVersion': 'string'\n }\n }\n },\n 'RunConfigurationDescription': {\n 'ApplicationRestoreConfigurationDescription': {\n 'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'|'RESTORE_FROM_LATEST_SNAPSHOT'|'RESTORE_FROM_CUSTOM_SNAPSHOT',\n 'SnapshotName': 'string'\n }\n },\n 'FlinkApplicationConfigurationDescription': {\n 'CheckpointConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'CheckpointingEnabled': True|False,\n 'CheckpointInterval': 123,\n 'MinPauseBetweenCheckpoints': 123\n },\n 'MonitoringConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'MetricsLevel': 'APPLICATION'|'TASK'|'OPERATOR'|'PARALLELISM',\n 'LogLevel': 'INFO'|'WARN'|'ERROR'|'DEBUG'\n },\n 'ParallelismConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'Parallelism': 123,\n 'ParallelismPerKPU': 123,\n 'CurrentParallelism': 123,\n 'AutoScalingEnabled': True|False\n },\n 'JobPlanDescription': 'string'\n },\n 'EnvironmentPropertyDescriptions': {\n 'PropertyGroupDescriptions': [\n {\n 'PropertyGroupId': 'string',\n 'PropertyMap': {\n 'string': 'string'\n }\n },\n ]\n },\n 'ApplicationSnapshotConfigurationDescription': {\n 'SnapshotsEnabled': True|False\n }\n },\n 'CloudWatchLoggingOptionDescriptions': [\n {\n 'CloudWatchLoggingOptionId': 'string',\n 'LogStreamARN': 'string',\n 'RoleARN': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_application_snapshot(ApplicationName=None, SnapshotName=None):\n \"\"\"\n Returns information about a snapshot of application state data.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_application_snapshot(\n ApplicationName='string',\n SnapshotName='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of an existing application.\n \n\n :type SnapshotName: string\n :param SnapshotName: [REQUIRED]\n The identifier of an application snapshot. You can retrieve this value using .\n \n\n :rtype: dict\n :return: {\n 'SnapshotDetails': {\n 'SnapshotName': 'string',\n 'SnapshotStatus': 'CREATING'|'READY'|'DELETING'|'FAILED',\n 'ApplicationVersionId': 123,\n 'SnapshotCreationTimestamp': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef discover_input_schema(ResourceARN=None, ServiceExecutionRole=None, InputStartingPositionConfiguration=None, S3Configuration=None, InputProcessingConfiguration=None):\n \"\"\"\n Infers a schema for an SQL-based Amazon Kinesis Data Analytics application by evaluating sample records on the specified streaming source (Kinesis data stream or Kinesis Data Firehose delivery stream) or Amazon S3 object. In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema.\n You can use the inferred schema when configuring a streaming source for your application. 
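As an illustrative sketch (the stream and role ARNs below are placeholders, not real resources), you might infer the schema of a Kinesis data stream and reuse it when configuring an input:
 discovered = client.discover_input_schema(
 ResourceARN='arn:aws:kinesis:us-east-1:111122223333:stream/example-stream',  # placeholder ARN
 ServiceExecutionRole='arn:aws:iam::111122223333:role/example-role',  # placeholder ARN
 InputStartingPositionConfiguration={'InputStartingPosition': 'NOW'}
 )
 input_schema = discovered['InputSchema']  # reusable as the InputSchema of a create_application input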
When you create an application using the Kinesis Data Analytics console, the console uses this operation to infer a schema and show it in the console user interface.\n See also: AWS API Documentation\n \n \n :example: response = client.discover_input_schema(\n ResourceARN='string',\n ServiceExecutionRole='string',\n InputStartingPositionConfiguration={\n 'InputStartingPosition': 'NOW'|'TRIM_HORIZON'|'LAST_STOPPED_POINT'\n },\n S3Configuration={\n 'BucketARN': 'string',\n 'FileKey': 'string'\n },\n InputProcessingConfiguration={\n 'InputLambdaProcessor': {\n 'ResourceARN': 'string'\n }\n }\n )\n \n \n :type ResourceARN: string\n :param ResourceARN: The Amazon Resource Name (ARN) of the streaming source.\n\n :type ServiceExecutionRole: string\n :param ServiceExecutionRole: [REQUIRED]\n The ARN of the role that is used to access the streaming source.\n \n\n :type InputStartingPositionConfiguration: dict\n :param InputStartingPositionConfiguration: The point at which you want Kinesis Data Analytics to start reading records from the specified streaming source for discovery purposes.\n InputStartingPosition (string) --The starting position on the stream.\n NOW - Start reading just after the most recent record in the stream, and start at the request timestamp that the customer issued.\n TRIM_HORIZON - Start reading at the last untrimmed record in the stream, which is the oldest record available in the stream. This option is not available for an Amazon Kinesis Data Firehose delivery stream.\n LAST_STOPPED_POINT - Resume reading from where the application last stopped reading.\n \n\n :type S3Configuration: dict\n :param S3Configuration: Specify this parameter to discover a schema from data in an Amazon S3 object.\n BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket that contains the data.\n FileKey (string) -- [REQUIRED]The name of the object that contains the data.\n \n\n :type InputProcessingConfiguration: dict\n :param InputProcessingConfiguration: The InputProcessingConfiguration to use to preprocess the records before discovering the schema of the records.\n InputLambdaProcessor (dict) -- [REQUIRED]The InputLambdaProcessor that is used to preprocess the records in the stream before being processed by your application code.\n ResourceARN (string) -- [REQUIRED]The ARN of the AWS Lambda function that operates on records in the stream.\n \n \n\n :rtype: dict\n :return: {\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n },\n 'ParsedInputRecords': [\n [\n 'string',\n ],\n ],\n 'ProcessedInputRecords': [\n 'string',\n ],\n 'RawInputRecords': [\n 'string',\n ]\n }\n \n \n :returns: \n (list) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for.
By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_application_snapshots(ApplicationName=None, Limit=None, NextToken=None):\n \"\"\"\n Lists information about the current application snapshots.\n See also: AWS API Documentation\n \n \n :example: response = client.list_application_snapshots(\n ApplicationName='string',\n Limit=123,\n NextToken='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of an existing application.\n \n\n :type Limit: integer\n :param Limit: The maximum number of application snapshots to list.\n\n :type NextToken: string\n :param NextToken: Use this parameter if you receive a NextToken response in a previous request that indicates that there is more output available. Set it to the value of the previous call's NextToken response to indicate where the output should continue from.\n\n :rtype: dict\n :return: {\n 'SnapshotSummaries': [\n {\n 'SnapshotName': 'string',\n 'SnapshotStatus': 'CREATING'|'READY'|'DELETING'|'FAILED',\n 'ApplicationVersionId': 123,\n 'SnapshotCreationTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_applications(Limit=None, NextToken=None):\n \"\"\"\n Returns a list of Amazon Kinesis Data Analytics applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status.\n If you want detailed information about a specific application, use DescribeApplication .\n See also: AWS API Documentation\n \n \n :example: response = client.list_applications(\n Limit=123,\n NextToken='string'\n )\n \n \n :type Limit: integer\n :param Limit: The maximum number of applications to list.\n\n :type NextToken: string\n :param NextToken: If a previous command returned a pagination token, pass it into this value to retrieve the next set of results. For more information about pagination, see Using the AWS Command Line Interface's Pagination Options .\n\n :rtype: dict\n :return: {\n 'ApplicationSummaries': [\n {\n 'ApplicationName': 'string',\n 'ApplicationARN': 'string',\n 'ApplicationStatus': 'DELETING'|'STARTING'|'STOPPING'|'READY'|'RUNNING'|'UPDATING',\n 'ApplicationVersionId': 123,\n 'RuntimeEnvironment': 'SQL-1_0'|'FLINK-1_6'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_application(ApplicationName=None, RunConfiguration=None):\n \"\"\"\n Starts the specified Amazon Kinesis Data Analytics application. 
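For an SQL-based application, a minimal hedged sketch (the application name is a placeholder) first looks up the InputId with DescribeApplication and then starts reading from the oldest available record:
 detail = client.describe_application(ApplicationName='my-app')['ApplicationDetail']  # 'my-app' is a placeholder
 input_id = detail['ApplicationConfigurationDescription']['SqlApplicationConfigurationDescription']['InputDescriptions'][0]['InputId']
 client.start_application(
 ApplicationName='my-app',
 RunConfiguration={'SqlRunConfigurations': [{
 'InputId': input_id,
 'InputStartingPositionConfiguration': {'InputStartingPosition': 'TRIM_HORIZON'}
 }]}
 )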
After creating an application, you must exclusively call this operation to start your application.\n See also: AWS API Documentation\n \n \n :example: response = client.start_application(\n ApplicationName='string',\n RunConfiguration={\n 'SqlRunConfigurations': [\n {\n 'InputId': 'string',\n 'InputStartingPositionConfiguration': {\n 'InputStartingPosition': 'NOW'|'TRIM_HORIZON'|'LAST_STOPPED_POINT'\n }\n },\n ],\n 'ApplicationRestoreConfiguration': {\n 'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'|'RESTORE_FROM_LATEST_SNAPSHOT'|'RESTORE_FROM_CUSTOM_SNAPSHOT',\n 'SnapshotName': 'string'\n }\n }\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application.\n \n\n :type RunConfiguration: dict\n :param RunConfiguration: [REQUIRED]\n Identifies the run configuration (start parameters) of a Kinesis Data Analytics application.\n SqlRunConfigurations (list) --Describes the starting parameters for an SQL-based Kinesis Data Analytics application.\n (dict) --Describes the starting parameters for an SQL-based Kinesis Data Analytics application.\n InputId (string) -- [REQUIRED]The input source ID. You can get this ID by calling the DescribeApplication operation.\n InputStartingPositionConfiguration (dict) -- [REQUIRED]The point at which you want the application to start processing records from the streaming source.\n InputStartingPosition (string) --The starting position on the stream.\n NOW - Start reading just after the most recent record in the stream, and start at the request timestamp that the customer issued.\n TRIM_HORIZON - Start reading at the last untrimmed record in the stream, which is the oldest record available in the stream. This option is not available for an Amazon Kinesis Data Firehose delivery stream.\n LAST_STOPPED_POINT - Resume reading from where the application last stopped reading.\n \n \n ApplicationRestoreConfiguration (dict) --Describes the restore behavior of a restarting application.\n ApplicationRestoreType (string) -- [REQUIRED]Specifies how the application should be restored.\n SnapshotName (string) --The identifier of an existing snapshot of application state to use to restart an application. The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT is specified for the ApplicationRestoreType .\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef stop_application(ApplicationName=None):\n \"\"\"\n Stops the application from processing data. You can stop an application only if it is in the running state. You can use the DescribeApplication operation to find the application state.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_application(\n ApplicationName='string'\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the running application to stop.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_application(ApplicationName=None, CurrentApplicationVersionId=None, ApplicationConfigurationUpdate=None, ServiceExecutionRoleUpdate=None, RunConfigurationUpdate=None, CloudWatchLoggingOptionUpdates=None):\n \"\"\"\n Updates an existing Amazon Kinesis Data Analytics application. 
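Because every update must name the application's current version, a minimal sketch (placeholder names; updating only the service execution role here for brevity) reads the version with DescribeApplication first:
 version_id = client.describe_application(ApplicationName='my-app')['ApplicationDetail']['ApplicationVersionId']  # 'my-app' is a placeholder
 client.update_application(
 ApplicationName='my-app',
 CurrentApplicationVersionId=version_id,
 ServiceExecutionRoleUpdate='arn:aws:iam::111122223333:role/example-role'  # placeholder ARN
 )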
Using this operation, you can update application code, input configuration, and output configuration.\n Kinesis Data Analytics updates the ApplicationVersionId each time you update your application.\n See also: AWS API Documentation\n \n \n :example: response = client.update_application(\n ApplicationName='string',\n CurrentApplicationVersionId=123,\n ApplicationConfigurationUpdate={\n 'SqlApplicationConfigurationUpdate': {\n 'InputUpdates': [\n {\n 'InputId': 'string',\n 'NamePrefixUpdate': 'string',\n 'InputProcessingConfigurationUpdate': {\n 'InputLambdaProcessorUpdate': {\n 'ResourceARNUpdate': 'string'\n }\n },\n 'KinesisStreamsInputUpdate': {\n 'ResourceARNUpdate': 'string'\n },\n 'KinesisFirehoseInputUpdate': {\n 'ResourceARNUpdate': 'string'\n },\n 'InputSchemaUpdate': {\n 'RecordFormatUpdate': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncodingUpdate': 'string',\n 'RecordColumnUpdates': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n },\n 'InputParallelismUpdate': {\n 'CountUpdate': 123\n }\n },\n ],\n 'OutputUpdates': [\n {\n 'OutputId': 'string',\n 'NameUpdate': 'string',\n 'KinesisStreamsOutputUpdate': {\n 'ResourceARNUpdate': 'string'\n },\n 'KinesisFirehoseOutputUpdate': {\n 'ResourceARNUpdate': 'string'\n },\n 'LambdaOutputUpdate': {\n 'ResourceARNUpdate': 'string'\n },\n 'DestinationSchemaUpdate': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n },\n ],\n 'ReferenceDataSourceUpdates': [\n {\n 'ReferenceId': 'string',\n 'TableNameUpdate': 'string',\n 'S3ReferenceDataSourceUpdate': {\n 'BucketARNUpdate': 'string',\n 'FileKeyUpdate': 'string'\n },\n 'ReferenceSchemaUpdate': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ]\n },\n 'ApplicationCodeConfigurationUpdate': {\n 'CodeContentTypeUpdate': 'PLAINTEXT'|'ZIPFILE',\n 'CodeContentUpdate': {\n 'TextContentUpdate': 'string',\n 'ZipFileContentUpdate': b'bytes',\n 'S3ContentLocationUpdate': {\n 'BucketARNUpdate': 'string',\n 'FileKeyUpdate': 'string',\n 'ObjectVersionUpdate': 'string'\n }\n }\n },\n 'FlinkApplicationConfigurationUpdate': {\n 'CheckpointConfigurationUpdate': {\n 'ConfigurationTypeUpdate': 'DEFAULT'|'CUSTOM',\n 'CheckpointingEnabledUpdate': True|False,\n 'CheckpointIntervalUpdate': 123,\n 'MinPauseBetweenCheckpointsUpdate': 123\n },\n 'MonitoringConfigurationUpdate': {\n 'ConfigurationTypeUpdate': 'DEFAULT'|'CUSTOM',\n 'MetricsLevelUpdate': 'APPLICATION'|'TASK'|'OPERATOR'|'PARALLELISM',\n 'LogLevelUpdate': 'INFO'|'WARN'|'ERROR'|'DEBUG'\n },\n 'ParallelismConfigurationUpdate': {\n 'ConfigurationTypeUpdate': 'DEFAULT'|'CUSTOM',\n 'ParallelismUpdate': 123,\n 'ParallelismPerKPUUpdate': 123,\n 'AutoScalingEnabledUpdate': True|False\n }\n },\n 'EnvironmentPropertyUpdates': {\n 'PropertyGroups': [\n {\n 'PropertyGroupId': 'string',\n 'PropertyMap': {\n 'string': 'string'\n }\n },\n ]\n },\n 'ApplicationSnapshotConfigurationUpdate': {\n 'SnapshotsEnabledUpdate': True|False\n }\n },\n ServiceExecutionRoleUpdate='string',\n RunConfigurationUpdate={\n 
'ApplicationRestoreConfiguration': {\n 'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'|'RESTORE_FROM_LATEST_SNAPSHOT'|'RESTORE_FROM_CUSTOM_SNAPSHOT',\n 'SnapshotName': 'string'\n }\n },\n CloudWatchLoggingOptionUpdates=[\n {\n 'CloudWatchLoggingOptionId': 'string',\n 'LogStreamARNUpdate': 'string'\n },\n ]\n )\n \n \n :type ApplicationName: string\n :param ApplicationName: [REQUIRED]\n The name of the application to update.\n \n\n :type CurrentApplicationVersionId: integer\n :param CurrentApplicationVersionId: [REQUIRED]\n The current application version ID. You can retrieve the application version ID using DescribeApplication .\n \n\n :type ApplicationConfigurationUpdate: dict\n :param ApplicationConfigurationUpdate: Describes application configuration updates.\n SqlApplicationConfigurationUpdate (dict) --Describes updates to an SQL-based Kinesis Data Analytics application's configuration.\n InputUpdates (list) --The array of InputUpdate objects describing the new input streams used by the application.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes updates to a specific input configuration (identified by the InputId of an application).\n InputId (string) -- [REQUIRED]The input ID of the application input to be updated.\n NamePrefixUpdate (string) --The name prefix for in-application streams that Kinesis Data Analytics creates for the specific streaming source.\n InputProcessingConfigurationUpdate (dict) --Describes updates to an InputProcessingConfiguration .\n InputLambdaProcessorUpdate (dict) -- [REQUIRED]Provides update information for an InputLambdaProcessor .\n ResourceARNUpdate (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the new AWS Lambda function that is used to preprocess the records in the stream.\n \n KinesisStreamsInputUpdate (dict) --If a Kinesis data stream is the streaming source to be updated, provides an updated stream Amazon Resource Name (ARN).\n ResourceARNUpdate (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the input Kinesis data stream to read.\n KinesisFirehoseInputUpdate (dict) --If a Kinesis Data Firehose delivery stream is the streaming source to be updated, provides an updated stream ARN.\n ResourceARNUpdate (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the input delivery stream to read.\n InputSchemaUpdate (dict) --Describes the data format on the streaming source, and how record elements on the streaming source map to columns of the in-application stream that is created.\n RecordFormatUpdate (dict) --Specifies the format of the records on the streaming source.\n RecordFormatType (string) -- [REQUIRED]The type of record format.\n MappingParameters (dict) --When you configure application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.\n JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.\n RecordRowPath (string) -- [REQUIRED]The path to the top-level parent that contains the records.\n CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).\n RecordRowDelimiter (string) -- [REQUIRED]The row delimiter. For example, in a CSV format, '\\n' is the typical row delimiter.\n RecordColumnDelimiter (string) -- [REQUIRED]The column delimiter.
For example, in a CSV format, a comma (',') is the typical column delimiter.\n \n RecordEncodingUpdate (string) --Specifies the encoding of the records in the streaming source; for example, UTF-8.\n RecordColumnUpdates (list) --A list of RecordColumn objects. Each object describes the mapping of the streaming source element to the corresponding column in the in-application stream.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.\n Also used to describe the format of the reference data source.\n Name (string) -- [REQUIRED]The name of the column that is created in the in-application input stream or reference table.\n Mapping (string) --A reference to the data element in the streaming input of the reference data source.\n SqlType (string) -- [REQUIRED]The type of column created in the in-application input stream or reference table.\n \n InputParallelismUpdate (dict) --Describes the parallelism updates (the number of in-application streams Kinesis Data Analytics creates for the specific streaming source).\n CountUpdate (integer) -- [REQUIRED]The number of in-application streams to create for the specified streaming source.\n \n OutputUpdates (list) --The array of OutputUpdate objects describing the new destination streams used by the application.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes updates to the output configuration identified by the OutputId .\n OutputId (string) -- [REQUIRED]Identifies the specific output configuration that you want to update.\n NameUpdate (string) --If you want to specify a different in-application stream for this output configuration, use this field to specify the new in-application stream name.\n KinesisStreamsOutputUpdate (dict) --Describes a Kinesis data stream as the destination for the output.\n ResourceARNUpdate (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Kinesis data stream where you want to write the output.\n KinesisFirehoseOutputUpdate (dict) --Describes a Kinesis Data Firehose delivery stream as the destination for the output.\n ResourceARNUpdate (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the delivery stream to write to.\n LambdaOutputUpdate (dict) --Describes an AWS Lambda function as the destination for the output.\n ResourceARNUpdate (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the destination AWS Lambda function.\n DestinationSchemaUpdate (dict) --Describes the data format when records are written to the destination.\n RecordFormatType (string) -- [REQUIRED]Specifies the format of the records on the output stream.\n \n ReferenceDataSourceUpdates (list) --The array of ReferenceDataSourceUpdate objects describing the new reference data sources used by the application.\n (dict) --When you update a reference data source configuration for a SQL-based Amazon Kinesis Data Analytics application, this object provides all the updated values (such as the source bucket name and object key name), the in-application table name that is created, and updated mapping information that maps the data in the Amazon S3 object to the in-application reference table that is created.\n ReferenceId (string) -- [REQUIRED]The ID of the reference data source that is being updated. 
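A short hedged sketch (placeholder application name; assumes a single configured reference data source) showing how the ID can be read from DescribeApplication output:
 detail = client.describe_application(ApplicationName='my-app')['ApplicationDetail']  # 'my-app' is a placeholder
 sql_desc = detail['ApplicationConfigurationDescription']['SqlApplicationConfigurationDescription']
 reference_id = sql_desc['ReferenceDataSourceDescriptions'][0]['ReferenceId']  # assumes one reference data source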
You can use the DescribeApplication operation to get this value.\n TableNameUpdate (string) --The in-application table name that is created by this update.\n S3ReferenceDataSourceUpdate (dict) --Describes the S3 bucket name, object key name, and IAM role that Kinesis Data Analytics can assume to read the Amazon S3 object on your behalf and populate the in-application reference table.\n BucketARNUpdate (string) --The Amazon Resource Name (ARN) of the S3 bucket.\n FileKeyUpdate (string) --The object key name.\n ReferenceSchemaUpdate (dict) --Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.\n RecordFormat (dict) -- [REQUIRED]Specifies the format of the records on the streaming source.\n RecordFormatType (string) -- [REQUIRED]The type of record format.\n MappingParameters (dict) --When you configure application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.\n JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.\n RecordRowPath (string) -- [REQUIRED]The path to the top-level parent that contains the records.\n CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).\n RecordRowDelimiter (string) -- [REQUIRED]The row delimiter. For example, in a CSV format, '\\n' is the typical row delimiter.\n RecordColumnDelimiter (string) -- [REQUIRED]The column delimiter. For example, in a CSV format, a comma (',') is the typical column delimiter.\n \n RecordEncoding (string) --Specifies the encoding of the records in the streaming source.
For example, UTF-8.\n RecordColumns (list) -- [REQUIRED]A list of RecordColumn objects.\n (dict) --For an SQL-based Amazon Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.\n Also used to describe the format of the reference data source.\n Name (string) -- [REQUIRED]The name of the column that is created in the in-application input stream or reference table.\n Mapping (string) --A reference to the data element in the streaming input of the reference data source.\n SqlType (string) -- [REQUIRED]The type of column created in the in-application input stream or reference table.\n \n \n \n ApplicationCodeConfigurationUpdate (dict) --Describes updates to a Java-based Kinesis Data Analytics application's code configuration.\n CodeContentTypeUpdate (string) --Describes updates to the code content type.\n CodeContentUpdate (dict) --Describes updates to the code content of an application.\n TextContentUpdate (string) --Describes an update to the text code for an application.\n ZipFileContentUpdate (bytes) --Describes an update to the zipped code for an application.\n S3ContentLocationUpdate (dict) --Describes an update to the location of code for an application.\n BucketARNUpdate (string) --The new Amazon Resource Name (ARN) for the S3 bucket containing the application code.\n FileKeyUpdate (string) --The new file key for the object containing the application code.\n ObjectVersionUpdate (string) --The new version of the object containing the application code.\n \n FlinkApplicationConfigurationUpdate (dict) --Describes updates to a Java-based Kinesis Data Analytics application's configuration.\n CheckpointConfigurationUpdate (dict) --Describes updates to an application's checkpointing configuration. 
Checkpointing is the process of persisting application state for fault tolerance.\n ConfigurationTypeUpdate (string) --Describes updates to whether the application uses the default checkpointing behavior of Kinesis Data Analytics.\n CheckpointingEnabledUpdate (boolean) --Describes updates to whether checkpointing is enabled for an application.\n CheckpointIntervalUpdate (integer) --Describes updates to the interval in milliseconds between checkpoint operations.\n MinPauseBetweenCheckpointsUpdate (integer) --Describes updates to the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start.\n MonitoringConfigurationUpdate (dict) --Describes updates to the configuration parameters for Amazon CloudWatch logging for an application.\n ConfigurationTypeUpdate (string) --Describes updates to whether to use the default CloudWatch logging configuration for an application.\n MetricsLevelUpdate (string) --Describes updates to the granularity of the CloudWatch Logs for an application.\n LogLevelUpdate (string) --Describes updates to the verbosity of the CloudWatch Logs for an application.\n ParallelismConfigurationUpdate (dict) --Describes updates to the parameters for how an application executes multiple tasks simultaneously.\n ConfigurationTypeUpdate (string) --Describes updates to whether the application uses the default parallelism for the Kinesis Data Analytics service, or if a custom parallelism is used.\n ParallelismUpdate (integer) --Describes updates to the initial number of parallel tasks an application can perform.\n ParallelismPerKPUUpdate (integer) --Describes updates to the number of parallel tasks an application can perform per Kinesis Processing Unit (KPU) used by the application.\n AutoScalingEnabledUpdate (boolean) --Describes updates to whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput.\n \n EnvironmentPropertyUpdates (dict) --Describes updates to the environment properties for a Java-based Kinesis Data Analytics application.\n PropertyGroups (list) -- [REQUIRED]Describes updates to the execution property groups.\n (dict) --Property key-value pairs passed into a Java-based Kinesis Data Analytics application.\n PropertyGroupId (string) -- [REQUIRED]Describes the key of an application execution property key-value pair.\n PropertyMap (dict) -- [REQUIRED]Describes the value of an application execution property key-value pair.\n (string) --\n (string) --\n \n \n ApplicationSnapshotConfigurationUpdate (dict) --Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics application.\n SnapshotsEnabledUpdate (boolean) -- [REQUIRED]Describes updates to whether snapshots are enabled for a Java-based Kinesis Data Analytics application.\n \n \n\n :type ServiceExecutionRoleUpdate: string\n :param ServiceExecutionRoleUpdate: Describes updates to the service execution role.\n\n :type RunConfigurationUpdate: dict\n :param RunConfigurationUpdate: Describes updates to the application's starting parameters.\n ApplicationRestoreConfiguration (dict) --Describes updates to the restore behavior of a restarting application.\n ApplicationRestoreType (string) -- [REQUIRED]Specifies how the application should be restored.\n SnapshotName (string) --The identifier of an existing snapshot of application state to use to restart an application. 
The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT is specified for the ApplicationRestoreType .\n \n \n\n :type CloudWatchLoggingOptionUpdates: list\n :param CloudWatchLoggingOptionUpdates: Describes application Amazon CloudWatch logging option updates. You can only update existing CloudWatch logging options with this action. To add a new CloudWatch logging option, use AddApplicationCloudWatchLoggingOption .\n (dict) --Describes the Amazon CloudWatch logging option updates.\n CloudWatchLoggingOptionId (string) -- [REQUIRED]The ID of the CloudWatch logging option to update\n LogStreamARNUpdate (string) --The Amazon Resource Name (ARN) of the CloudWatch log to receive application messages.\n \n \n\n :rtype: dict\n :return: {\n 'ApplicationDetail': {\n 'ApplicationARN': 'string',\n 'ApplicationDescription': 'string',\n 'ApplicationName': 'string',\n 'RuntimeEnvironment': 'SQL-1_0'|'FLINK-1_6',\n 'ServiceExecutionRole': 'string',\n 'ApplicationStatus': 'DELETING'|'STARTING'|'STOPPING'|'READY'|'RUNNING'|'UPDATING',\n 'ApplicationVersionId': 123,\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'LastUpdateTimestamp': datetime(2015, 1, 1),\n 'ApplicationConfigurationDescription': {\n 'SqlApplicationConfigurationDescription': {\n 'InputDescriptions': [\n {\n 'InputId': 'string',\n 'NamePrefix': 'string',\n 'InAppStreamNames': [\n 'string',\n ],\n 'InputProcessingConfigurationDescription': {\n 'InputLambdaProcessorDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n }\n },\n 'KinesisStreamsInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseInputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'InputSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n },\n 'InputParallelism': {\n 'Count': 123\n },\n 'InputStartingPositionConfiguration': {\n 'InputStartingPosition': 'NOW'|'TRIM_HORIZON'|'LAST_STOPPED_POINT'\n }\n },\n ],\n 'OutputDescriptions': [\n {\n 'OutputId': 'string',\n 'Name': 'string',\n 'KinesisStreamsOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'KinesisFirehoseOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'LambdaOutputDescription': {\n 'ResourceARN': 'string',\n 'RoleARN': 'string'\n },\n 'DestinationSchema': {\n 'RecordFormatType': 'JSON'|'CSV'\n }\n },\n ],\n 'ReferenceDataSourceDescriptions': [\n {\n 'ReferenceId': 'string',\n 'TableName': 'string',\n 'S3ReferenceDataSourceDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ReferenceRoleARN': 'string'\n },\n 'ReferenceSchema': {\n 'RecordFormat': {\n 'RecordFormatType': 'JSON'|'CSV',\n 'MappingParameters': {\n 'JSONMappingParameters': {\n 'RecordRowPath': 'string'\n },\n 'CSVMappingParameters': {\n 'RecordRowDelimiter': 'string',\n 'RecordColumnDelimiter': 'string'\n }\n }\n },\n 'RecordEncoding': 'string',\n 'RecordColumns': [\n {\n 'Name': 'string',\n 'Mapping': 'string',\n 'SqlType': 'string'\n },\n ]\n }\n },\n ]\n },\n 'ApplicationCodeConfigurationDescription': {\n 'CodeContentType': 'PLAINTEXT'|'ZIPFILE',\n 'CodeContentDescription': {\n 'TextContent': 'string',\n 'CodeMD5': 'string',\n 'CodeSize': 123,\n 
'S3ApplicationCodeLocationDescription': {\n 'BucketARN': 'string',\n 'FileKey': 'string',\n 'ObjectVersion': 'string'\n }\n }\n },\n 'RunConfigurationDescription': {\n 'ApplicationRestoreConfigurationDescription': {\n 'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'|'RESTORE_FROM_LATEST_SNAPSHOT'|'RESTORE_FROM_CUSTOM_SNAPSHOT',\n 'SnapshotName': 'string'\n }\n },\n 'FlinkApplicationConfigurationDescription': {\n 'CheckpointConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'CheckpointingEnabled': True|False,\n 'CheckpointInterval': 123,\n 'MinPauseBetweenCheckpoints': 123\n },\n 'MonitoringConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'MetricsLevel': 'APPLICATION'|'TASK'|'OPERATOR'|'PARALLELISM',\n 'LogLevel': 'INFO'|'WARN'|'ERROR'|'DEBUG'\n },\n 'ParallelismConfigurationDescription': {\n 'ConfigurationType': 'DEFAULT'|'CUSTOM',\n 'Parallelism': 123,\n 'ParallelismPerKPU': 123,\n 'CurrentParallelism': 123,\n 'AutoScalingEnabled': True|False\n },\n 'JobPlanDescription': 'string'\n },\n 'EnvironmentPropertyDescriptions': {\n 'PropertyGroupDescriptions': [\n {\n 'PropertyGroupId': 'string',\n 'PropertyMap': {\n 'string': 'string'\n }\n },\n ]\n },\n 'ApplicationSnapshotConfigurationDescription': {\n 'SnapshotsEnabled': True|False\n }\n },\n 'CloudWatchLoggingOptionDescriptions': [\n {\n 'CloudWatchLoggingOptionId': 'string',\n 'LogStreamARN': 'string',\n 'RoleARN': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6198806762695312, "alphanum_fraction": 0.622753381729126, "avg_line_length": 31.13136100769043, "blob_id": "aad73fbf36cd1281a70532a6da47408325a58a38", "content_id": "bbcc9c968efcf23d2057c98ec92201b3cc7de294", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27152, "license_type": "permissive", "max_line_length": 327, "num_lines": 845, "path": "/pyboto3/codestar.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_team_member(projectId=None, clientRequestToken=None, userArn=None, projectRole=None, remoteAccessAllowed=None):\n \"\"\"\n Adds an IAM user to the team for an AWS CodeStar project.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_team_member(\n projectId='string',\n clientRequestToken='string',\n userArn='string',\n projectRole='string',\n remoteAccessAllowed=True|False\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n The ID of the project to which you will add the IAM user.\n \n\n :type clientRequestToken: string\n :param clientRequestToken: A user- or system-generated token that identifies the entity that requested the team member association to the project. This token can be used to repeat the request.\n\n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) for the IAM user you want to add to the AWS CodeStar project.\n \n\n :type projectRole: string\n :param projectRole: [REQUIRED]\n The AWS CodeStar project role that will apply to this user. This role determines what actions a user can take in an AWS CodeStar project.\n \n\n :type remoteAccessAllowed: boolean\n :param remoteAccessAllowed: Whether the team member is allowed to use an SSH public/private key pair to remotely access project resources, for example Amazon EC2 instances.\n\n :rtype: dict\n :return: {\n 'clientRequestToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_project(name=None, id=None, description=None, clientRequestToken=None, sourceCode=None, toolchain=None, tags=None):\n \"\"\"\n Creates a project, including project resources. This action creates a project based on a submitted project request. A set of source code files and a toolchain template file can be included with the project request. 
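A hand-written usage sketch (not from the generated reference) of the associate_team_member call documented above; the project ID and user ARN are hypothetical, and the role name assumes the standard CodeStar project roles (Owner, Contributor, Viewer):

    import boto3

    client = boto3.client('codestar')
    # All identifiers below are placeholders.
    response = client.associate_team_member(
        projectId='my-project',
        userArn='arn:aws:iam::123456789012:user/jane',
        projectRole='Contributor',
        remoteAccessAllowed=False
    )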
If these are not provided, an empty project is created.\n See also: AWS API Documentation\n \n \n :example: response = client.create_project(\n name='string',\n id='string',\n description='string',\n clientRequestToken='string',\n sourceCode=[\n {\n 'source': {\n 's3': {\n 'bucketName': 'string',\n 'bucketKey': 'string'\n }\n },\n 'destination': {\n 'codeCommit': {\n 'name': 'string'\n },\n 'gitHub': {\n 'name': 'string',\n 'description': 'string',\n 'type': 'string',\n 'owner': 'string',\n 'privateRepository': True|False,\n 'issuesEnabled': True|False,\n 'token': 'string'\n }\n }\n },\n ],\n toolchain={\n 'source': {\n 's3': {\n 'bucketName': 'string',\n 'bucketKey': 'string'\n }\n },\n 'roleArn': 'string',\n 'stackParameters': {\n 'string': 'string'\n }\n },\n tags={\n 'string': 'string'\n }\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The display name for the project to be created in AWS CodeStar.\n \n\n :type id: string\n :param id: [REQUIRED]\n The ID of the project to be created in AWS CodeStar.\n \n\n :type description: string\n :param description: The description of the project, if any.\n\n :type clientRequestToken: string\n :param clientRequestToken: A user- or system-generated token that identifies the entity that requested project creation. This token can be used to repeat the request.\n\n :type sourceCode: list\n :param sourceCode: A list of the Code objects submitted with the project request. If this parameter is specified, the request must also include the toolchain parameter.\n (dict) --Location and destination information about the source code files provided with the project request. The source code is uploaded to the new project source repository after project creation.\n source (dict) -- [REQUIRED]The location where the source code files provided with the project request are stored. AWS CodeStar retrieves the files during project creation.\n s3 (dict) -- [REQUIRED]Information about the Amazon S3 location where the source code files provided with the project request are stored.\n bucketName (string) --The Amazon S3 bucket name where the source code files provided with the project request are stored.\n bucketKey (string) --The Amazon S3 object key where the source code files provided with the project request are stored.\n \n destination (dict) -- [REQUIRED]The repository to be created in AWS CodeStar. Valid values are AWS CodeCommit or GitHub. After AWS CodeStar provisions the new repository, the source code files provided with the project request are placed in the repository.\n codeCommit (dict) --Information about the AWS CodeCommit repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.\n name (string) -- [REQUIRED]The name of the AWS CodeCommit repository to be created in AWS CodeStar.\n gitHub (dict) --Information about the GitHub repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.\n name (string) -- [REQUIRED]Name of the GitHub repository to be created in AWS CodeStar.\n description (string) --Description for the GitHub repository to be created in AWS CodeStar. This description displays in GitHub after the repository is created.\n type (string) -- [REQUIRED]The type of GitHub repository to be created in AWS CodeStar. 
Valid values are User or Organization.\n owner (string) -- [REQUIRED]The GitHub username for the owner of the GitHub repository to be created in AWS CodeStar. If this repository should be owned by a GitHub organization, provide its name.\n privateRepository (boolean) -- [REQUIRED]Whether the GitHub repository is to be a private repository.\n issuesEnabled (boolean) -- [REQUIRED]Whether to enable issues for the GitHub repository.\n token (string) -- [REQUIRED]The GitHub user's personal access token for the GitHub repository.\n \n \n \n\n :type toolchain: dict\n :param toolchain: The name of the toolchain template file submitted with the project request. If this parameter is specified, the request must also include the sourceCode parameter.\n source (dict) -- [REQUIRED]The Amazon S3 location where the toolchain template file provided with the project request is stored. AWS CodeStar retrieves the file during project creation.\n s3 (dict) -- [REQUIRED]The Amazon S3 bucket where the toolchain template file provided with the project request is stored.\n bucketName (string) --The Amazon S3 bucket name where the source code files provided with the project request are stored.\n bucketKey (string) --The Amazon S3 object key where the source code files provided with the project request are stored.\n \n roleArn (string) --The service role ARN for AWS CodeStar to use for the toolchain template during stack provisioning.\n stackParameters (dict) --The list of parameter overrides to be passed into the toolchain template during stack provisioning, if any.\n (string) --\n (string) --\n \n \n\n :type tags: dict\n :param tags: The tags created for the project.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'id': 'string',\n 'arn': 'string',\n 'clientRequestToken': 'string',\n 'projectTemplateId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_user_profile(userArn=None, displayName=None, emailAddress=None, sshPublicKey=None):\n \"\"\"\n Creates a profile for a user that includes user preferences, such as the display name and email address associated with the user, in AWS CodeStar. The user profile is not project-specific. Information in the user profile is displayed wherever the user's information appears to other users in AWS CodeStar.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user_profile(\n userArn='string',\n displayName='string',\n emailAddress='string',\n sshPublicKey='string'\n )\n \n \n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the user in IAM.\n \n\n :type displayName: string\n :param displayName: [REQUIRED]\n The name that will be displayed as the friendly name for the user in AWS CodeStar.\n \n\n :type emailAddress: string\n :param emailAddress: [REQUIRED]\n The email address that will be displayed as part of the user's profile in AWS CodeStar.\n \n\n :type sshPublicKey: string\n :param sshPublicKey: The SSH public key associated with the user in AWS CodeStar. If a project owner allows the user remote access to project resources, this public key will be used along with the user's private key for SSH access.\n\n :rtype: dict\n :return: {\n 'userArn': 'string',\n 'displayName': 'string',\n 'emailAddress': 'string',\n 'sshPublicKey': 'string',\n 'createdTimestamp': datetime(2015, 1, 1),\n 'lastModifiedTimestamp': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef delete_project(id=None, clientRequestToken=None, deleteStack=None):\n \"\"\"\n Deletes a project, including project resources. 
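A small hand-written sketch (not from the generated reference) of the create_user_profile call documented above; the user ARN and contact details are hypothetical placeholders:

    import boto3

    client = boto3.client('codestar')
    # Placeholder ARN and contact details; sshPublicKey is optional.
    profile = client.create_user_profile(
        userArn='arn:aws:iam::123456789012:user/mary',
        displayName='Mary Major',
        emailAddress='mary@example.com'
    )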
Does not delete users associated with the project, but does delete the IAM roles that allowed access to the project.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_project(\n id='string',\n clientRequestToken='string',\n deleteStack=True|False\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the project to be deleted in AWS CodeStar.\n \n\n :type clientRequestToken: string\n :param clientRequestToken: A user- or system-generated token that identifies the entity that requested project deletion. This token can be used to repeat the request.\n\n :type deleteStack: boolean\n :param deleteStack: Whether to send a delete request for the primary stack in AWS CloudFormation originally used to generate the project and its resources. This option will delete all AWS resources for the project (except for any buckets in Amazon S3) as well as deleting the project itself. Recommended for most use cases.\n\n :rtype: dict\n :return: {\n 'stackId': 'string',\n 'projectArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_user_profile(userArn=None):\n \"\"\"\n Deletes a user profile in AWS CodeStar, including all personal preference data associated with that profile, such as display name and email address. It does not delete the history of that user, for example the history of commits made by that user.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user_profile(\n userArn='string'\n )\n \n \n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the user to delete from AWS CodeStar.\n \n\n :rtype: dict\n :return: {\n 'userArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_project(id=None):\n \"\"\"\n Describes a project and its resources.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_project(\n id='string'\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the project.\n \n\n :rtype: dict\n :return: {\n 'name': 'string',\n 'id': 'string',\n 'arn': 'string',\n 'description': 'string',\n 'clientRequestToken': 'string',\n 'createdTimeStamp': datetime(2015, 1, 1),\n 'stackId': 'string',\n 'projectTemplateId': 'string',\n 'status': {\n 'state': 'string',\n 'reason': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_user_profile(userArn=None):\n \"\"\"\n Describes a user in AWS CodeStar and the user attributes across all projects.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user_profile(\n userArn='string'\n )\n \n \n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the user.\n \n\n :rtype: dict\n :return: {\n 'userArn': 'string',\n 'displayName': 'string',\n 'emailAddress': 'string',\n 'sshPublicKey': 'string',\n 'createdTimestamp': datetime(2015, 1, 1),\n 'lastModifiedTimestamp': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef disassociate_team_member(projectId=None, userArn=None):\n \"\"\"\n Removes a user from a project. Removing a user from a project also removes the IAM policies from that user that allowed access to the project and its resources. Disassociating a team member does not remove that user's profile from AWS CodeStar. 
It does not remove the user from IAM.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_team_member(\n projectId='string',\n userArn='string'\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n The ID of the AWS CodeStar project from which you want to remove a team member.\n \n\n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM user or group whom you want to remove from the project.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_projects(nextToken=None, maxResults=None):\n \"\"\"\n Lists all projects in AWS CodeStar associated with your AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_projects(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The continuation token to be used to return the next set of results, if the results cannot be returned in one response.\n\n :type maxResults: integer\n :param maxResults: The maximum amount of data that can be contained in a single set of results.\n\n :rtype: dict\n :return: {\n 'projects': [\n {\n 'projectId': 'string',\n 'projectArn': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_resources(projectId=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists resources associated with a project in AWS CodeStar.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resources(\n projectId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n The ID of the project.\n \n\n :type nextToken: string\n :param nextToken: The continuation token for the next set of results, if the results cannot be returned in one response.\n\n :type maxResults: integer\n :param maxResults: The maximum amount of data that can be contained in a single set of results.\n\n :rtype: dict\n :return: {\n 'resources': [\n {\n 'id': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_project(id=None, nextToken=None, maxResults=None):\n \"\"\"\n Gets the tags for a project.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_project(\n id='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the project to get tags for.\n \n\n :type nextToken: string\n :param nextToken: Reserved for future use.\n\n :type maxResults: integer\n :param maxResults: Reserved for future use.\n\n :rtype: dict\n :return: {\n 'tags': {\n 'string': 'string'\n },\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_team_members(projectId=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists all team members associated with a project.\n See also: AWS API Documentation\n \n \n :example: response = client.list_team_members(\n projectId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n The ID of the project for which you want to list team members.\n \n\n :type nextToken: string\n :param nextToken: The continuation token for the next set of results, if the results cannot be returned in one response.\n\n :type maxResults: integer\n :param maxResults: The maximum number of team members you want returned in a response.\n\n :rtype: dict\n :return: {\n 'teamMembers': [\n {\n 'userArn': 'string',\n 'projectRole': 'string',\n 'remoteAccessAllowed': True|False\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_user_profiles(nextToken=None, maxResults=None):\n \"\"\"\n Lists all the user profiles configured for your AWS account in AWS CodeStar.\n See also: AWS API Documentation\n \n \n :example: response = client.list_user_profiles(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The 
continuation token for the next set of results, if the results cannot be returned in one response.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in a response.\n\n :rtype: dict\n :return: {\n 'userProfiles': [\n {\n 'userArn': 'string',\n 'displayName': 'string',\n 'emailAddress': 'string',\n 'sshPublicKey': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef tag_project(id=None, tags=None):\n \"\"\"\n Adds tags to a project.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_project(\n id='string',\n tags={\n 'string': 'string'\n }\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the project you want to add a tag to.\n \n\n :type tags: dict\n :param tags: [REQUIRED]\n The tags you want to add to the project.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'tags': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef untag_project(id=None, tags=None):\n \"\"\"\n Removes tags from a project.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_project(\n id='string',\n tags=[\n 'string',\n ]\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the project to remove tags from.\n \n\n :type tags: list\n :param tags: [REQUIRED]\n The tags to remove from the project.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_project(id=None, name=None, description=None):\n \"\"\"\n Updates a project in AWS CodeStar.\n See also: AWS API Documentation\n \n \n :example: response = client.update_project(\n id='string',\n name='string',\n description='string'\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the project you want to update.\n \n\n :type name: string\n :param name: The name of the project you want to update.\n\n :type description: string\n :param description: The description of the project, if any.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_team_member(projectId=None, userArn=None, projectRole=None, remoteAccessAllowed=None):\n \"\"\"\n Updates a team member's attributes in an AWS CodeStar project. For example, you can change a team member's role in the project, or change whether they have remote access to project resources.\n See also: AWS API Documentation\n \n \n :example: response = client.update_team_member(\n projectId='string',\n userArn='string',\n projectRole='string',\n remoteAccessAllowed=True|False\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n The ID of the project.\n \n\n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the user for whom you want to change team membership attributes.\n \n\n :type projectRole: string\n :param projectRole: The role assigned to the user in the project. Project roles have different levels of access. For more information, see Working with Teams in the AWS CodeStar User Guide .\n\n :type remoteAccessAllowed: boolean\n :param remoteAccessAllowed: Whether a team member is allowed to remotely access project resources using the SSH public key associated with the user's profile. 
Even if this is set to True, the user must associate a public key with their profile before the user can access resources.\n\n :rtype: dict\n :return: {\n 'userArn': 'string',\n 'projectRole': 'string',\n 'remoteAccessAllowed': True|False\n }\n \n \n \"\"\"\n pass\n\ndef update_user_profile(userArn=None, displayName=None, emailAddress=None, sshPublicKey=None):\n \"\"\"\n Updates a user's profile in AWS CodeStar. The user profile is not project-specific. Information in the user profile is displayed wherever the user's information appears to other users in AWS CodeStar.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_profile(\n userArn='string',\n displayName='string',\n emailAddress='string',\n sshPublicKey='string'\n )\n \n \n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the user whose profile you want to update.\n \n\n :type displayName: string\n :param displayName: The name that is displayed as the friendly name for the user in AWS CodeStar.\n\n :type emailAddress: string\n :param emailAddress: The email address that is displayed as part of the user's profile in AWS CodeStar.\n\n :type sshPublicKey: string\n :param sshPublicKey: The SSH public key associated with the user in AWS CodeStar. If a project owner allows the user remote access to project resources, this public key will be used along with the user's private key for SSH access.\n\n :rtype: dict\n :return: {\n 'userArn': 'string',\n 'displayName': 'string',\n 'emailAddress': 'string',\n 'sshPublicKey': 'string',\n 'createdTimestamp': datetime(2015, 1, 1),\n 'lastModifiedTimestamp': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.666007399559021, "alphanum_fraction": 0.6665018796920776, "avg_line_length": 40.2729606628418, "blob_id": "59513487eba024d010324738b2998157b70a5c4c", "content_id": "bb9b64bf3a59eab23ea60636c199215e0035b3a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16180, "license_type": "permissive", "max_line_length": 278, "num_lines": 392, "path": "/pyboto3/pinpointsmsvoice.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. 
This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_configuration_set(ConfigurationSetName=None):\n \"\"\"\n Create a new configuration set. After you create the configuration set, you can add one or more event destinations to it.\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name that you want to give the configuration set.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_configuration_set_event_destination(ConfigurationSetName=None, EventDestination=None, EventDestinationName=None):\n \"\"\"\n Create a new event destination in a configuration set.\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestination={\n 'CloudWatchLogsDestination': {\n 'IamRoleArn': 'string',\n 'LogGroupArn': 'string'\n },\n 'Enabled': True|False,\n 'KinesisFirehoseDestination': {\n 'DeliveryStreamArn': 'string',\n 'IamRoleArn': 'string'\n },\n 'MatchingEventTypes': [\n 'INITIATED_CALL'|'RINGING'|'ANSWERED'|'COMPLETED_CALL'|'BUSY'|'FAILED'|'NO_ANSWER',\n ],\n 'SnsDestination': {\n 'TopicArn': 'string'\n }\n },\n EventDestinationName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED] ConfigurationSetName\n\n :type EventDestination: dict\n :param EventDestination: An object that defines a single event destination.\n CloudWatchLogsDestination (dict) -- An object that contains information about an event destination that sends data to Amazon CloudWatch Logs.\n IamRoleArn (string) -- The Amazon Resource Name (ARN) of an Amazon Identity and Access Management (IAM) role that is able to write event data to an Amazon CloudWatch destination.\n LogGroupArn (string) -- The name of the Amazon CloudWatch Log Group that you want to record events in.\n Enabled (boolean) -- Indicates whether or not the event destination is enabled. If the event destination is enabled, then Amazon Pinpoint sends response data to the specified event destination.\n KinesisFirehoseDestination (dict) -- An object that contains information about an event destination that sends data to Amazon Kinesis Data Firehose.\n DeliveryStreamArn (string) -- The Amazon Resource Name (ARN) of an IAM role that can write data to an Amazon Kinesis Data Firehose stream.\n IamRoleArn (string) -- The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose destination that you want to use in the event destination.\n MatchingEventTypes (list) -- An array of EventDestination objects. 
Each EventDestination object includes ARNs and other information that define an event destination.\n (string) -- The types of events that are sent to the event destination.\n SnsDestination (dict) -- An object that contains information about an event destination that sends data to Amazon SNS.\n TopicArn (string) -- The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish events to.\n \n\n :type EventDestinationName: string\n :param EventDestinationName: A name that identifies the event destination.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- CreateConfigurationSetEventDestinationResponse\n \n \"\"\"\n pass\n\ndef delete_configuration_set(ConfigurationSetName=None):\n \"\"\"\n Deletes an existing configuration set.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED] ConfigurationSetName\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_configuration_set_event_destination(ConfigurationSetName=None, EventDestinationName=None):\n \"\"\"\n Deletes an event destination in a configuration set.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED] ConfigurationSetName\n\n :type EventDestinationName: string\n :param EventDestinationName: [REQUIRED] EventDestinationName\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- DeleteConfigurationSetEventDestinationResponse\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
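As a hand-written sketch (not from the generated reference), the configuration-set calls documented above can be combined to create a set and attach an SNS event destination; the set name, destination name, and topic ARN are hypothetical placeholders:

    import boto3

    client = boto3.client('pinpoint-sms-voice')
    # Placeholder names and ARN.
    client.create_configuration_set(ConfigurationSetName='voice-events')
    client.create_configuration_set_event_destination(
        ConfigurationSetName='voice-events',
        EventDestinationName='call-status-to-sns',
        EventDestination={
            'Enabled': True,
            'MatchingEventTypes': ['INITIATED_CALL', 'COMPLETED_CALL', 'FAILED'],
            'SnsDestination': {
                'TopicArn': 'arn:aws:sns:us-east-1:123456789012:call-status'
            }
        }
    )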
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_configuration_set_event_destinations(ConfigurationSetName=None):\n \"\"\"\n Obtain information about an event destination, including the types of events it reports, the Amazon Resource Name (ARN) of the destination, and the name of the event destination.\n See also: AWS API Documentation\n \n \n :example: response = client.get_configuration_set_event_destinations(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED] ConfigurationSetName\n\n :rtype: dict\n :return: {\n 'EventDestinations': [\n {\n 'CloudWatchLogsDestination': {\n 'IamRoleArn': 'string',\n 'LogGroupArn': 'string'\n },\n 'Enabled': True|False,\n 'KinesisFirehoseDestination': {\n 'DeliveryStreamArn': 'string',\n 'IamRoleArn': 'string'\n },\n 'MatchingEventTypes': [\n 'INITIATED_CALL'|'RINGING'|'ANSWERED'|'COMPLETED_CALL'|'BUSY'|'FAILED'|'NO_ANSWER',\n ],\n 'Name': 'string',\n 'SnsDestination': {\n 'TopicArn': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef send_voice_message(CallerId=None, ConfigurationSetName=None, Content=None, DestinationPhoneNumber=None, OriginationPhoneNumber=None):\n \"\"\"\n Create a new voice message and send it to a recipient's phone number.\n See also: AWS API Documentation\n \n \n :example: response = client.send_voice_message(\n CallerId='string',\n ConfigurationSetName='string',\n Content={\n 'CallInstructionsMessage': {\n 'Text': 'string'\n },\n 'PlainTextMessage': {\n 'LanguageCode': 'string',\n 'Text': 'string',\n 'VoiceId': 'string'\n },\n 'SSMLMessage': {\n 'LanguageCode': 'string',\n 'Text': 'string',\n 'VoiceId': 'string'\n }\n },\n DestinationPhoneNumber='string',\n OriginationPhoneNumber='string'\n )\n \n \n :type CallerId: string\n :param CallerId: The phone number that appears on recipients' devices when they receive the message.\n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set that you want to use to send the message.\n\n :type Content: dict\n :param Content: An object that contains a voice message and information about the recipient that you want to send it to.\n CallInstructionsMessage (dict) -- An object that defines a message that contains text formatted using Amazon Pinpoint Voice Instructions markup.\n Text (string) -- The language to use when delivering the message. For a complete list of supported languages, see the Amazon Polly Developer Guide.\n PlainTextMessage (dict) -- An object that defines a message that contains unformatted text.\n LanguageCode (string) -- The language to use when delivering the message. 
For a complete list of supported languages, see the Amazon Polly Developer Guide.\n Text (string) -- The plain (not SSML-formatted) text to deliver to the recipient.\n VoiceId (string) -- The name of the voice that you want to use to deliver the message. For a complete list of supported voices, see the Amazon Polly Developer Guide.\n SSMLMessage (dict) -- An object that defines a message that contains SSML-formatted text.\n LanguageCode (string) -- The language to use when delivering the message. For a complete list of supported languages, see the Amazon Polly Developer Guide.\n Text (string) -- The SSML-formatted text to deliver to the recipient.\n VoiceId (string) -- The name of the voice that you want to use to deliver the message. For a complete list of supported voices, see the Amazon Polly Developer Guide.\n \n\n :type DestinationPhoneNumber: string\n :param DestinationPhoneNumber: The phone number that you want to send the voice message to.\n\n :type OriginationPhoneNumber: string\n :param OriginationPhoneNumber: The phone number that Amazon Pinpoint should use to send the voice message. This isn't necessarily the phone number that appears on recipients' devices when they receive the message, because you can specify a CallerId parameter in the request.\n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n :returns: \n (dict) -- SendVoiceMessageResponse\n MessageId (string) -- A unique identifier for the voice message.\n \n \n \n \"\"\"\n pass\n\ndef update_configuration_set_event_destination(ConfigurationSetName=None, EventDestination=None, EventDestinationName=None):\n \"\"\"\n Update an event destination in a configuration set. An event destination is a location that you publish information about your voice calls to. For example, you can log an event to an Amazon CloudWatch destination when a call fails.\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestination={\n 'CloudWatchLogsDestination': {\n 'IamRoleArn': 'string',\n 'LogGroupArn': 'string'\n },\n 'Enabled': True|False,\n 'KinesisFirehoseDestination': {\n 'DeliveryStreamArn': 'string',\n 'IamRoleArn': 'string'\n },\n 'MatchingEventTypes': [\n 'INITIATED_CALL'|'RINGING'|'ANSWERED'|'COMPLETED_CALL'|'BUSY'|'FAILED'|'NO_ANSWER',\n ],\n 'SnsDestination': {\n 'TopicArn': 'string'\n }\n },\n EventDestinationName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED] ConfigurationSetName\n\n :type EventDestination: dict\n :param EventDestination: An object that defines a single event destination.\n CloudWatchLogsDestination (dict) -- An object that contains information about an event destination that sends data to Amazon CloudWatch Logs.\n IamRoleArn (string) -- The Amazon Resource Name (ARN) of an Amazon Identity and Access Management (IAM) role that is able to write event data to an Amazon CloudWatch destination.\n LogGroupArn (string) -- The name of the Amazon CloudWatch Log Group that you want to record events in.\n Enabled (boolean) -- Indicates whether or not the event destination is enabled. 
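A hand-written sketch (not from the generated reference) of the send_voice_message call documented above; both phone numbers and the message text are hypothetical, and the origination number is assumed to be one provisioned in your Amazon Pinpoint account:

    import boto3

    client = boto3.client('pinpoint-sms-voice')
    # Placeholder numbers; 'Joanna' is one of the Amazon Polly voices.
    response = client.send_voice_message(
        OriginationPhoneNumber='+12065550100',
        DestinationPhoneNumber='+14255550199',
        Content={
            'PlainTextMessage': {
                'LanguageCode': 'en-US',
                'Text': 'Hello, this is a test call.',
                'VoiceId': 'Joanna'
            }
        }
    )
    print(response['MessageId'])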
If the event destination is enabled, then Amazon Pinpoint sends response data to the specified event destination.\n KinesisFirehoseDestination (dict) -- An object that contains information about an event destination that sends data to Amazon Kinesis Data Firehose.\n DeliveryStreamArn (string) -- The Amazon Resource Name (ARN) of an IAM role that can write data to an Amazon Kinesis Data Firehose stream.\n IamRoleArn (string) -- The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose destination that you want to use in the event destination.\n MatchingEventTypes (list) -- An array of EventDestination objects. Each EventDestination object includes ARNs and other information that define an event destination.\n (string) -- The types of events that are sent to the event destination.\n SnsDestination (dict) -- An object that contains information about an event destination that sends data to Amazon SNS.\n TopicArn (string) -- The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish events to.\n \n\n :type EventDestinationName: string\n :param EventDestinationName: [REQUIRED] EventDestinationName\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- UpdateConfigurationSetEventDestinationResponse\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6038284301757812, "alphanum_fraction": 0.6098300218582153, "avg_line_length": 42.910152435302734, "blob_id": "0f1ebe56eed8399142b11687badb85f046745fad", "content_id": "8b57b1b2a421fc77e3a4b85560ec0fa8408fc703", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240936, "license_type": "permissive", "max_line_length": 988, "num_lines": 5487, "path": "/pyboto3/neptune.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_role_to_db_cluster(DBClusterIdentifier=None, RoleArn=None):\n \"\"\"\n Associates an Identity and Access Management (IAM) role with a Neptune DB cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.add_role_to_db_cluster(\n DBClusterIdentifier='string',\n RoleArn='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The name of the DB cluster to associate the IAM role with.\n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role to associate with the Neptune DB cluster, for example arn:aws:iam::123456789012:role/NeptuneAccessRole .\n \n\n \"\"\"\n pass\n\ndef add_source_identifier_to_subscription(SubscriptionName=None, SourceIdentifier=None):\n \"\"\"\n Adds a source identifier to an existing event notification subscription.\n See also: AWS API Documentation\n \n \n :example: response = client.add_source_identifier_to_subscription(\n SubscriptionName='string',\n SourceIdentifier='string'\n )\n \n \n :type SubscriptionName: string\n :param SubscriptionName: [REQUIRED]\n The name of the event notification subscription you want to add a source identifier to.\n \n\n :type SourceIdentifier: string\n :param SourceIdentifier: [REQUIRED]\n The identifier of the event source to be added.\n Constraints:\n If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.\n If the source type is a DB security group, a DBSecurityGroupName must be supplied.\n If the source type is a DB parameter group, a DBParameterGroupName must be supplied.\n If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.\n \n\n :rtype: dict\n :return: {\n 'EventSubscription': {\n 'CustomerAwsId': 'string',\n 'CustSubscriptionId': 'string',\n 'SnsTopicArn': 'string',\n 'Status': 'string',\n 'SubscriptionCreationTime': 'string',\n 'SourceType': 'string',\n 'SourceIdsList': [\n 'string',\n ],\n 'EventCategoriesList': [\n 'string',\n ],\n 'Enabled': True|False,\n 'EventSubscriptionArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef add_tags_to_resource(ResourceName=None, Tags=None):\n \"\"\"\n Adds metadata tags to an Amazon Neptune resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon Neptune resources, or used in a Condition statement in an IAM policy for Amazon Neptune.\n See also: AWS API Documentation\n \n \n :example: response = client.add_tags_to_resource(\n ResourceName='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceName: string\n :param ResourceName: [REQUIRED]\n The Amazon Neptune resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) .\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n The tags to be assigned to the Amazon Neptune resource.\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. 
The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n \"\"\"\n pass\n\ndef apply_pending_maintenance_action(ResourceIdentifier=None, ApplyAction=None, OptInType=None):\n \"\"\"\n Applies a pending maintenance action to a resource (for example, to a DB instance).\n See also: AWS API Documentation\n \n \n :example: response = client.apply_pending_maintenance_action(\n ResourceIdentifier='string',\n ApplyAction='string',\n OptInType='string'\n )\n \n \n :type ResourceIdentifier: string\n :param ResourceIdentifier: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) .\n \n\n :type ApplyAction: string\n :param ApplyAction: [REQUIRED]\n The pending maintenance action to apply to this resource.\n Valid values: system-update , db-upgrade\n \n\n :type OptInType: string\n :param OptInType: [REQUIRED]\n A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone.\n Valid values:\n immediate - Apply the maintenance action immediately.\n next-maintenance - Apply the maintenance action during the next maintenance window for the resource.\n undo-opt-in - Cancel any existing next-maintenance opt-in requests.\n \n\n :rtype: dict\n :return: {\n 'ResourcePendingMaintenanceActions': {\n 'ResourceIdentifier': 'string',\n 'PendingMaintenanceActionDetails': [\n {\n 'Action': 'string',\n 'AutoAppliedAfterDate': datetime(2015, 1, 1),\n 'ForcedApplyDate': datetime(2015, 1, 1),\n 'OptInStatus': 'string',\n 'CurrentApplyDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef copy_db_cluster_parameter_group(SourceDBClusterParameterGroupIdentifier=None, TargetDBClusterParameterGroupIdentifier=None, TargetDBClusterParameterGroupDescription=None, Tags=None):\n \"\"\"\n Copies the specified DB cluster parameter group.\n See also: AWS API Documentation\n \n \n :example: response = client.copy_db_cluster_parameter_group(\n SourceDBClusterParameterGroupIdentifier='string',\n TargetDBClusterParameterGroupIdentifier='string',\n TargetDBClusterParameterGroupDescription='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type SourceDBClusterParameterGroupIdentifier: string\n :param SourceDBClusterParameterGroupIdentifier: [REQUIRED]\n The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. 
For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) .\n Constraints:\n Must specify a valid DB cluster parameter group.\n If the source DB cluster parameter group is in the same AWS Region as the copy, specify a valid DB parameter group identifier, for example my-db-cluster-param-group , or a valid ARN.\n If the source DB parameter group is in a different AWS Region than the copy, specify a valid DB cluster parameter group ARN, for example arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1 .\n \n\n :type TargetDBClusterParameterGroupIdentifier: string\n :param TargetDBClusterParameterGroupIdentifier: [REQUIRED]\n The identifier for the copied DB cluster parameter group.\n Constraints:\n Cannot be null, empty, or blank\n Must contain from 1 to 255 letters, numbers, or hyphens\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Example: my-cluster-param-group1\n \n\n :type TargetDBClusterParameterGroupDescription: string\n :param TargetDBClusterParameterGroupDescription: [REQUIRED]\n A description for the copied DB cluster parameter group.\n \n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'DBClusterParameterGroup': {\n 'DBClusterParameterGroupName': 'string',\n 'DBParameterGroupFamily': 'string',\n 'Description': 'string',\n 'DBClusterParameterGroupArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef copy_db_cluster_snapshot(SourceDBClusterSnapshotIdentifier=None, TargetDBClusterSnapshotIdentifier=None, KmsKeyId=None, PreSignedUrl=None, CopyTags=None, Tags=None, SourceRegion=None):\n \"\"\"\n Copies a snapshot of a DB cluster.\n To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.\n You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. 
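A hand-written sketch (not from the generated reference) of the copy_db_cluster_parameter_group call documented above; the group names and description are hypothetical placeholders:

    import boto3

    client = boto3.client('neptune')
    # Placeholder identifiers; the source group must already exist.
    response = client.copy_db_cluster_parameter_group(
        SourceDBClusterParameterGroupIdentifier='my-db-cluster-param-group',
        TargetDBClusterParameterGroupIdentifier='my-cluster-param-group-copy',
        TargetDBClusterParameterGroupDescription='Copy of my-db-cluster-param-group'
    )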
To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:\n To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process .\n To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.\n See also: AWS API Documentation\n \n \n :example: response = client.copy_db_cluster_snapshot(\n SourceDBClusterSnapshotIdentifier='string',\n TargetDBClusterSnapshotIdentifier='string',\n KmsKeyId='string',\n CopyTags=True|False,\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n SourceRegion='string'\n )\n \n \n :type SourceDBClusterSnapshotIdentifier: string\n :param SourceDBClusterSnapshotIdentifier: [REQUIRED]\n The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.\n You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.\n Constraints:\n Must specify a valid system snapshot in the 'available' state.\n If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier.\n If the source snapshot is in a different AWS Region than the copy, specify a valid DB cluster snapshot ARN.\n Example: my-cluster-snapshot1\n \n\n :type TargetDBClusterSnapshotIdentifier: string\n :param TargetDBClusterSnapshotIdentifier: [REQUIRED]\n The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case-sensitive.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens.\n First character must be a letter.\n Cannot end with a hyphen or contain two consecutive hyphens.\n Example: my-cluster-snapshot2\n \n\n :type KmsKeyId: string\n :param KmsKeyId: The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.\n If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, Amazon Neptune encrypts the target DB cluster snapshot using the specified KMS encryption key.\n If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId , then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.\n If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId .\n To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.\n \n\n :type PreSignedUrl: string\n :param PreSignedUrl: The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. 
The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region.\n The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:\n KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.\n DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.\n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:neptune-cluster1-snapshot-20161115 .\n To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process .\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required\n \n\n :type CopyTags: boolean\n :param CopyTags: True to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot, and otherwise false. The default is false.\n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. 
The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :type SourceRegion: string\n :param SourceRegion: The ID of the region that contains the snapshot to be copied.\n\n :rtype: dict\n :return: {\n 'DBClusterSnapshot': {\n 'AvailabilityZones': [\n 'string',\n ],\n 'DBClusterSnapshotIdentifier': 'string',\n 'DBClusterIdentifier': 'string',\n 'SnapshotCreateTime': datetime(2015, 1, 1),\n 'Engine': 'string',\n 'AllocatedStorage': 123,\n 'Status': 'string',\n 'Port': 123,\n 'VpcId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1),\n 'MasterUsername': 'string',\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'SnapshotType': 'string',\n 'PercentProgress': 123,\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DBClusterSnapshotArn': 'string',\n 'SourceDBClusterSnapshotArn': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False\n }\n }\n \n \n :returns: \n TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.\n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.\n \n \"\"\"\n pass\n\ndef copy_db_parameter_group(SourceDBParameterGroupIdentifier=None, TargetDBParameterGroupIdentifier=None, TargetDBParameterGroupDescription=None, Tags=None):\n \"\"\"\n Copies the specified DB parameter group.\n See also: AWS API Documentation\n \n \n :example: response = client.copy_db_parameter_group(\n SourceDBParameterGroupIdentifier='string',\n TargetDBParameterGroupIdentifier='string',\n TargetDBParameterGroupDescription='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type SourceDBParameterGroupIdentifier: string\n :param SourceDBParameterGroupIdentifier: [REQUIRED]\n The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) .\n Constraints:\n Must specify a valid DB parameter group.\n Must specify a valid DB parameter group identifier, for example my-db-param-group , or a valid ARN.\n \n\n :type TargetDBParameterGroupIdentifier: string\n :param TargetDBParameterGroupIdentifier: [REQUIRED]\n The identifier for the copied DB parameter group.\n Constraints:\n Cannot be null, empty, or blank\n Must contain from 1 to 255 letters, numbers, or hyphens\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Example: my-db-parameter-group\n \n\n :type TargetDBParameterGroupDescription: string\n :param TargetDBParameterGroupDescription: [REQUIRED]\n A description for the copied DB parameter group.\n \n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. 
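For illustration, a Tags value that satisfies these constraints might look like the following hedged snippet (the key and value strings are hypothetical):\n Tags=[\n {\n 'Key': 'environment', # required tag name\n 'Value': 'staging' # optional tag value\n }\n ]\n 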
The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'DBParameterGroup': {\n 'DBParameterGroupName': 'string',\n 'DBParameterGroupFamily': 'string',\n 'Description': 'string',\n 'DBParameterGroupArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_db_cluster(AvailabilityZones=None, BackupRetentionPeriod=None, CharacterSetName=None, DatabaseName=None, DBClusterIdentifier=None, DBClusterParameterGroupName=None, VpcSecurityGroupIds=None, DBSubnetGroupName=None, Engine=None, EngineVersion=None, Port=None, MasterUsername=None, MasterUserPassword=None, OptionGroupName=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, ReplicationSourceIdentifier=None, Tags=None, StorageEncrypted=None, KmsKeyId=None, PreSignedUrl=None, EnableIAMDatabaseAuthentication=None, SourceRegion=None):\n \"\"\"\n Creates a new Amazon Neptune DB cluster.\n You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon Neptune DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.\n See also: AWS API Documentation\n \n \n :example: response = client.create_db_cluster(\n AvailabilityZones=[\n 'string',\n ],\n BackupRetentionPeriod=123,\n CharacterSetName='string',\n DatabaseName='string',\n DBClusterIdentifier='string',\n DBClusterParameterGroupName='string',\n VpcSecurityGroupIds=[\n 'string',\n ],\n DBSubnetGroupName='string',\n Engine='string',\n EngineVersion='string',\n Port=123,\n MasterUsername='string',\n MasterUserPassword='string',\n OptionGroupName='string',\n PreferredBackupWindow='string',\n PreferredMaintenanceWindow='string',\n ReplicationSourceIdentifier='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n StorageEncrypted=True|False,\n KmsKeyId='string',\n EnableIAMDatabaseAuthentication=True|False,\n SourceRegion='string'\n )\n \n \n :type AvailabilityZones: list\n :param AvailabilityZones: A list of EC2 Availability Zones that instances in the DB cluster can be created in.\n (string) --\n \n\n :type BackupRetentionPeriod: integer\n :param BackupRetentionPeriod: The number of days for which automated backups are retained. You must specify a minimum value of 1.\n Default: 1\n Constraints:\n Must be a value from 1 to 35\n \n\n :type CharacterSetName: string\n :param CharacterSetName: A value that indicates that the DB cluster should be associated with the specified CharacterSet.\n\n :type DatabaseName: string\n :param DatabaseName: The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Neptune will not create a database in the DB cluster you are creating.\n\n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The DB cluster identifier. This parameter is stored as a lowercase string.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens.\n First character must be a letter.\n Cannot end with a hyphen or contain two consecutive hyphens.\n Example: my-cluster1\n \n\n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: The name of the DB cluster parameter group to associate with this DB cluster. 
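As a hedged illustration, a minimal CreateDBCluster call that supplies this parameter might look like the following (the identifiers and group name are hypothetical; per the Engine parameter below, 'neptune' is the only valid engine):\n response = client.create_db_cluster(\n DBClusterIdentifier='my-cluster1',\n Engine='neptune',\n DBClusterParameterGroupName='my-cluster-params'\n )\n 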
If this argument is omitted, the default is used.\n Constraints:\n If supplied, must match the name of an existing DBClusterParameterGroup.\n \n\n :type VpcSecurityGroupIds: list\n :param VpcSecurityGroupIds: A list of EC2 VPC security groups to associate with this DB cluster.\n (string) --\n \n\n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: A DB subnet group to associate with this DB cluster.\n Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.\n Example: mySubnetgroup\n \n\n :type Engine: string\n :param Engine: [REQUIRED]\n The name of the database engine to be used for this DB cluster.\n Valid Values: neptune\n \n\n :type EngineVersion: string\n :param EngineVersion: The version number of the database engine to use.\n Example: 1.0.1\n \n\n :type Port: integer\n :param Port: The port number on which the instances in the DB cluster accept connections.\n Default: 8182\n \n\n :type MasterUsername: string\n :param MasterUsername: The name of the master user for the DB cluster.\n Constraints:\n Must be 1 to 16 letters or numbers.\n First character must be a letter.\n Cannot be a reserved word for the chosen database engine.\n \n\n :type MasterUserPassword: string\n :param MasterUserPassword: The password for the master database user. This password can contain any printable ASCII character except '/', ''', or '@'.\n Constraints: Must contain from 8 to 41 characters.\n \n\n :type OptionGroupName: string\n :param OptionGroupName: A value that indicates that the DB cluster should be associated with the specified option group.\n Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.\n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Neptune User Guide.\n Constraints:\n Must be in the format hh24:mi-hh24:mi .\n Must be in Universal Coordinated Time (UTC).\n Must not conflict with the preferred maintenance window.\n Must be at least 30 minutes.\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n Format: ddd:hh24:mi-ddd:hh24:mi\n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Neptune User Guide.\n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n Constraints: Minimum 30-minute window.\n \n\n :type ReplicationSourceIdentifier: string\n :param ReplicationSourceIdentifier: The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.\n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. 
The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :type StorageEncrypted: boolean\n :param StorageEncrypted: Specifies whether the DB cluster is encrypted.\n\n :type KmsKeyId: string\n :param KmsKeyId: The AWS KMS key identifier for an encrypted DB cluster.\n The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.\n If an encryption key is not specified in KmsKeyId :\n If ReplicationSourceIdentifier identifies an encrypted source, then Amazon Neptune will use the encryption key used to encrypt the source. Otherwise, Amazon Neptune will use your default encryption key.\n If the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, then Amazon Neptune will use your default encryption key.\n AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.\n If you create a Read Replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the Read Replica in that AWS Region.\n \n\n :type PreSignedUrl: string\n :param PreSignedUrl: A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source AWS Region where the DB cluster is replicated from. You only need to specify PreSignedUrl when you are performing cross-region replication from an encrypted DB cluster.\n The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source AWS Region that contains the encrypted DB cluster to be copied.\n The pre-signed URL request must contain the following parameter values:\n KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster in the destination AWS Region. This should refer to the same KMS key for both the CreateDBCluster action that is called in the destination AWS Region, and the action contained in the pre-signed URL.\n DestinationRegion - The name of the AWS Region that Read Replica will be created in.\n ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster from the us-west-2 AWS Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:neptune-cluster1 .\n To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process .\n Please note that this parameter is automatically populated if it is not provided. 
Including this parameter is not required\n \n\n :type EnableIAMDatabaseAuthentication: boolean\n :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.\n Default: false\n \n\n :type SourceRegion: string\n :param SourceRegion: The ID of the region that contains the source for the db cluster.\n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_db_cluster_parameter_group(DBClusterParameterGroupName=None, DBParameterGroupFamily=None, Description=None, Tags=None):\n \"\"\"\n Creates a new DB cluster parameter group.\n Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.\n A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup . Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster . 
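A hedged sketch of that create-modify-associate sequence (the group name, parameter group family, parameter name, and cluster identifier are hypothetical placeholders):\n import boto3\n client = boto3.client('neptune')\n # 1. Create the group with engine defaults.\n client.create_db_cluster_parameter_group(\n DBClusterParameterGroupName='my-cluster-params',\n DBParameterGroupFamily='neptune1',\n Description='Custom cluster parameters'\n )\n # 2. Override the parameters you care about.\n client.modify_db_cluster_parameter_group(\n DBClusterParameterGroupName='my-cluster-params',\n Parameters=[{'ParameterName': 'neptune_query_timeout',\n 'ParameterValue': '60000',\n 'ApplyMethod': 'pending-reboot'}]\n )\n # 3. Associate the group with the cluster.\n client.modify_db_cluster(\n DBClusterIdentifier='my-cluster1',\n DBClusterParameterGroupName='my-cluster-params'\n )\n 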
When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.\n See also: AWS API Documentation\n \n \n :example: response = client.create_db_cluster_parameter_group(\n DBClusterParameterGroupName='string',\n DBParameterGroupFamily='string',\n Description='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: [REQUIRED]\n The name of the DB cluster parameter group.\n Constraints:\n Must match the name of an existing DBClusterParameterGroup.\n Note\n This value is stored as a lowercase string.\n \n\n :type DBParameterGroupFamily: string\n :param DBParameterGroupFamily: [REQUIRED]\n The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n \n\n :type Description: string\n :param Description: [REQUIRED]\n The description for the DB cluster parameter group.\n \n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'DBClusterParameterGroup': {\n 'DBClusterParameterGroupName': 'string',\n 'DBParameterGroupFamily': 'string',\n 'Description': 'string',\n 'DBClusterParameterGroupArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_db_cluster_snapshot(DBClusterSnapshotIdentifier=None, DBClusterIdentifier=None, Tags=None):\n \"\"\"\n Creates a snapshot of a DB cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.create_db_cluster_snapshot(\n DBClusterSnapshotIdentifier='string',\n DBClusterIdentifier='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type DBClusterSnapshotIdentifier: string\n :param DBClusterSnapshotIdentifier: [REQUIRED]\n The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens.\n First character must be a letter.\n Cannot end with a hyphen or contain two consecutive hyphens.\n Example: my-cluster1-snapshot1\n \n\n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The identifier of the DB cluster to create a snapshot for. 
This parameter is not case-sensitive.\n Constraints:\n Must match the identifier of an existing DBCluster.\n Example: my-cluster1\n \n\n :type Tags: list\n :param Tags: The tags to be assigned to the DB cluster snapshot.\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'DBClusterSnapshot': {\n 'AvailabilityZones': [\n 'string',\n ],\n 'DBClusterSnapshotIdentifier': 'string',\n 'DBClusterIdentifier': 'string',\n 'SnapshotCreateTime': datetime(2015, 1, 1),\n 'Engine': 'string',\n 'AllocatedStorage': 123,\n 'Status': 'string',\n 'Port': 123,\n 'VpcId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1),\n 'MasterUsername': 'string',\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'SnapshotType': 'string',\n 'PercentProgress': 123,\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DBClusterSnapshotArn': 'string',\n 'SourceDBClusterSnapshotArn': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_db_instance(DBName=None, DBInstanceIdentifier=None, AllocatedStorage=None, DBInstanceClass=None, Engine=None, MasterUsername=None, MasterUserPassword=None, DBSecurityGroups=None, VpcSecurityGroupIds=None, AvailabilityZone=None, DBSubnetGroupName=None, PreferredMaintenanceWindow=None, DBParameterGroupName=None, BackupRetentionPeriod=None, PreferredBackupWindow=None, Port=None, MultiAZ=None, EngineVersion=None, AutoMinorVersionUpgrade=None, LicenseModel=None, Iops=None, OptionGroupName=None, CharacterSetName=None, PubliclyAccessible=None, Tags=None, DBClusterIdentifier=None, StorageType=None, TdeCredentialArn=None, TdeCredentialPassword=None, StorageEncrypted=None, KmsKeyId=None, Domain=None, CopyTagsToSnapshot=None, MonitoringInterval=None, MonitoringRoleArn=None, DomainIAMRoleName=None, PromotionTier=None, Timezone=None, EnableIAMDatabaseAuthentication=None, EnablePerformanceInsights=None, PerformanceInsightsKMSKeyId=None, EnableCloudwatchLogsExports=None):\n \"\"\"\n Creates a new DB instance.\n See also: AWS API Documentation\n \n \n :example: response = client.create_db_instance(\n DBName='string',\n DBInstanceIdentifier='string',\n AllocatedStorage=123,\n DBInstanceClass='string',\n Engine='string',\n MasterUsername='string',\n MasterUserPassword='string',\n DBSecurityGroups=[\n 'string',\n ],\n VpcSecurityGroupIds=[\n 'string',\n ],\n AvailabilityZone='string',\n DBSubnetGroupName='string',\n PreferredMaintenanceWindow='string',\n DBParameterGroupName='string',\n BackupRetentionPeriod=123,\n PreferredBackupWindow='string',\n Port=123,\n MultiAZ=True|False,\n EngineVersion='string',\n AutoMinorVersionUpgrade=True|False,\n LicenseModel='string',\n Iops=123,\n OptionGroupName='string',\n CharacterSetName='string',\n PubliclyAccessible=True|False,\n Tags=[\n {\n 
'Key': 'string',\n 'Value': 'string'\n },\n ],\n DBClusterIdentifier='string',\n StorageType='string',\n TdeCredentialArn='string',\n TdeCredentialPassword='string',\n StorageEncrypted=True|False,\n KmsKeyId='string',\n Domain='string',\n CopyTagsToSnapshot=True|False,\n MonitoringInterval=123,\n MonitoringRoleArn='string',\n DomainIAMRoleName='string',\n PromotionTier=123,\n Timezone='string',\n EnableIAMDatabaseAuthentication=True|False,\n EnablePerformanceInsights=True|False,\n PerformanceInsightsKMSKeyId='string',\n EnableCloudwatchLogsExports=[\n 'string',\n ]\n )\n \n \n :type DBName: string\n :param DBName: The database name.\n Type: String\n \n\n :type DBInstanceIdentifier: string\n :param DBInstanceIdentifier: [REQUIRED]\n The DB instance identifier. This parameter is stored as a lowercase string.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens.\n First character must be a letter.\n Cannot end with a hyphen or contain two consecutive hyphens.\n Example: mydbinstance\n \n\n :type AllocatedStorage: integer\n :param AllocatedStorage: The amount of storage (in gibibytes) to allocate for the DB instance.\n Type: Integer\n Not applicable. Neptune cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in a Neptune cluster volume.\n \n\n :type DBInstanceClass: string\n :param DBInstanceClass: [REQUIRED]\n The compute and memory capacity of the DB instance, for example, db.m4.large . Not all DB instance classes are available in all AWS Regions.\n \n\n :type Engine: string\n :param Engine: [REQUIRED]\n The name of the database engine to be used for this instance.\n Valid Values: neptune\n \n\n :type MasterUsername: string\n :param MasterUsername: The name for the master user. Not used.\n\n :type MasterUserPassword: string\n :param MasterUserPassword: The password for the master user. The password can include any printable ASCII character except '/', ''', or '@'.\n Not used.\n \n\n :type DBSecurityGroups: list\n :param DBSecurityGroups: A list of DB security groups to associate with this DB instance.\n Default: The default DB security group for the database engine.\n (string) --\n \n\n :type VpcSecurityGroupIds: list\n :param VpcSecurityGroupIds: A list of EC2 VPC security groups to associate with this DB instance.\n Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see CreateDBCluster .\n Default: The default EC2 VPC security group for the DB subnet group's VPC.\n (string) --\n \n\n :type AvailabilityZone: string\n :param AvailabilityZone: The EC2 Availability Zone that the DB instance is created in.\n Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.\n Example: us-east-1d\n Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true . 
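A hedged illustration of a valid combination (instance class, identifiers, and zone are hypothetical placeholders, and client construction is assumed as above):\n response = client.create_db_instance(\n DBInstanceIdentifier='mydbinstance',\n DBInstanceClass='db.r4.large',\n Engine='neptune',\n DBClusterIdentifier='my-cluster1',\n AvailabilityZone='us-east-1d', # allowed because MultiAZ is not true\n MultiAZ=False\n )\n 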
The specified Availability Zone must be in the same AWS Region as the current endpoint.\n \n\n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: A DB subnet group to associate with this DB instance.\n If there is no DB subnet group, then it is a non-VPC DB instance.\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC).\n Format: ddd:hh24:mi-ddd:hh24:mi\n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.\n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n Constraints: Minimum 30-minute window.\n \n\n :type DBParameterGroupName: string\n :param DBParameterGroupName: The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.\n Constraints:\n Must be 1 to 255 letters, numbers, or hyphens.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n \n\n :type BackupRetentionPeriod: integer\n :param BackupRetentionPeriod: The number of days for which automated backups are retained.\n Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see CreateDBCluster .\n Default: 1\n Constraints:\n Must be a value from 0 to 35\n Cannot be set to 0 if the DB instance is a source to Read Replicas\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The daily time range during which automated backups are created.\n Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see CreateDBCluster .\n \n\n :type Port: integer\n :param Port: The port number on which the database accepts connections.\n Not applicable. The port is managed by the DB cluster. For more information, see CreateDBCluster .\n Default: 8182\n Type: Integer\n \n\n :type MultiAZ: boolean\n :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the MultiAZ parameter is set to true.\n\n :type EngineVersion: string\n :param EngineVersion: The version number of the database engine to use.\n\n :type AutoMinorVersionUpgrade: boolean\n :param AutoMinorVersionUpgrade: Indicates that minor engine upgrades are applied automatically to the DB instance during the maintenance window.\n Default: true\n \n\n :type LicenseModel: string\n :param LicenseModel: License model information for this DB instance.\n Valid values: license-included | bring-your-own-license | general-public-license\n \n\n :type Iops: integer\n :param Iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.\n\n :type OptionGroupName: string\n :param OptionGroupName: Indicates that the DB instance should be associated with the specified option group.\n Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance\n \n\n :type CharacterSetName: string\n :param CharacterSetName: Indicates that the DB instance should be associated with the specified CharacterSet.\n Not applicable. The character set is managed by the DB cluster. 
For more information, see CreateDBCluster .\n \n\n :type PubliclyAccessible: boolean\n :param PubliclyAccessible: This parameter is not supported.\n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: The identifier of the DB cluster that the instance will belong to.\n For information on creating a DB cluster, see CreateDBCluster .\n Type: String\n \n\n :type StorageType: string\n :param StorageType: Specifies the storage type to be associated with the DB instance.\n Not applicable. Storage is managed by the DB Cluster.\n \n\n :type TdeCredentialArn: string\n :param TdeCredentialArn: The ARN from the key store with which to associate the instance for TDE encryption.\n\n :type TdeCredentialPassword: string\n :param TdeCredentialPassword: The password for the given ARN from the key store in order to access the device.\n\n :type StorageEncrypted: boolean\n :param StorageEncrypted: Specifies whether the DB instance is encrypted.\n Not applicable. The encryption for DB instances is managed by the DB cluster. For more information, see CreateDBCluster .\n Default: false\n \n\n :type KmsKeyId: string\n :param KmsKeyId: The AWS KMS key identifier for an encrypted DB instance.\n The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key.\n Not applicable. The KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster .\n If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon Neptune will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.\n \n\n :type Domain: string\n :param Domain: Specify the Active Directory Domain to create the instance in.\n\n :type CopyTagsToSnapshot: boolean\n :param CopyTagsToSnapshot: True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.\n\n :type MonitoringInterval: integer\n :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. 
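For example, to collect Enhanced Monitoring metrics every 60 seconds, a hedged fragment of the create_db_instance call might include (the role ARN is a hypothetical placeholder):\n MonitoringInterval=60,\n MonitoringRoleArn='arn:aws:iam::123456789012:role/emaccess' # required once the interval is non-zero\n 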
The default is 0.\n If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.\n Valid Values: 0, 1, 5, 10, 15, 30, 60\n \n\n :type MonitoringRoleArn: string\n :param MonitoringRoleArn: The ARN for the IAM role that permits Neptune to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess .\n If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.\n \n\n :type DomainIAMRoleName: string\n :param DomainIAMRoleName: Specify the name of the IAM role to be used when making API calls to the Directory Service.\n\n :type PromotionTier: integer\n :param PromotionTier: A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance.\n Default: 1\n Valid Values: 0 - 15\n \n\n :type Timezone: string\n :param Timezone: The time zone of the DB instance.\n\n :type EnableIAMDatabaseAuthentication: boolean\n :param EnableIAMDatabaseAuthentication: True to enable AWS Identity and Access Management (IAM) authentication for Neptune.\n Default: false\n \n\n :type EnablePerformanceInsights: boolean\n :param EnablePerformanceInsights: True to enable Performance Insights for the DB instance, and otherwise false.\n\n :type PerformanceInsightsKMSKeyId: string\n :param PerformanceInsightsKMSKeyId: The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.\n\n :type EnableCloudwatchLogsExports: list\n :param EnableCloudwatchLogsExports: The list of log types that need to be enabled for exporting to CloudWatch Logs.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DBInstance': {\n 'DBInstanceIdentifier': 'string',\n 'DBInstanceClass': 'string',\n 'Engine': 'string',\n 'DBInstanceStatus': 'string',\n 'MasterUsername': 'string',\n 'DBName': 'string',\n 'Endpoint': {\n 'Address': 'string',\n 'Port': 123,\n 'HostedZoneId': 'string'\n },\n 'AllocatedStorage': 123,\n 'InstanceCreateTime': datetime(2015, 1, 1),\n 'PreferredBackupWindow': 'string',\n 'BackupRetentionPeriod': 123,\n 'DBSecurityGroups': [\n {\n 'DBSecurityGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'DBParameterGroups': [\n {\n 'DBParameterGroupName': 'string',\n 'ParameterApplyStatus': 'string'\n },\n ],\n 'AvailabilityZone': 'string',\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n },\n 'PreferredMaintenanceWindow': 'string',\n 'PendingModifiedValues': {\n 'DBInstanceClass': 'string',\n 'AllocatedStorage': 123,\n 'MasterUserPassword': 'string',\n 'Port': 123,\n 'BackupRetentionPeriod': 123,\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'DBInstanceIdentifier': 'string',\n 'StorageType': 'string',\n 'CACertificateIdentifier': 'string',\n 'DBSubnetGroupName': 'string',\n 'PendingCloudwatchLogsExports': {\n 'LogTypesToEnable': [\n 'string',\n ],\n 'LogTypesToDisable': [\n 'string',\n ]\n }\n },\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'MultiAZ': True|False,\n 'EngineVersion': 
'string',\n 'AutoMinorVersionUpgrade': True|False,\n 'ReadReplicaSourceDBInstanceIdentifier': 'string',\n 'ReadReplicaDBInstanceIdentifiers': [\n 'string',\n ],\n 'ReadReplicaDBClusterIdentifiers': [\n 'string',\n ],\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'OptionGroupMemberships': [\n {\n 'OptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'CharacterSetName': 'string',\n 'SecondaryAvailabilityZone': 'string',\n 'PubliclyAccessible': True|False,\n 'StatusInfos': [\n {\n 'StatusType': 'string',\n 'Normal': True|False,\n 'Status': 'string',\n 'Message': 'string'\n },\n ],\n 'StorageType': 'string',\n 'TdeCredentialArn': 'string',\n 'DbInstancePort': 123,\n 'DBClusterIdentifier': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbiResourceId': 'string',\n 'CACertificateIdentifier': 'string',\n 'DomainMemberships': [\n {\n 'Domain': 'string',\n 'Status': 'string',\n 'FQDN': 'string',\n 'IAMRoleName': 'string'\n },\n ],\n 'CopyTagsToSnapshot': True|False,\n 'MonitoringInterval': 123,\n 'EnhancedMonitoringResourceArn': 'string',\n 'MonitoringRoleArn': 'string',\n 'PromotionTier': 123,\n 'DBInstanceArn': 'string',\n 'Timezone': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'PerformanceInsightsEnabled': True|False,\n 'PerformanceInsightsKMSKeyId': 'string',\n 'EnabledCloudwatchLogsExports': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n ModifyDBInstance\n RebootDBInstance\n \n \"\"\"\n pass\n\ndef create_db_parameter_group(DBParameterGroupName=None, DBParameterGroupFamily=None, Description=None, Tags=None):\n \"\"\"\n Creates a new DB parameter group.\n A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup . Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance . When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.\n See also: AWS API Documentation\n \n \n :example: response = client.create_db_parameter_group(\n DBParameterGroupName='string',\n DBParameterGroupFamily='string',\n Description='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type DBParameterGroupName: string\n :param DBParameterGroupName: [REQUIRED]\n The name of the DB parameter group.\n Constraints:\n Must be 1 to 255 letters, numbers, or hyphens.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Note\n This value is stored as a lowercase string.\n \n\n :type DBParameterGroupFamily: string\n :param DBParameterGroupFamily: [REQUIRED]\n The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n \n\n :type Description: string\n :param Description: [REQUIRED]\n The description for the DB parameter group.\n \n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. 
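The create-modify-associate-reboot sequence described above, as a hedged sketch (the group, family, parameter, and instance names are hypothetical placeholders; client construction is assumed as above):\n client.create_db_parameter_group(\n DBParameterGroupName='my-db-params',\n DBParameterGroupFamily='neptune1',\n Description='Custom instance parameters'\n )\n client.modify_db_parameter_group(\n DBParameterGroupName='my-db-params',\n Parameters=[{'ParameterName': 'neptune_query_timeout',\n 'ParameterValue': '60000',\n 'ApplyMethod': 'pending-reboot'}]\n )\n client.modify_db_instance(\n DBInstanceIdentifier='mydbinstance',\n DBParameterGroupName='my-db-params'\n )\n client.reboot_db_instance(DBInstanceIdentifier='mydbinstance') # apply pending settings\n 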
The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'DBParameterGroup': {\n 'DBParameterGroupName': 'string',\n 'DBParameterGroupFamily': 'string',\n 'Description': 'string',\n 'DBParameterGroupArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_db_subnet_group(DBSubnetGroupName=None, DBSubnetGroupDescription=None, SubnetIds=None, Tags=None):\n \"\"\"\n Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.create_db_subnet_group(\n DBSubnetGroupName='string',\n DBSubnetGroupDescription='string',\n SubnetIds=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: [REQUIRED]\n The name for the DB subnet group. This value is stored as a lowercase string.\n Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.\n Example: mySubnetgroup\n \n\n :type DBSubnetGroupDescription: string\n :param DBSubnetGroupDescription: [REQUIRED]\n The description for the DB subnet group.\n \n\n :type SubnetIds: list\n :param SubnetIds: [REQUIRED]\n The EC2 Subnet IDs for the DB subnet group.\n (string) --\n \n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n }\n }\n \n \n :returns: \n OrderableDBInstanceOption\n \n \"\"\"\n pass\n\ndef create_event_subscription(SubscriptionName=None, SnsTopicArn=None, SourceType=None, EventCategories=None, SourceIds=None, Enabled=None, Tags=None):\n \"\"\"\n Creates an event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the Neptune console, the SNS console, or the SNS API. 
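A hedged sketch of obtaining that ARN through the SNS API (the topic name is a hypothetical placeholder):\n import boto3\n sns = boto3.client('sns')\n # create_topic is idempotent and returns the topic ARN to pass as SnsTopicArn.\n topic_arn = sns.create_topic(Name='neptune-events')['TopicArn']\n 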
To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.\n You can specify the type of source (SourceType) you want to be notified of, provide a list of Neptune sources (SourceIds) that trigger the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.\n If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your Neptune sources. If you specify neither the SourceType nor the SourceIdentifier, you are notified of events generated from all Neptune sources belonging to your customer account.\n See also: AWS API Documentation\n \n \n :example: response = client.create_event_subscription(\n SubscriptionName='string',\n SnsTopicArn='string',\n SourceType='string',\n EventCategories=[\n 'string',\n ],\n SourceIds=[\n 'string',\n ],\n Enabled=True|False,\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type SubscriptionName: string\n :param SubscriptionName: [REQUIRED]\n The name of the subscription.\n Constraints: The name must be less than 255 characters.\n \n\n :type SnsTopicArn: string\n :param SnsTopicArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.\n \n\n :type SourceType: string\n :param SourceType: The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.\n Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot\n \n\n :type EventCategories: list\n :param EventCategories: A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType by using the DescribeEventCategories action.\n (string) --\n \n\n :type SourceIds: list\n :param SourceIds: The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.\n Constraints:\n If SourceIds are supplied, SourceType must also be provided.\n If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.\n If the source type is a DB security group, a DBSecurityGroupName must be supplied.\n If the source type is a DB parameter group, a DBParameterGroupName must be supplied.\n If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.\n (string) --\n \n\n :type Enabled: boolean\n :param Enabled: A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.\n\n :type Tags: list\n :param Tags: A list of tags. 
For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :rtype: dict\n :return: {\n 'EventSubscription': {\n 'CustomerAwsId': 'string',\n 'CustSubscriptionId': 'string',\n 'SnsTopicArn': 'string',\n 'Status': 'string',\n 'SubscriptionCreationTime': 'string',\n 'SourceType': 'string',\n 'SourceIdsList': [\n 'string',\n ],\n 'EventCategoriesList': [\n 'string',\n ],\n 'Enabled': True|False,\n 'EventSubscriptionArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_db_cluster(DBClusterIdentifier=None, SkipFinalSnapshot=None, FinalDBSnapshotIdentifier=None):\n \"\"\"\n The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_db_cluster(\n DBClusterIdentifier='string',\n SkipFinalSnapshot=True|False,\n FinalDBSnapshotIdentifier='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive.\n Constraints:\n Must match an existing DBClusterIdentifier.\n \n\n :type SkipFinalSnapshot: boolean\n :param SkipFinalSnapshot: Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If true is specified, no DB cluster snapshot is created. 
If false is specified, a DB cluster snapshot is created before the DB cluster is deleted.\n Note\n You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is false .\n Default: false\n \n\n :type FinalDBSnapshotIdentifier: string\n :param FinalDBSnapshotIdentifier: The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is set to false .\n Note\n Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.\n Constraints:\n Must be 1 to 255 letters, numbers, or hyphens.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n \n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_db_cluster_parameter_group(DBClusterParameterGroupName=None):\n \"\"\"\n Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_db_cluster_parameter_group(\n DBClusterParameterGroupName='string'\n )\n \n \n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: [REQUIRED]\n The name of the DB cluster parameter group.\n Constraints:\n Must be the name of an existing DB cluster parameter group.\n You can't delete a default DB cluster parameter group.\n Cannot be associated with any DB clusters.\n \n\n \"\"\"\n pass\n\ndef delete_db_cluster_snapshot(DBClusterSnapshotIdentifier=None):\n \"\"\"\n Deletes a DB cluster snapshot. 
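A hedged sketch tying the two delete operations together (identifiers are hypothetical): delete a cluster while keeping a final snapshot, then remove that snapshot once it is available and no longer needed:\n client.delete_db_cluster(\n DBClusterIdentifier='my-cluster1',\n SkipFinalSnapshot=False,\n FinalDBSnapshotIdentifier='my-cluster1-final'\n )\n # Later, when the final snapshot is no longer required:\n client.delete_db_cluster_snapshot(DBClusterSnapshotIdentifier='my-cluster1-final')\n 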
If the snapshot is being copied, the copy operation is terminated.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_db_cluster_snapshot(\n DBClusterSnapshotIdentifier='string'\n )\n \n \n :type DBClusterSnapshotIdentifier: string\n :param DBClusterSnapshotIdentifier: [REQUIRED]\n The identifier of the DB cluster snapshot to delete.\n Constraints: Must be the name of an existing DB cluster snapshot in the available state.\n \n\n :rtype: dict\n :return: {\n 'DBClusterSnapshot': {\n 'AvailabilityZones': [\n 'string',\n ],\n 'DBClusterSnapshotIdentifier': 'string',\n 'DBClusterIdentifier': 'string',\n 'SnapshotCreateTime': datetime(2015, 1, 1),\n 'Engine': 'string',\n 'AllocatedStorage': 123,\n 'Status': 'string',\n 'Port': 123,\n 'VpcId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1),\n 'MasterUsername': 'string',\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'SnapshotType': 'string',\n 'PercentProgress': 123,\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DBClusterSnapshotArn': 'string',\n 'SourceDBClusterSnapshotArn': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_db_instance(DBInstanceIdentifier=None, SkipFinalSnapshot=None, FinalDBSnapshotIdentifier=None):\n \"\"\"\n The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.\n If you request a final DB snapshot the status of the Amazon Neptune DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.\n Note that when a DB instance is in a failure state and has a status of failed , incompatible-restore , or incompatible-network , you can only delete it when the SkipFinalSnapshot parameter is set to true .\n If the specified DB instance is part of a DB cluster, you can't delete the DB instance if both of the following conditions are true:\n To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_db_instance(\n DBInstanceIdentifier='string',\n SkipFinalSnapshot=True|False,\n FinalDBSnapshotIdentifier='string'\n )\n \n \n :type DBInstanceIdentifier: string\n :param DBInstanceIdentifier: [REQUIRED]\n The DB instance identifier for the DB instance to be deleted. This parameter isn't case-sensitive.\n Constraints:\n Must match the name of an existing DB instance.\n \n\n :type SkipFinalSnapshot: boolean\n :param SkipFinalSnapshot: Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. 
If false is specified, a DB snapshot is created before the DB instance is deleted.\n Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to 'true'.\n Specify true when deleting a Read Replica.\n Note\n The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false .\n Default: false\n \n\n :type FinalDBSnapshotIdentifier: string\n :param FinalDBSnapshotIdentifier: The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false .\n Note\n Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.\n Constraints:\n Must be 1 to 255 letters or numbers.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Cannot be specified when deleting a Read Replica.\n \n\n :rtype: dict\n :return: {\n 'DBInstance': {\n 'DBInstanceIdentifier': 'string',\n 'DBInstanceClass': 'string',\n 'Engine': 'string',\n 'DBInstanceStatus': 'string',\n 'MasterUsername': 'string',\n 'DBName': 'string',\n 'Endpoint': {\n 'Address': 'string',\n 'Port': 123,\n 'HostedZoneId': 'string'\n },\n 'AllocatedStorage': 123,\n 'InstanceCreateTime': datetime(2015, 1, 1),\n 'PreferredBackupWindow': 'string',\n 'BackupRetentionPeriod': 123,\n 'DBSecurityGroups': [\n {\n 'DBSecurityGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'DBParameterGroups': [\n {\n 'DBParameterGroupName': 'string',\n 'ParameterApplyStatus': 'string'\n },\n ],\n 'AvailabilityZone': 'string',\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n },\n 'PreferredMaintenanceWindow': 'string',\n 'PendingModifiedValues': {\n 'DBInstanceClass': 'string',\n 'AllocatedStorage': 123,\n 'MasterUserPassword': 'string',\n 'Port': 123,\n 'BackupRetentionPeriod': 123,\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'DBInstanceIdentifier': 'string',\n 'StorageType': 'string',\n 'CACertificateIdentifier': 'string',\n 'DBSubnetGroupName': 'string',\n 'PendingCloudwatchLogsExports': {\n 'LogTypesToEnable': [\n 'string',\n ],\n 'LogTypesToDisable': [\n 'string',\n ]\n }\n },\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'AutoMinorVersionUpgrade': True|False,\n 'ReadReplicaSourceDBInstanceIdentifier': 'string',\n 'ReadReplicaDBInstanceIdentifiers': [\n 'string',\n ],\n 'ReadReplicaDBClusterIdentifiers': [\n 'string',\n ],\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'OptionGroupMemberships': [\n {\n 'OptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'CharacterSetName': 'string',\n 'SecondaryAvailabilityZone': 'string',\n 'PubliclyAccessible': True|False,\n 'StatusInfos': [\n {\n 'StatusType': 'string',\n 'Normal': True|False,\n 'Status': 'string',\n 'Message': 'string'\n },\n ],\n 'StorageType': 'string',\n 'TdeCredentialArn': 'string',\n 'DbInstancePort': 123,\n 'DBClusterIdentifier': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbiResourceId': 'string',\n 'CACertificateIdentifier': 
'string',\n 'DomainMemberships': [\n {\n 'Domain': 'string',\n 'Status': 'string',\n 'FQDN': 'string',\n 'IAMRoleName': 'string'\n },\n ],\n 'CopyTagsToSnapshot': True|False,\n 'MonitoringInterval': 123,\n 'EnhancedMonitoringResourceArn': 'string',\n 'MonitoringRoleArn': 'string',\n 'PromotionTier': 123,\n 'DBInstanceArn': 'string',\n 'Timezone': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'PerformanceInsightsEnabled': True|False,\n 'PerformanceInsightsKMSKeyId': 'string',\n 'EnabledCloudwatchLogsExports': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n DBInstanceIdentifier (string) -- [REQUIRED]\n The DB instance identifier for the DB instance to be deleted. This parameter isn't case-sensitive.\n Constraints:\n \n Must match the name of an existing DB instance.\n \n \n SkipFinalSnapshot (boolean) -- Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.\n Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to \"true\".\n Specify true when deleting a Read Replica.\n \n Note\n The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false .\n \n Default: false\n \n FinalDBSnapshotIdentifier (string) -- The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false .\n \n Note\n Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.\n \n Constraints:\n \n Must be 1 to 255 letters or numbers.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Cannot be specified when deleting a Read Replica.\n \n \n \n \"\"\"\n pass\n\ndef delete_db_parameter_group(DBParameterGroupName=None):\n \"\"\"\n Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted can't be associated with any DB instances.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_db_parameter_group(\n DBParameterGroupName='string'\n )\n \n \n :type DBParameterGroupName: string\n :param DBParameterGroupName: [REQUIRED]\n The name of the DB parameter group.\n Constraints:\n Must be the name of an existing DB parameter group\n You can't delete a default DB parameter group\n Cannot be associated with any DB instances\n \n\n \"\"\"\n pass\n\ndef delete_db_subnet_group(DBSubnetGroupName=None):\n \"\"\"\n Deletes a DB subnet group.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_db_subnet_group(\n DBSubnetGroupName='string'\n )\n \n \n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: [REQUIRED]\n The name of the database subnet group to delete.\n Note\n You can't delete the default subnet group.\n Constraints:\n Constraints: Must match the name of an existing DBSubnetGroup. 
Must not be default.\n Example: mySubnetgroup\n \n\n \"\"\"\n pass\n\ndef delete_event_subscription(SubscriptionName=None):\n \"\"\"\n Deletes an event notification subscription.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_event_subscription(\n SubscriptionName='string'\n )\n \n \n :type SubscriptionName: string\n :param SubscriptionName: [REQUIRED]\n The name of the event notification subscription you want to delete.\n \n\n :rtype: dict\n :return: {\n 'EventSubscription': {\n 'CustomerAwsId': 'string',\n 'CustSubscriptionId': 'string',\n 'SnsTopicArn': 'string',\n 'Status': 'string',\n 'SubscriptionCreationTime': 'string',\n 'SourceType': 'string',\n 'SourceIdsList': [\n 'string',\n ],\n 'EventCategoriesList': [\n 'string',\n ],\n 'Enabled': True|False,\n 'EventSubscriptionArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_db_cluster_parameter_groups(DBClusterParameterGroupName=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_cluster_parameter_groups(\n DBClusterParameterGroupName='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: The name of a specific DB cluster parameter group to return details for.\n Constraints:\n If supplied, must match the name of an existing DBClusterParameterGroup.\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. 
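For example, a sketch of the resulting pagination loop, assuming the boto3 'neptune' client from the earlier sketch:\n marker = None\n while True:\n     kwargs = {'MaxRecords': 20}\n     if marker:\n         kwargs['Marker'] = marker\n     page = client.describe_db_cluster_parameter_groups(**kwargs)\n     for group in page['DBClusterParameterGroups']:\n         print(group['DBClusterParameterGroupName'])\n     marker = page.get('Marker')\n     if marker is None:\n         break\n 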
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBClusterParameterGroups': [\n {\n 'DBClusterParameterGroupName': 'string',\n 'DBParameterGroupFamily': 'string',\n 'Description': 'string',\n 'DBClusterParameterGroupArn': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_db_cluster_parameters(DBClusterParameterGroupName=None, Source=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns the detailed parameter list for a particular DB cluster parameter group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_cluster_parameters(\n DBClusterParameterGroupName='string',\n Source='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: [REQUIRED]\n The name of a specific DB cluster parameter group to return parameter details for.\n Constraints:\n If supplied, must match the name of an existing DBClusterParameterGroup.\n \n\n :type Source: string\n :param Source: A value that indicates to return only parameters for a specific source. Parameter sources can be engine , service , or customer .\n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ],\n 'Marker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_db_cluster_snapshot_attributes(DBClusterSnapshotIdentifier=None):\n \"\"\"\n Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.\n When sharing snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. 
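For example, a sketch that inspects that attribute, assuming a boto3 'neptune' client and a hypothetical snapshot identifier:\n result = client.describe_db_cluster_snapshot_attributes(\n     DBClusterSnapshotIdentifier='my-cluster-snapshot'\n )['DBClusterSnapshotAttributesResult']\n for attr in result['DBClusterSnapshotAttributes']:\n     if attr['AttributeName'] == 'restore':\n         # account IDs authorized to copy or restore the snapshot\n         print(attr['AttributeValues'])\n 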
If all is included in the list of values for the restore attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.\n To add or remove access for an AWS account to copy or restore a manual DB cluster snapshot, or to make the manual DB cluster snapshot public or private, use the ModifyDBClusterSnapshotAttribute API action.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_cluster_snapshot_attributes(\n DBClusterSnapshotIdentifier='string'\n )\n \n \n :type DBClusterSnapshotIdentifier: string\n :param DBClusterSnapshotIdentifier: [REQUIRED]\n The identifier for the DB cluster snapshot to describe the attributes for.\n \n\n :rtype: dict\n :return: {\n 'DBClusterSnapshotAttributesResult': {\n 'DBClusterSnapshotIdentifier': 'string',\n 'DBClusterSnapshotAttributes': [\n {\n 'AttributeName': 'string',\n 'AttributeValues': [\n 'string',\n ]\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_db_cluster_snapshots(DBClusterIdentifier=None, DBClusterSnapshotIdentifier=None, SnapshotType=None, Filters=None, MaxRecords=None, Marker=None, IncludeShared=None, IncludePublic=None):\n \"\"\"\n Returns information about DB cluster snapshots. This API action supports pagination.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_cluster_snapshots(\n DBClusterIdentifier='string',\n DBClusterSnapshotIdentifier='string',\n SnapshotType='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string',\n IncludeShared=True|False,\n IncludePublic=True|False\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: The ID of the DB cluster to retrieve the list of DB cluster snapshots for. This parameter can't be used in conjunction with the DBClusterSnapshotIdentifier parameter. This parameter is not case-sensitive.\n Constraints:\n If supplied, must match the identifier of an existing DBCluster.\n \n\n :type DBClusterSnapshotIdentifier: string\n :param DBClusterSnapshotIdentifier: A specific DB cluster snapshot identifier to describe. This parameter can't be used in conjunction with the DBClusterIdentifier parameter. This value is stored as a lowercase string.\n Constraints:\n If supplied, must match the identifier of an existing DBClusterSnapshot.\n If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.\n \n\n :type SnapshotType: string\n :param SnapshotType: The type of DB cluster snapshots to be returned. You can specify one of the following values:\n automated - Return all DB cluster snapshots that have been automatically taken by Amazon Neptune for my AWS account.\n manual - Return all DB cluster snapshots that have been taken by my AWS account.\n shared - Return all manual DB cluster snapshots that have been shared to my AWS account.\n public - Return all DB cluster snapshots that have been marked as public.\n If you don't specify a SnapshotType value, then both automated and manual DB cluster snapshots are returned. You can include shared DB cluster snapshots with these results by setting the IncludeShared parameter to true . You can include public DB cluster snapshots with these results by setting the IncludePublic parameter to true .\n The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated . The IncludePublic parameter doesn't apply when SnapshotType is set to shared . 
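For example, a sketch combining the two, assuming a boto3 'neptune' client:\n # manual snapshots owned by this account plus those shared with it\n response = client.describe_db_cluster_snapshots(\n     SnapshotType='manual',\n     IncludeShared=True\n )\n 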
The IncludeShared parameter doesn't apply when SnapshotType is set to public .\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :type IncludeShared: boolean\n :param IncludeShared: True to include shared manual DB cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false. The default is false .\n You can give an AWS account permission to restore a manual DB cluster snapshot from another AWS account by the ModifyDBClusterSnapshotAttribute API action.\n \n\n :type IncludePublic: boolean\n :param IncludePublic: True to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false. The default is false .\n You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.\n \n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBClusterSnapshots': [\n {\n 'AvailabilityZones': [\n 'string',\n ],\n 'DBClusterSnapshotIdentifier': 'string',\n 'DBClusterIdentifier': 'string',\n 'SnapshotCreateTime': datetime(2015, 1, 1),\n 'Engine': 'string',\n 'AllocatedStorage': 123,\n 'Status': 'string',\n 'Port': 123,\n 'VpcId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1),\n 'MasterUsername': 'string',\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'SnapshotType': 'string',\n 'PercentProgress': 123,\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DBClusterSnapshotArn': 'string',\n 'SourceDBClusterSnapshotArn': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_db_clusters(DBClusterIdentifier=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns information about provisioned DB clusters. This API supports pagination.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_clusters(\n DBClusterIdentifier='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: The user-supplied DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case-sensitive.\n Constraints:\n If supplied, must match an existing DBClusterIdentifier.\n \n\n :type Filters: list\n :param Filters: A filter that specifies one or more DB clusters to describe.\n Supported filters:\n db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). 
The results list will only include information about the DB clusters identified by these ARNs.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBClusters': [\n {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_db_engine_versions(Engine=None, EngineVersion=None, DBParameterGroupFamily=None, Filters=None, MaxRecords=None, Marker=None, DefaultOnly=None, ListSupportedCharacterSets=None, ListSupportedTimezones=None):\n \"\"\"\n Returns a list of the available DB engines.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_engine_versions(\n Engine='string',\n EngineVersion='string',\n DBParameterGroupFamily='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string',\n DefaultOnly=True|False,\n ListSupportedCharacterSets=True|False,\n ListSupportedTimezones=True|False\n )\n \n \n :type Engine: string\n :param Engine: The database engine to return.\n\n :type EngineVersion: string\n :param EngineVersion: The database engine version to return.\n Example: 5.1.49\n \n\n :type DBParameterGroupFamily: string\n :param DBParameterGroupFamily: The name of a specific DB parameter group family to return details for.\n 
Constraints:\n If supplied, must match an existing DBParameterGroupFamily.\n \n\n :type Filters: list\n :param Filters: Not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :type DefaultOnly: boolean\n :param DefaultOnly: Indicates that only the default version of the specified engine or engine and major version combination is returned.\n\n :type ListSupportedCharacterSets: boolean\n :param ListSupportedCharacterSets: If this parameter is specified and the requested engine supports the CharacterSetName parameter for CreateDBInstance , the response includes a list of supported character sets for each engine version.\n\n :type ListSupportedTimezones: boolean\n :param ListSupportedTimezones: If this parameter is specified and the requested engine supports the TimeZone parameter for CreateDBInstance , the response includes a list of supported time zones for each engine version.\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBEngineVersions': [\n {\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'DBParameterGroupFamily': 'string',\n 'DBEngineDescription': 'string',\n 'DBEngineVersionDescription': 'string',\n 'DefaultCharacterSet': {\n 'CharacterSetName': 'string',\n 'CharacterSetDescription': 'string'\n },\n 'SupportedCharacterSets': [\n {\n 'CharacterSetName': 'string',\n 'CharacterSetDescription': 'string'\n },\n ],\n 'ValidUpgradeTarget': [\n {\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'Description': 'string',\n 'AutoUpgrade': True|False,\n 'IsMajorVersionUpgrade': True|False\n },\n ],\n 'SupportedTimezones': [\n {\n 'TimezoneName': 'string'\n },\n ],\n 'ExportableLogTypes': [\n 'string',\n ],\n 'SupportsLogExportsToCloudwatchLogs': True|False,\n 'SupportsReadReplica': True|False\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_db_instances(DBInstanceIdentifier=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns information about provisioned instances. This API supports pagination.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_instances(\n DBInstanceIdentifier='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBInstanceIdentifier: string\n :param DBInstanceIdentifier: The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.\n Constraints:\n If supplied, must match the identifier of an existing DBInstance.\n \n\n :type Filters: list\n :param Filters: A filter that specifies one or more DB instances to describe.\n Supported filters:\n db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). 
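For instance, a sketch narrowing the results to one cluster's members, assuming a boto3 'neptune' client and a hypothetical cluster identifier:\n response = client.describe_db_instances(\n     Filters=[\n         {'Name': 'db-cluster-id', 'Values': ['my-neptune-cluster']}\n     ]\n )\n for instance in response['DBInstances']:\n     print(instance['DBInstanceIdentifier'], instance['DBInstanceStatus'])\n 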
The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.\n db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBInstances': [\n {\n 'DBInstanceIdentifier': 'string',\n 'DBInstanceClass': 'string',\n 'Engine': 'string',\n 'DBInstanceStatus': 'string',\n 'MasterUsername': 'string',\n 'DBName': 'string',\n 'Endpoint': {\n 'Address': 'string',\n 'Port': 123,\n 'HostedZoneId': 'string'\n },\n 'AllocatedStorage': 123,\n 'InstanceCreateTime': datetime(2015, 1, 1),\n 'PreferredBackupWindow': 'string',\n 'BackupRetentionPeriod': 123,\n 'DBSecurityGroups': [\n {\n 'DBSecurityGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'DBParameterGroups': [\n {\n 'DBParameterGroupName': 'string',\n 'ParameterApplyStatus': 'string'\n },\n ],\n 'AvailabilityZone': 'string',\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n },\n 'PreferredMaintenanceWindow': 'string',\n 'PendingModifiedValues': {\n 'DBInstanceClass': 'string',\n 'AllocatedStorage': 123,\n 'MasterUserPassword': 'string',\n 'Port': 123,\n 'BackupRetentionPeriod': 123,\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'DBInstanceIdentifier': 'string',\n 'StorageType': 'string',\n 'CACertificateIdentifier': 'string',\n 'DBSubnetGroupName': 'string',\n 'PendingCloudwatchLogsExports': {\n 'LogTypesToEnable': [\n 'string',\n ],\n 'LogTypesToDisable': [\n 'string',\n ]\n }\n },\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'AutoMinorVersionUpgrade': True|False,\n 'ReadReplicaSourceDBInstanceIdentifier': 'string',\n 'ReadReplicaDBInstanceIdentifiers': [\n 'string',\n ],\n 'ReadReplicaDBClusterIdentifiers': [\n 'string',\n ],\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'OptionGroupMemberships': [\n {\n 'OptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'CharacterSetName': 'string',\n 'SecondaryAvailabilityZone': 'string',\n 'PubliclyAccessible': True|False,\n 'StatusInfos': [\n {\n 'StatusType': 'string',\n 'Normal': True|False,\n 'Status': 'string',\n 'Message': 'string'\n },\n ],\n 'StorageType': 'string',\n 
'TdeCredentialArn': 'string',\n 'DbInstancePort': 123,\n 'DBClusterIdentifier': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbiResourceId': 'string',\n 'CACertificateIdentifier': 'string',\n 'DomainMemberships': [\n {\n 'Domain': 'string',\n 'Status': 'string',\n 'FQDN': 'string',\n 'IAMRoleName': 'string'\n },\n ],\n 'CopyTagsToSnapshot': True|False,\n 'MonitoringInterval': 123,\n 'EnhancedMonitoringResourceArn': 'string',\n 'MonitoringRoleArn': 'string',\n 'PromotionTier': 123,\n 'DBInstanceArn': 'string',\n 'Timezone': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'PerformanceInsightsEnabled': True|False,\n 'PerformanceInsightsKMSKeyId': 'string',\n 'EnabledCloudwatchLogsExports': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n ModifyDBInstance\n RebootDBInstance\n \n \"\"\"\n pass\n\ndef describe_db_parameter_groups(DBParameterGroupName=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_parameter_groups(\n DBParameterGroupName='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBParameterGroupName: string\n :param DBParameterGroupName: The name of a specific DB parameter group to return details for.\n Constraints:\n If supplied, must match the name of an existing DBClusterParameterGroup.\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBParameterGroups request. 
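Where your botocore version ships a paginator for this operation (an assumption worth verifying), the marker handling can be delegated instead, e.g.:\n paginator = client.get_paginator('describe_db_parameter_groups')\n for page in paginator.paginate():\n     for group in page['DBParameterGroups']:\n         print(group['DBParameterGroupName'])\n 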
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBParameterGroups': [\n {\n 'DBParameterGroupName': 'string',\n 'DBParameterGroupFamily': 'string',\n 'Description': 'string',\n 'DBParameterGroupArn': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_db_parameters(DBParameterGroupName=None, Source=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns the detailed parameter list for a particular DB parameter group.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_parameters(\n DBParameterGroupName='string',\n Source='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBParameterGroupName: string\n :param DBParameterGroupName: [REQUIRED]\n The name of a specific DB parameter group to return details for.\n Constraints:\n If supplied, must match the name of an existing DBParameterGroup.\n \n\n :type Source: string\n :param Source: The parameter types to return.\n Default: All parameter types returned\n Valid Values: user | system | engine-default\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Parameters': [\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ],\n 'Marker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_db_subnet_groups(DBSubnetGroupName=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns a list of DBSubnetGroup descriptions. 
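For example, a sketch listing all groups, assuming a boto3 'neptune' client:\n response = client.describe_db_subnet_groups()\n for group in response['DBSubnetGroups']:\n     print(group['DBSubnetGroupName'], group['VpcId'])\n 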
If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.\n For an overview of CIDR ranges, go to the Wikipedia Tutorial .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_db_subnet_groups(\n DBSubnetGroupName='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: The name of the DB subnet group to return details for.\n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'DBSubnetGroups': [\n {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n },\n ]\n }\n \n \n :returns: \n OrderableDBInstanceOption\n \n \"\"\"\n pass\n\ndef describe_engine_default_cluster_parameters(DBParameterGroupFamily=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns the default engine and system parameter information for the cluster database engine.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_engine_default_cluster_parameters(\n DBParameterGroupFamily='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBParameterGroupFamily: string\n :param DBParameterGroupFamily: [REQUIRED]\n The name of the DB cluster parameter group family to return engine parameter information for.\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters request. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'EngineDefaults': {\n 'DBParameterGroupFamily': 'string',\n 'Marker': 'string',\n 'Parameters': [\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_engine_default_parameters(DBParameterGroupFamily=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns the default engine and system parameter information for the specified database engine.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_engine_default_parameters(\n DBParameterGroupFamily='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type DBParameterGroupFamily: string\n :param DBParameterGroupFamily: [REQUIRED]\n The name of the DB parameter group family.\n \n\n :type Filters: list\n :param Filters: Not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeEngineDefaultParameters request. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'EngineDefaults': {\n 'DBParameterGroupFamily': 'string',\n 'Marker': 'string',\n 'Parameters': [\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_event_categories(SourceType=None, Filters=None):\n \"\"\"\n Displays a list of categories for all event source types, or, if specified, for a specified source type.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_event_categories(\n SourceType='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type SourceType: string\n :param SourceType: The type of source that is generating the events.\n Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'EventCategoriesMapList': [\n {\n 'SourceType': 'string',\n 'EventCategories': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_event_subscriptions(SubscriptionName=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.\n If you specify a SubscriptionName, lists the description for that subscription.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_event_subscriptions(\n SubscriptionName='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type SubscriptionName: string\n :param SubscriptionName: The name of the event notification subscription you want to describe.\n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeEventSubscriptions request. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'EventSubscriptionsList': [\n {\n 'CustomerAwsId': 'string',\n 'CustSubscriptionId': 'string',\n 'SnsTopicArn': 'string',\n 'Status': 'string',\n 'SubscriptionCreationTime': 'string',\n 'SourceType': 'string',\n 'SourceIdsList': [\n 'string',\n ],\n 'EventCategoriesList': [\n 'string',\n ],\n 'Enabled': True|False,\n 'EventSubscriptionArn': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_events(SourceIdentifier=None, SourceType=None, StartTime=None, EndTime=None, Duration=None, EventCategories=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_events(\n SourceIdentifier='string',\n SourceType='db-instance'|'db-parameter-group'|'db-security-group'|'db-snapshot'|'db-cluster'|'db-cluster-snapshot',\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n Duration=123,\n EventCategories=[\n 'string',\n ],\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type SourceIdentifier: string\n :param SourceIdentifier: The identifier of the event source for which events are returned. If not specified, then all sources are included in the response.\n Constraints:\n If SourceIdentifier is supplied, SourceType must also be provided.\n If the source type is DBInstance , then a DBInstanceIdentifier must be supplied.\n If the source type is DBSecurityGroup , a DBSecurityGroupName must be supplied.\n If the source type is DBParameterGroup , a DBParameterGroupName must be supplied.\n If the source type is DBSnapshot , a DBSnapshotIdentifier must be supplied.\n Cannot end with a hyphen or contain two consecutive hyphens.\n \n\n :type SourceType: string\n :param SourceType: The event source to retrieve events for. If no value is specified, all events are returned.\n\n :type StartTime: datetime\n :param StartTime: The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.\n Example: 2009-07-08T18:00Z\n \n\n :type EndTime: datetime\n :param EndTime: The end of the time interval for which to retrieve events, specified in ISO 8601 format. 
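In boto3 these timestamps can be passed as datetime objects; for example, a sketch covering the previous 24 hours, assuming a boto3 'neptune' client:\n from datetime import datetime, timedelta\n now = datetime.utcnow()\n response = client.describe_events(\n     StartTime=now - timedelta(hours=24),\n     EndTime=now\n )\n 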
For more information about ISO 8601, go to the ISO8601 Wikipedia page.\n Example: 2009-07-08T18:00Z\n \n\n :type Duration: integer\n :param Duration: The number of minutes to retrieve events for.\n Default: 60\n \n\n :type EventCategories: list\n :param EventCategories: A list of event categories that trigger notifications for an event notification subscription.\n (string) --\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'Marker': 'string',\n 'Events': [\n {\n 'SourceIdentifier': 'string',\n 'SourceType': 'db-instance'|'db-parameter-group'|'db-security-group'|'db-snapshot'|'db-cluster'|'db-cluster-snapshot',\n 'Message': 'string',\n 'EventCategories': [\n 'string',\n ],\n 'Date': datetime(2015, 1, 1),\n 'SourceArn': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_orderable_db_instance_options(Engine=None, EngineVersion=None, DBInstanceClass=None, LicenseModel=None, Vpc=None, Filters=None, MaxRecords=None, Marker=None):\n \"\"\"\n Returns a list of orderable DB instance options for the specified engine.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_orderable_db_instance_options(\n Engine='string',\n EngineVersion='string',\n DBInstanceClass='string',\n LicenseModel='string',\n Vpc=True|False,\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxRecords=123,\n Marker='string'\n )\n \n \n :type Engine: string\n :param Engine: [REQUIRED]\n The name of the engine to retrieve DB instance options for.\n \n\n :type EngineVersion: string\n :param EngineVersion: The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.\n\n :type DBInstanceClass: string\n :param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.\n\n :type LicenseModel: string\n :param LicenseModel: The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.\n\n :type Vpc: boolean\n :param Vpc: The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.\n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. 
If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'OrderableDBInstanceOptions': [\n {\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'DBInstanceClass': 'string',\n 'LicenseModel': 'string',\n 'AvailabilityZones': [\n {\n 'Name': 'string'\n },\n ],\n 'MultiAZCapable': True|False,\n 'ReadReplicaCapable': True|False,\n 'Vpc': True|False,\n 'SupportsStorageEncryption': True|False,\n 'StorageType': 'string',\n 'SupportsIops': True|False,\n 'SupportsEnhancedMonitoring': True|False,\n 'SupportsIAMDatabaseAuthentication': True|False,\n 'SupportsPerformanceInsights': True|False,\n 'MinStorageSize': 123,\n 'MaxStorageSize': 123,\n 'MinIopsPerDbInstance': 123,\n 'MaxIopsPerDbInstance': 123,\n 'MinIopsPerGib': 123.0,\n 'MaxIopsPerGib': 123.0\n },\n ],\n 'Marker': 'string'\n }\n \n \n :returns: \n OrderableDBInstanceOption\n \n \"\"\"\n pass\n\ndef describe_pending_maintenance_actions(ResourceIdentifier=None, Filters=None, Marker=None, MaxRecords=None):\n \"\"\"\n Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_pending_maintenance_actions(\n ResourceIdentifier='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n Marker='string',\n MaxRecords=123\n )\n \n \n :type ResourceIdentifier: string\n :param ResourceIdentifier: The ARN of a resource to return pending maintenance actions for.\n\n :type Filters: list\n :param Filters: A filter that specifies one or more resources to return pending maintenance actions for.\n Supported filters:\n db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include pending maintenance actions for the DB clusters identified by these ARNs.\n db-instance-id - Accepts DB instance identifiers and DB instance ARNs. The results list will only include pending maintenance actions for the DB instances identified by these ARNs.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :type Marker: string\n :param Marker: An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords .\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of records to include in the response. 
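For example, a sketch walking the nested result, assuming a boto3 'neptune' client:\n resp = client.describe_pending_maintenance_actions(MaxRecords=20)\n for resource in resp['PendingMaintenanceActions']:\n     for action in resource['PendingMaintenanceActionDetails']:\n         print(resource['ResourceIdentifier'], action['Action'])\n 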
If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\n Default: 100\n Constraints: Minimum 20, maximum 100.\n \n\n :rtype: dict\n :return: {\n 'PendingMaintenanceActions': [\n {\n 'ResourceIdentifier': 'string',\n 'PendingMaintenanceActionDetails': [\n {\n 'Action': 'string',\n 'AutoAppliedAfterDate': datetime(2015, 1, 1),\n 'ForcedApplyDate': datetime(2015, 1, 1),\n 'OptInStatus': 'string',\n 'CurrentApplyDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ]\n },\n ],\n 'Marker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_valid_db_instance_modifications(DBInstanceIdentifier=None):\n \"\"\"\n You can call DescribeValidDBInstanceModifications to learn what modifications you can make to your DB instance. You can use this information when you call ModifyDBInstance .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_valid_db_instance_modifications(\n DBInstanceIdentifier='string'\n )\n \n \n :type DBInstanceIdentifier: string\n :param DBInstanceIdentifier: [REQUIRED]\n The customer identifier or the ARN of your DB instance.\n \n\n :rtype: dict\n :return: {\n 'ValidDBInstanceModificationsMessage': {\n 'Storage': [\n {\n 'StorageType': 'string',\n 'StorageSize': [\n {\n 'From': 123,\n 'To': 123,\n 'Step': 123\n },\n ],\n 'ProvisionedIops': [\n {\n 'From': 123,\n 'To': 123,\n 'Step': 123\n },\n ],\n 'IopsToStorageRatio': [\n {\n 'From': 123.0,\n 'To': 123.0\n },\n ]\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef failover_db_cluster(DBClusterIdentifier=None, TargetDBInstanceIdentifier=None):\n \"\"\"\n Forces a failover for a DB cluster.\n A failover for a DB cluster promotes one of the Read Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).\n Amazon Neptune will automatically fail over to a Read Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.\n See also: AWS API Documentation\n \n \n :example: response = client.failover_db_cluster(\n DBClusterIdentifier='string',\n TargetDBInstanceIdentifier='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: A DB cluster identifier to force a failover for. This parameter is not case-sensitive.\n Constraints:\n Must match the identifier of an existing DBCluster.\n \n\n :type TargetDBInstanceIdentifier: string\n :param TargetDBInstanceIdentifier: The name of the instance to promote to the primary instance.\n You must specify the instance identifier for a Read Replica in the DB cluster. 
For example, mydbcluster-replica1 .\n \n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
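For example, botocore's Neptune service data is expected to include DB instance waiters such as 'db_instance_available' (an assumption to verify against your botocore version), allowing a sketch like:\n waiter = client.get_waiter('db_instance_available')\n # polls periodically until the instance reaches the 'available' state\n waiter.wait(DBInstanceIdentifier='my-neptune-instance')\n 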
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_tags_for_resource(ResourceName=None, Filters=None):\n \"\"\"\n Lists all tags on an Amazon Neptune resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n ResourceName='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type ResourceName: string\n :param ResourceName: [REQUIRED]\n The Amazon Neptune resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) .\n \n\n :type Filters: list\n :param Filters: This parameter is not currently supported.\n (dict) --This type is not currently supported.\n Name (string) -- [REQUIRED]This parameter is not currently supported.\n Values (list) -- [REQUIRED]This parameter is not currently supported.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'TagList': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef modify_db_cluster(DBClusterIdentifier=None, NewDBClusterIdentifier=None, ApplyImmediately=None, BackupRetentionPeriod=None, DBClusterParameterGroupName=None, VpcSecurityGroupIds=None, Port=None, MasterUserPassword=None, OptionGroupName=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, EnableIAMDatabaseAuthentication=None, EngineVersion=None):\n \"\"\"\n Modify a setting for a DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_db_cluster(\n DBClusterIdentifier='string',\n NewDBClusterIdentifier='string',\n ApplyImmediately=True|False,\n BackupRetentionPeriod=123,\n DBClusterParameterGroupName='string',\n VpcSecurityGroupIds=[\n 'string',\n ],\n Port=123,\n MasterUserPassword='string',\n OptionGroupName='string',\n PreferredBackupWindow='string',\n PreferredMaintenanceWindow='string',\n EnableIAMDatabaseAuthentication=True|False,\n EngineVersion='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.\n Constraints:\n Must match the identifier of an existing DBCluster.\n \n\n :type NewDBClusterIdentifier: string\n :param NewDBClusterIdentifier: The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens\n The first character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Example: my-cluster2\n \n\n :type ApplyImmediately: boolean\n :param ApplyImmediately: A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false , changes to the DB cluster are applied during the next maintenance window.\n The ApplyImmediately parameter only affects the NewDBClusterIdentifier and MasterUserPassword values. If you set the ApplyImmediately parameter value to false, then changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. 
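For example, a sketch forcing an immediate password change, assuming a boto3 'neptune' client and hypothetical values:\n response = client.modify_db_cluster(\n     DBClusterIdentifier='my-neptune-cluster',\n     MasterUserPassword='replace-with-a-real-password',\n     ApplyImmediately=True\n )\n 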
All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.\n Default: false\n \n\n :type BackupRetentionPeriod: integer\n :param BackupRetentionPeriod: The number of days for which automated backups are retained. You must specify a minimum value of 1.\n Default: 1\n Constraints:\n Must be a value from 1 to 35\n \n\n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: The name of the DB cluster parameter group to use for the DB cluster.\n\n :type VpcSecurityGroupIds: list\n :param VpcSecurityGroupIds: A list of VPC security groups that the DB cluster will belong to.\n (string) --\n \n\n :type Port: integer\n :param Port: The port number on which the DB cluster accepts connections.\n Constraints: Value must be 1150-65535\n Default: The same port as the original DB cluster.\n \n\n :type MasterUserPassword: string\n :param MasterUserPassword: The new password for the master database user. This password can contain any printable ASCII character except '/', ''', or '@'.\n Constraints: Must contain from 8 to 41 characters.\n \n\n :type OptionGroupName: string\n :param OptionGroupName: A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.\n Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.\n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.\n Constraints:\n Must be in the format hh24:mi-hh24:mi .\n Must be in Universal Coordinated Time (UTC).\n Must not conflict with the preferred maintenance window.\n Must be at least 30 minutes.\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n Format: ddd:hh24:mi-ddd:hh24:mi\n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.\n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n Constraints: Minimum 30-minute window.\n \n\n :type EnableIAMDatabaseAuthentication: boolean\n :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.\n Default: false\n \n\n :type EngineVersion: string\n :param EngineVersion: The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. 
The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.\n For a list of valid engine versions, see CreateDBInstance , or call DescribeDBEngineVersions .\n \n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef modify_db_cluster_parameter_group(DBClusterParameterGroupName=None, Parameters=None):\n \"\"\"\n Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName , ParameterValue , and ApplyMethod . 
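\n A non-authoritative sketch of such a list (the group name 'my-cluster-params' and the parameter 'neptune_query_timeout' are assumptions used only to show the shape of the call):\n response = client.modify_db_cluster_parameter_group(\n DBClusterParameterGroupName='my-cluster-params',\n Parameters=[\n {\n 'ParameterName': 'neptune_query_timeout',\n 'ParameterValue': '120000',\n 'ApplyMethod': 'pending-reboot'\n },\n ]\n )\n 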
A maximum of 20 parameters can be modified in a single request.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_db_cluster_parameter_group(\n DBClusterParameterGroupName='string',\n Parameters=[\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ]\n )\n \n \n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: [REQUIRED]\n The name of the DB cluster parameter group to modify.\n \n\n :type Parameters: list\n :param Parameters: [REQUIRED]\n A list of parameters in the DB cluster parameter group to modify.\n (dict) --This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.\n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.\n ParameterName (string) --Specifies the name of the parameter.\n ParameterValue (string) --Specifies the value of the parameter.\n Description (string) --Provides a description of the parameter.\n Source (string) --Indicates the source of the parameter value.\n ApplyType (string) --Specifies the engine specific parameters type.\n DataType (string) --Specifies the valid data type for the parameter.\n AllowedValues (string) --Specifies the valid range of values for the parameter.\n IsModifiable (boolean) --Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n MinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n ApplyMethod (string) --Indicates when to apply parameter updates.\n \n \n\n :rtype: dict\n :return: {\n 'DBClusterParameterGroupName': 'string'\n }\n \n \n :returns: \n Must be 1 to 255 letters or numbers.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n \n \"\"\"\n pass\n\ndef modify_db_cluster_snapshot_attribute(DBClusterSnapshotIdentifier=None, AttributeName=None, ValuesToAdd=None, ValuesToRemove=None):\n \"\"\"\n Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.\n To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. 
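\n A hedged sketch of sharing with a single account (the snapshot identifier and the account ID are placeholders assumed for this example):\n response = client.modify_db_cluster_snapshot_attribute(\n DBClusterSnapshotIdentifier='my-cluster-snapshot',\n AttributeName='restore',\n ValuesToAdd=['123456789012']\n )\n 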
You can't use all as a value for that parameter in this case.\n To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_db_cluster_snapshot_attribute(\n DBClusterSnapshotIdentifier='string',\n AttributeName='string',\n ValuesToAdd=[\n 'string',\n ],\n ValuesToRemove=[\n 'string',\n ]\n )\n \n \n :type DBClusterSnapshotIdentifier: string\n :param DBClusterSnapshotIdentifier: [REQUIRED]\n The identifier for the DB cluster snapshot to modify the attributes for.\n \n\n :type AttributeName: string\n :param AttributeName: [REQUIRED]\n The name of the DB cluster snapshot attribute to modify.\n To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore .\n \n\n :type ValuesToAdd: list\n :param ValuesToAdd: A list of DB cluster snapshot attributes to add to the attribute specified by AttributeName .\n To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs, or all to make the manual DB cluster snapshot restorable by any AWS account. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.\n (string) --\n \n\n :type ValuesToRemove: list\n :param ValuesToRemove: A list of DB cluster snapshot attributes to remove from the attribute specified by AttributeName .\n To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers, or all to remove authorization for any AWS account to copy or restore the DB cluster snapshot. If you specify all , an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual DB cluster snapshot.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DBClusterSnapshotAttributesResult': {\n 'DBClusterSnapshotIdentifier': 'string',\n 'DBClusterSnapshotAttributes': [\n {\n 'AttributeName': 'string',\n 'AttributeValues': [\n 'string',\n ]\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef modify_db_instance(DBInstanceIdentifier=None, AllocatedStorage=None, DBInstanceClass=None, DBSubnetGroupName=None, DBSecurityGroups=None, VpcSecurityGroupIds=None, ApplyImmediately=None, MasterUserPassword=None, DBParameterGroupName=None, BackupRetentionPeriod=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AllowMajorVersionUpgrade=None, AutoMinorVersionUpgrade=None, LicenseModel=None, Iops=None, OptionGroupName=None, NewDBInstanceIdentifier=None, StorageType=None, TdeCredentialArn=None, TdeCredentialPassword=None, CACertificateIdentifier=None, Domain=None, CopyTagsToSnapshot=None, MonitoringInterval=None, DBPortNumber=None, PubliclyAccessible=None, MonitoringRoleArn=None, DomainIAMRoleName=None, PromotionTier=None, EnableIAMDatabaseAuthentication=None, EnablePerformanceInsights=None, PerformanceInsightsKMSKeyId=None, CloudwatchLogsExportConfiguration=None):\n \"\"\"\n Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. 
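\n A minimal, hedged sketch, assuming an existing instance named 'my-instance' (the identifier and instance class are illustrative, not values confirmed here):\n response = client.modify_db_instance(\n DBInstanceIdentifier='my-instance',\n DBInstanceClass='db.r4.large',\n ApplyImmediately=True\n )\n 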
To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance .\n See also: AWS API Documentation\n \n \n :example: response = client.modify_db_instance(\n DBInstanceIdentifier='string',\n AllocatedStorage=123,\n DBInstanceClass='string',\n DBSubnetGroupName='string',\n DBSecurityGroups=[\n 'string',\n ],\n VpcSecurityGroupIds=[\n 'string',\n ],\n ApplyImmediately=True|False,\n MasterUserPassword='string',\n DBParameterGroupName='string',\n BackupRetentionPeriod=123,\n PreferredBackupWindow='string',\n PreferredMaintenanceWindow='string',\n MultiAZ=True|False,\n EngineVersion='string',\n AllowMajorVersionUpgrade=True|False,\n AutoMinorVersionUpgrade=True|False,\n LicenseModel='string',\n Iops=123,\n OptionGroupName='string',\n NewDBInstanceIdentifier='string',\n StorageType='string',\n TdeCredentialArn='string',\n TdeCredentialPassword='string',\n CACertificateIdentifier='string',\n Domain='string',\n CopyTagsToSnapshot=True|False,\n MonitoringInterval=123,\n DBPortNumber=123,\n PubliclyAccessible=True|False,\n MonitoringRoleArn='string',\n DomainIAMRoleName='string',\n PromotionTier=123,\n EnableIAMDatabaseAuthentication=True|False,\n EnablePerformanceInsights=True|False,\n PerformanceInsightsKMSKeyId='string',\n CloudwatchLogsExportConfiguration={\n 'EnableLogTypes': [\n 'string',\n ],\n 'DisableLogTypes': [\n 'string',\n ]\n }\n )\n \n \n :type DBInstanceIdentifier: string\n :param DBInstanceIdentifier: [REQUIRED]\n The DB instance identifier. This value is stored as a lowercase string.\n Constraints:\n Must match the identifier of an existing DBInstance.\n \n\n :type AllocatedStorage: integer\n :param AllocatedStorage: The new amount of storage (in gibibytes) to allocate for the DB instance.\n Not applicable. Storage is managed by the DB Cluster.\n \n\n :type DBInstanceClass: string\n :param DBInstanceClass: The new compute and memory capacity of the DB instance, for example, db.m4.large . Not all DB instance classes are available in all AWS Regions.\n If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.\n Default: Uses existing setting\n \n\n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC.\n Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter.\n Constraints: If supplied, must match the name of an existing DBSubnetGroup.\n Example: mySubnetGroup\n \n\n :type DBSecurityGroups: list\n :param DBSecurityGroups: A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.\n Constraints:\n If supplied, must match existing DBSecurityGroups.\n (string) --\n \n\n :type VpcSecurityGroupIds: list\n :param VpcSecurityGroupIds: A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.\n Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. 
For more information, see ModifyDBCluster .\n Constraints:\n If supplied, must match existing VpcSecurityGroupIds.\n (string) --\n \n\n :type ApplyImmediately: boolean\n :param ApplyImmediately: Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.\n If this parameter is set to false , changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance , or the next failure reboot.\n Default: false\n \n\n :type MasterUserPassword: string\n :param MasterUserPassword: The new password for the master user. The password can include any printable ASCII character except '/', ''', or '@'.\n Not applicable.\n Default: Uses existing setting\n \n\n :type DBParameterGroupName: string\n :param DBParameterGroupName: The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.\n Default: Uses existing setting\n Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.\n \n\n :type BackupRetentionPeriod: integer\n :param BackupRetentionPeriod: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster .\n Default: Uses existing setting\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled.\n Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster .\n Constraints:\n Must be in the format hh24:mi-hh24:mi\n Must be in Universal Time Coordinated (UTC)\n Must not conflict with the preferred maintenance window\n Must be at least 30 minutes\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.\n Default: Uses existing setting\n Format: ddd:hh24:mi-ddd:hh24:mi\n Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun\n Constraints: Must be at least 30 minutes\n \n\n :type MultiAZ: boolean\n :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. 
Changing this parameter doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.\n\n :type EngineVersion: string\n :param EngineVersion: The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.\n For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.\n \n\n :type AllowMajorVersionUpgrade: boolean\n :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.\n Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.\n \n\n :type AutoMinorVersionUpgrade: boolean\n :param AutoMinorVersionUpgrade: Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and Neptune has enabled auto patching for that engine version.\n\n :type LicenseModel: string\n :param LicenseModel: The license model for the DB instance.\n Valid values: license-included | bring-your-own-license | general-public-license\n \n\n :type Iops: integer\n :param Iops: The new Provisioned IOPS (I/O operations per second) value for the instance.\n Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.\n Default: Uses existing setting\n \n\n :type OptionGroupName: string\n :param OptionGroupName: Indicates that the DB instance should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.\n Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance\n \n\n :type NewDBInstanceIdentifier: string\n :param NewDBInstanceIdentifier: The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately to false. 
This value is stored as a lowercase string.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens.\n The first character must be a letter.\n Cannot end with a hyphen or contain two consecutive hyphens.\n Example: mydbinstance\n \n\n :type StorageType: string\n :param StorageType: Specifies the storage type to be associated with the DB instance.\n If you specify Provisioned IOPS (io1 ), you must also include a value for the Iops parameter.\n If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon Neptune operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.\n Valid values: standard | gp2 | io1\n Default: io1 if the Iops parameter is specified, otherwise standard\n \n\n :type TdeCredentialArn: string\n :param TdeCredentialArn: The ARN from the key store with which to associate the instance for TDE encryption.\n\n :type TdeCredentialPassword: string\n :param TdeCredentialPassword: The password for the given ARN from the key store in order to access the device.\n\n :type CACertificateIdentifier: string\n :param CACertificateIdentifier: Indicates the certificate that needs to be associated with the instance.\n\n :type Domain: string\n :param Domain: Not supported.\n\n :type CopyTagsToSnapshot: boolean\n :param CopyTagsToSnapshot: True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.\n\n :type MonitoringInterval: integer\n :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.\n If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.\n Valid Values: 0, 1, 5, 10, 15, 30, 60\n \n\n :type DBPortNumber: integer\n :param DBPortNumber: The port number on which the database accepts connections.\n The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.\n Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.\n Default: 8182\n \n\n :type PubliclyAccessible: boolean\n :param PubliclyAccessible: This parameter is not supported.\n\n :type MonitoringRoleArn: string\n :param MonitoringRoleArn: The ARN for the IAM role that permits Neptune to send enhanced monitoring metrics to Amazon CloudWatch Logs. 
For example, arn:aws:iam:123456789012:role/emaccess .\n If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.\n \n\n :type DomainIAMRoleName: string\n :param DomainIAMRoleName: Not supported\n\n :type PromotionTier: integer\n :param PromotionTier: A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance.\n Default: 1\n Valid Values: 0 - 15\n \n\n :type EnableIAMDatabaseAuthentication: boolean\n :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.\n You can enable IAM database authentication for the following database engines\n Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see ModifyDBCluster .\n Default: false\n \n\n :type EnablePerformanceInsights: boolean\n :param EnablePerformanceInsights: True to enable Performance Insights for the DB instance, and otherwise false.\n\n :type PerformanceInsightsKMSKeyId: string\n :param PerformanceInsightsKMSKeyId: The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.\n\n :type CloudwatchLogsExportConfiguration: dict\n :param CloudwatchLogsExportConfiguration: The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.\n EnableLogTypes (list) --The list of log types to enable.\n (string) --\n DisableLogTypes (list) --The list of log types to disable.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DBInstance': {\n 'DBInstanceIdentifier': 'string',\n 'DBInstanceClass': 'string',\n 'Engine': 'string',\n 'DBInstanceStatus': 'string',\n 'MasterUsername': 'string',\n 'DBName': 'string',\n 'Endpoint': {\n 'Address': 'string',\n 'Port': 123,\n 'HostedZoneId': 'string'\n },\n 'AllocatedStorage': 123,\n 'InstanceCreateTime': datetime(2015, 1, 1),\n 'PreferredBackupWindow': 'string',\n 'BackupRetentionPeriod': 123,\n 'DBSecurityGroups': [\n {\n 'DBSecurityGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'DBParameterGroups': [\n {\n 'DBParameterGroupName': 'string',\n 'ParameterApplyStatus': 'string'\n },\n ],\n 'AvailabilityZone': 'string',\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n },\n 'PreferredMaintenanceWindow': 'string',\n 'PendingModifiedValues': {\n 'DBInstanceClass': 'string',\n 'AllocatedStorage': 123,\n 'MasterUserPassword': 'string',\n 'Port': 123,\n 'BackupRetentionPeriod': 123,\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'DBInstanceIdentifier': 'string',\n 'StorageType': 'string',\n 'CACertificateIdentifier': 'string',\n 'DBSubnetGroupName': 'string',\n 'PendingCloudwatchLogsExports': {\n 'LogTypesToEnable': [\n 'string',\n ],\n 'LogTypesToDisable': [\n 'string',\n ]\n }\n },\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'AutoMinorVersionUpgrade': 
True|False,\n 'ReadReplicaSourceDBInstanceIdentifier': 'string',\n 'ReadReplicaDBInstanceIdentifiers': [\n 'string',\n ],\n 'ReadReplicaDBClusterIdentifiers': [\n 'string',\n ],\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'OptionGroupMemberships': [\n {\n 'OptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'CharacterSetName': 'string',\n 'SecondaryAvailabilityZone': 'string',\n 'PubliclyAccessible': True|False,\n 'StatusInfos': [\n {\n 'StatusType': 'string',\n 'Normal': True|False,\n 'Status': 'string',\n 'Message': 'string'\n },\n ],\n 'StorageType': 'string',\n 'TdeCredentialArn': 'string',\n 'DbInstancePort': 123,\n 'DBClusterIdentifier': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbiResourceId': 'string',\n 'CACertificateIdentifier': 'string',\n 'DomainMemberships': [\n {\n 'Domain': 'string',\n 'Status': 'string',\n 'FQDN': 'string',\n 'IAMRoleName': 'string'\n },\n ],\n 'CopyTagsToSnapshot': True|False,\n 'MonitoringInterval': 123,\n 'EnhancedMonitoringResourceArn': 'string',\n 'MonitoringRoleArn': 'string',\n 'PromotionTier': 123,\n 'DBInstanceArn': 'string',\n 'Timezone': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'PerformanceInsightsEnabled': True|False,\n 'PerformanceInsightsKMSKeyId': 'string',\n 'EnabledCloudwatchLogsExports': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n ModifyDBInstance\n RebootDBInstance\n \n \"\"\"\n pass\n\ndef modify_db_parameter_group(DBParameterGroupName=None, Parameters=None):\n \"\"\"\n Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName , ParameterValue , and ApplyMethod . A maximum of 20 parameters can be modified in a single request.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_db_parameter_group(\n DBParameterGroupName='string',\n Parameters=[\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ]\n )\n \n \n :type DBParameterGroupName: string\n :param DBParameterGroupName: [REQUIRED]\n The name of the DB parameter group.\n Constraints:\n If supplied, must match the name of an existing DBParameterGroup.\n \n\n :type Parameters: list\n :param Parameters: [REQUIRED]\n An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.\n Valid Values (for the application method): immediate | pending-reboot\n Note\n You can use the immediate value with dynamic parameters only. 
You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.\n (dict) --This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.\n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.\n ParameterName (string) --Specifies the name of the parameter.\n ParameterValue (string) --Specifies the value of the parameter.\n Description (string) --Provides a description of the parameter.\n Source (string) --Indicates the source of the parameter value.\n ApplyType (string) --Specifies the engine specific parameters type.\n DataType (string) --Specifies the valid data type for the parameter.\n AllowedValues (string) --Specifies the valid range of values for the parameter.\n IsModifiable (boolean) --Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n MinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n ApplyMethod (string) --Indicates when to apply parameter updates.\n \n \n\n :rtype: dict\n :return: {\n 'DBParameterGroupName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef modify_db_subnet_group(DBSubnetGroupName=None, DBSubnetGroupDescription=None, SubnetIds=None):\n \"\"\"\n Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_db_subnet_group(\n DBSubnetGroupName='string',\n DBSubnetGroupDescription='string',\n SubnetIds=[\n 'string',\n ]\n )\n \n \n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: [REQUIRED]\n The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.\n Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.\n Example: mySubnetgroup\n \n\n :type DBSubnetGroupDescription: string\n :param DBSubnetGroupDescription: The description for the DB subnet group.\n\n :type SubnetIds: list\n :param SubnetIds: [REQUIRED]\n The EC2 subnet IDs for the DB subnet group.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n }\n }\n \n \n :returns: \n OrderableDBInstanceOption\n \n \"\"\"\n pass\n\ndef modify_event_subscription(SubscriptionName=None, SnsTopicArn=None, SourceType=None, EventCategories=None, Enabled=None):\n \"\"\"\n Modifies an existing event notification subscription. 
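\n As a hedged sketch, a call that narrows a subscription to a few categories (the subscription name and category values are illustrative assumptions):\n response = client.modify_event_subscription(\n SubscriptionName='my-subscription',\n EventCategories=[\n 'failure',\n 'maintenance'\n ],\n Enabled=True\n )\n 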
Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.\n You can see a list of the event categories for a given SourceType by using the DescribeEventCategories action.\n See also: AWS API Documentation\n \n \n :example: response = client.modify_event_subscription(\n SubscriptionName='string',\n SnsTopicArn='string',\n SourceType='string',\n EventCategories=[\n 'string',\n ],\n Enabled=True|False\n )\n \n \n :type SubscriptionName: string\n :param SubscriptionName: [REQUIRED]\n The name of the event notification subscription.\n \n\n :type SnsTopicArn: string\n :param SnsTopicArn: The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.\n\n :type SourceType: string\n :param SourceType: The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.\n Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot\n \n\n :type EventCategories: list\n :param EventCategories: A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType by using the DescribeEventCategories action.\n (string) --\n \n\n :type Enabled: boolean\n :param Enabled: A Boolean value; set to true to activate the subscription.\n\n :rtype: dict\n :return: {\n 'EventSubscription': {\n 'CustomerAwsId': 'string',\n 'CustSubscriptionId': 'string',\n 'SnsTopicArn': 'string',\n 'Status': 'string',\n 'SubscriptionCreationTime': 'string',\n 'SourceType': 'string',\n 'SourceIdsList': [\n 'string',\n ],\n 'EventCategoriesList': [\n 'string',\n ],\n 'Enabled': True|False,\n 'EventSubscriptionArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef promote_read_replica_db_cluster(DBClusterIdentifier=None):\n \"\"\"\n Promotes a Read Replica DB cluster to a standalone DB cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.promote_read_replica_db_cluster(\n DBClusterIdentifier='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The identifier of the DB cluster Read Replica to promote. 
This parameter is not case-sensitive.\n Constraints:\n Must match the identifier of an existing DBCluster Read Replica.\n Example: my-cluster-replica1\n \n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef reboot_db_instance(DBInstanceIdentifier=None, ForceFailover=None):\n \"\"\"\n You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.\n Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.\n See also: AWS API Documentation\n \n \n :example: response = client.reboot_db_instance(\n DBInstanceIdentifier='string',\n ForceFailover=True|False\n )\n \n \n :type DBInstanceIdentifier: string\n :param DBInstanceIdentifier: [REQUIRED]\n The DB instance identifier. 
This parameter is stored as a lowercase string.\n Constraints:\n Must match the identifier of an existing DBInstance.\n \n\n :type ForceFailover: boolean\n :param ForceFailover: When true , the reboot is conducted through a MultiAZ failover.\n Constraint: You can't specify true if the instance is not configured for MultiAZ.\n \n\n :rtype: dict\n :return: {\n 'DBInstance': {\n 'DBInstanceIdentifier': 'string',\n 'DBInstanceClass': 'string',\n 'Engine': 'string',\n 'DBInstanceStatus': 'string',\n 'MasterUsername': 'string',\n 'DBName': 'string',\n 'Endpoint': {\n 'Address': 'string',\n 'Port': 123,\n 'HostedZoneId': 'string'\n },\n 'AllocatedStorage': 123,\n 'InstanceCreateTime': datetime(2015, 1, 1),\n 'PreferredBackupWindow': 'string',\n 'BackupRetentionPeriod': 123,\n 'DBSecurityGroups': [\n {\n 'DBSecurityGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'DBParameterGroups': [\n {\n 'DBParameterGroupName': 'string',\n 'ParameterApplyStatus': 'string'\n },\n ],\n 'AvailabilityZone': 'string',\n 'DBSubnetGroup': {\n 'DBSubnetGroupName': 'string',\n 'DBSubnetGroupDescription': 'string',\n 'VpcId': 'string',\n 'SubnetGroupStatus': 'string',\n 'Subnets': [\n {\n 'SubnetIdentifier': 'string',\n 'SubnetAvailabilityZone': {\n 'Name': 'string'\n },\n 'SubnetStatus': 'string'\n },\n ],\n 'DBSubnetGroupArn': 'string'\n },\n 'PreferredMaintenanceWindow': 'string',\n 'PendingModifiedValues': {\n 'DBInstanceClass': 'string',\n 'AllocatedStorage': 123,\n 'MasterUserPassword': 'string',\n 'Port': 123,\n 'BackupRetentionPeriod': 123,\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'DBInstanceIdentifier': 'string',\n 'StorageType': 'string',\n 'CACertificateIdentifier': 'string',\n 'DBSubnetGroupName': 'string',\n 'PendingCloudwatchLogsExports': {\n 'LogTypesToEnable': [\n 'string',\n ],\n 'LogTypesToDisable': [\n 'string',\n ]\n }\n },\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'MultiAZ': True|False,\n 'EngineVersion': 'string',\n 'AutoMinorVersionUpgrade': True|False,\n 'ReadReplicaSourceDBInstanceIdentifier': 'string',\n 'ReadReplicaDBInstanceIdentifiers': [\n 'string',\n ],\n 'ReadReplicaDBClusterIdentifiers': [\n 'string',\n ],\n 'LicenseModel': 'string',\n 'Iops': 123,\n 'OptionGroupMemberships': [\n {\n 'OptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'CharacterSetName': 'string',\n 'SecondaryAvailabilityZone': 'string',\n 'PubliclyAccessible': True|False,\n 'StatusInfos': [\n {\n 'StatusType': 'string',\n 'Normal': True|False,\n 'Status': 'string',\n 'Message': 'string'\n },\n ],\n 'StorageType': 'string',\n 'TdeCredentialArn': 'string',\n 'DbInstancePort': 123,\n 'DBClusterIdentifier': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbiResourceId': 'string',\n 'CACertificateIdentifier': 'string',\n 'DomainMemberships': [\n {\n 'Domain': 'string',\n 'Status': 'string',\n 'FQDN': 'string',\n 'IAMRoleName': 'string'\n },\n ],\n 'CopyTagsToSnapshot': True|False,\n 'MonitoringInterval': 123,\n 'EnhancedMonitoringResourceArn': 'string',\n 'MonitoringRoleArn': 'string',\n 'PromotionTier': 123,\n 'DBInstanceArn': 'string',\n 'Timezone': 'string',\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'PerformanceInsightsEnabled': True|False,\n 'PerformanceInsightsKMSKeyId': 'string',\n 'EnabledCloudwatchLogsExports': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n ModifyDBInstance\n RebootDBInstance\n \n \"\"\"\n 
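# A minimal, hedged usage sketch (the identifier 'my-instance' is an\n # illustrative assumption, not a value confirmed by this documentation):\n # response = client.reboot_db_instance(\n #     DBInstanceIdentifier='my-instance',\n #     ForceFailover=False\n # )\n 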
pass\n\ndef remove_role_from_db_cluster(DBClusterIdentifier=None, RoleArn=None):\n \"\"\"\n Disassociates an Identity and Access Management (IAM) role from a DB cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_role_from_db_cluster(\n DBClusterIdentifier='string',\n RoleArn='string'\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The name of the DB cluster to disassociate the IAM role from.\n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role to disassociate from the DB cluster, for example arn:aws:iam::123456789012:role/NeptuneAccessRole .\n \n\n \"\"\"\n pass\n\ndef remove_source_identifier_from_subscription(SubscriptionName=None, SourceIdentifier=None):\n \"\"\"\n Removes a source identifier from an existing event notification subscription.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_source_identifier_from_subscription(\n SubscriptionName='string',\n SourceIdentifier='string'\n )\n \n \n :type SubscriptionName: string\n :param SubscriptionName: [REQUIRED]\n The name of the event notification subscription you want to remove a source identifier from.\n \n\n :type SourceIdentifier: string\n :param SourceIdentifier: [REQUIRED]\n The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.\n \n\n :rtype: dict\n :return: {\n 'EventSubscription': {\n 'CustomerAwsId': 'string',\n 'CustSubscriptionId': 'string',\n 'SnsTopicArn': 'string',\n 'Status': 'string',\n 'SubscriptionCreationTime': 'string',\n 'SourceType': 'string',\n 'SourceIdsList': [\n 'string',\n ],\n 'EventCategoriesList': [\n 'string',\n ],\n 'Enabled': True|False,\n 'EventSubscriptionArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef remove_tags_from_resource(ResourceName=None, TagKeys=None):\n \"\"\"\n Removes metadata tags from an Amazon Neptune resource.\n See also: AWS API Documentation\n \n \n :example: response = client.remove_tags_from_resource(\n ResourceName='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceName: string\n :param ResourceName: [REQUIRED]\n The Amazon Neptune resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) .\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n The tag key (name) of the tag to be removed.\n (string) --\n \n\n \"\"\"\n pass\n\ndef reset_db_cluster_parameter_group(DBClusterParameterGroupName=None, ResetAllParameters=None, Parameters=None):\n \"\"\"\n Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod . To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.\n When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. 
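\n A hedged sketch of resetting a whole cluster parameter group (the group name 'my-cluster-params' is an assumption for this example):\n response = client.reset_db_cluster_parameter_group(\n DBClusterParameterGroupName='my-cluster-params',\n ResetAllParameters=True\n )\n 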
You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.\n See also: AWS API Documentation\n \n \n :example: response = client.reset_db_cluster_parameter_group(\n DBClusterParameterGroupName='string',\n ResetAllParameters=True|False,\n Parameters=[\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ]\n )\n \n \n :type DBClusterParameterGroupName: string\n :param DBClusterParameterGroupName: [REQUIRED]\n The name of the DB cluster parameter group to reset.\n \n\n :type ResetAllParameters: boolean\n :param ResetAllParameters: A value that is set to true to reset all parameters in the DB cluster parameter group to their default values, and false otherwise. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter.\n\n :type Parameters: list\n :param Parameters: A list of parameter names in the DB cluster parameter group to reset to the default values. You can't use this parameter if the ResetAllParameters parameter is set to true .\n (dict) --This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.\n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.\n ParameterName (string) --Specifies the name of the parameter.\n ParameterValue (string) --Specifies the value of the parameter.\n Description (string) --Provides a description of the parameter.\n Source (string) --Indicates the source of the parameter value.\n ApplyType (string) --Specifies the engine specific parameters type.\n DataType (string) --Specifies the valid data type for the parameter.\n AllowedValues (string) --Specifies the valid range of values for the parameter.\n IsModifiable (boolean) --Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n MinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n ApplyMethod (string) --Indicates when to apply parameter updates.\n \n \n\n :rtype: dict\n :return: {\n 'DBClusterParameterGroupName': 'string'\n }\n \n \n :returns: \n Must be 1 to 255 letters or numbers.\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n \n \"\"\"\n pass\n\ndef reset_db_parameter_group(DBParameterGroupName=None, ResetAllParameters=None, Parameters=None):\n \"\"\"\n Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod . To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. 
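\n A hedged sketch of resetting a single parameter (the group and parameter names are illustrative assumptions, not values confirmed here):\n response = client.reset_db_parameter_group(\n DBParameterGroupName='my-db-params',\n ResetAllParameters=False,\n Parameters=[\n {\n 'ParameterName': 'neptune_query_timeout',\n 'ApplyMethod': 'pending-reboot'\n },\n ]\n )\n 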
When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.\n See also: AWS API Documentation\n \n \n :example: response = client.reset_db_parameter_group(\n DBParameterGroupName='string',\n ResetAllParameters=True|False,\n Parameters=[\n {\n 'ParameterName': 'string',\n 'ParameterValue': 'string',\n 'Description': 'string',\n 'Source': 'string',\n 'ApplyType': 'string',\n 'DataType': 'string',\n 'AllowedValues': 'string',\n 'IsModifiable': True|False,\n 'MinimumEngineVersion': 'string',\n 'ApplyMethod': 'immediate'|'pending-reboot'\n },\n ]\n )\n \n \n :type DBParameterGroupName: string\n :param DBParameterGroupName: [REQUIRED]\n The name of the DB parameter group.\n Constraints:\n Must match the name of an existing DBParameterGroup.\n \n\n :type ResetAllParameters: boolean\n :param ResetAllParameters: Specifies whether (true ) or not (false ) to reset all parameters in the DB parameter group to default values.\n Default: true\n \n\n :type Parameters: list\n :param Parameters: To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod . A maximum of 20 parameters can be modified in a single request.\n Valid Values (for Apply method): pending-reboot\n (dict) --This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.\n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.\n ParameterName (string) --Specifies the name of the parameter.\n ParameterValue (string) --Specifies the value of the parameter.\n Description (string) --Provides a description of the parameter.\n Source (string) --Indicates the source of the parameter value.\n ApplyType (string) --Specifies the engine specific parameters type.\n DataType (string) --Specifies the valid data type for the parameter.\n AllowedValues (string) --Specifies the valid range of values for the parameter.\n IsModifiable (boolean) --Indicates whether (true ) or not (false ) the parameter can be modified. 
Some parameters have security or operational implications that prevent them from being changed.\n MinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n ApplyMethod (string) --Indicates when to apply parameter updates.\n \n \n\n :rtype: dict\n :return: {\n 'DBParameterGroupName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef restore_db_cluster_from_snapshot(AvailabilityZones=None, DBClusterIdentifier=None, SnapshotIdentifier=None, Engine=None, EngineVersion=None, Port=None, DBSubnetGroupName=None, DatabaseName=None, OptionGroupName=None, VpcSecurityGroupIds=None, Tags=None, KmsKeyId=None, EnableIAMDatabaseAuthentication=None):\n \"\"\"\n Creates a new DB cluster from a DB snapshot or DB cluster snapshot.\n If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.\n If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_db_cluster_from_snapshot(\n AvailabilityZones=[\n 'string',\n ],\n DBClusterIdentifier='string',\n SnapshotIdentifier='string',\n Engine='string',\n EngineVersion='string',\n Port=123,\n DBSubnetGroupName='string',\n DatabaseName='string',\n OptionGroupName='string',\n VpcSecurityGroupIds=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n KmsKeyId='string',\n EnableIAMDatabaseAuthentication=True|False\n )\n \n \n :type AvailabilityZones: list\n :param AvailabilityZones: Provides the list of EC2 Availability Zones that instances in the restored DB cluster can be created in.\n (string) --\n \n\n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. This parameter isn't case-sensitive.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n Example: my-snapshot-id\n \n\n :type SnapshotIdentifier: string\n :param SnapshotIdentifier: [REQUIRED]\n The identifier for the DB snapshot or DB cluster snapshot to restore from.\n You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. 
However, you can use only the ARN to specify a DB snapshot.\n Constraints:\n Must match the identifier of an existing Snapshot.\n \n\n :type Engine: string\n :param Engine: [REQUIRED]\n The database engine to use for the new DB cluster.\n Default: The same as source\n Constraint: Must be compatible with the engine of the source\n \n\n :type EngineVersion: string\n :param EngineVersion: The version of the database engine to use for the new DB cluster.\n\n :type Port: integer\n :param Port: The port number on which the new DB cluster accepts connections.\n Constraints: Value must be 1150-65535\n Default: The same port as the original DB cluster.\n \n\n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: The name of the DB subnet group to use for the new DB cluster.\n Constraints: If supplied, must match the name of an existing DBSubnetGroup.\n Example: mySubnetgroup\n \n\n :type DatabaseName: string\n :param DatabaseName: The database name for the restored DB cluster.\n\n :type OptionGroupName: string\n :param OptionGroupName: The name of the option group to use for the restored DB cluster.\n\n :type VpcSecurityGroupIds: list\n :param VpcSecurityGroupIds: A list of VPC security groups that the new DB cluster will belong to.\n (string) --\n \n\n :type Tags: list\n :param Tags: The tags to be assigned to the restored DB cluster.\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$').\n \n \n\n :type KmsKeyId: string\n :param KmsKeyId: The AWS KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot.\n The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. 
If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.\n If you do not specify a value for the KmsKeyId parameter, then the following will occur:\n If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB snapshot or DB cluster snapshot.\n If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is not encrypted, then the restored DB cluster is not encrypted.\n \n\n :type EnableIAMDatabaseAuthentication: boolean\n :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.\n Default: false\n \n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef restore_db_cluster_to_point_in_time(DBClusterIdentifier=None, RestoreType=None, SourceDBClusterIdentifier=None, RestoreToTime=None, UseLatestRestorableTime=None, Port=None, DBSubnetGroupName=None, OptionGroupName=None, VpcSecurityGroupIds=None, Tags=None, KmsKeyId=None, EnableIAMDatabaseAuthentication=None):\n \"\"\"\n Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. 
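\n A hedged sketch restoring to the latest restorable time (the cluster identifiers are placeholders assumed for this example):\n response = client.restore_db_cluster_to_point_in_time(\n DBClusterIdentifier='my-restored-cluster',\n SourceDBClusterIdentifier='my-cluster',\n UseLatestRestorableTime=True\n )\n 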
The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_db_cluster_to_point_in_time(\n DBClusterIdentifier='string',\n RestoreType='string',\n SourceDBClusterIdentifier='string',\n RestoreToTime=datetime(2015, 1, 1),\n UseLatestRestorableTime=True|False,\n Port=123,\n DBSubnetGroupName='string',\n OptionGroupName='string',\n VpcSecurityGroupIds=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n KmsKeyId='string',\n EnableIAMDatabaseAuthentication=True|False\n )\n \n \n :type DBClusterIdentifier: string\n :param DBClusterIdentifier: [REQUIRED]\n The name of the new DB cluster to be created.\n Constraints:\n Must contain from 1 to 63 letters, numbers, or hyphens\n First character must be a letter\n Cannot end with a hyphen or contain two consecutive hyphens\n \n\n :type RestoreType: string\n :param RestoreType: The type of restore to be performed. You can specify one of the following values:\n full-copy - The new DB cluster is restored as a full copy of the source DB cluster.\n copy-on-write - The new DB cluster is restored as a clone of the source DB cluster.\n Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.\n If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.\n \n\n :type SourceDBClusterIdentifier: string\n :param SourceDBClusterIdentifier: [REQUIRED]\n The identifier of the source DB cluster from which to restore.\n Constraints:\n Must match the identifier of an existing DBCluster.\n \n\n :type RestoreToTime: datetime\n :param RestoreToTime: The date and time to restore the DB cluster to.\n Valid Values: Value must be a time in Universal Coordinated Time (UTC) format\n Constraints:\n Must be before the latest restorable time for the DB instance\n Must be specified if UseLatestRestorableTime parameter is not provided\n Cannot be specified if UseLatestRestorableTime parameter is true\n Cannot be specified if RestoreType parameter is copy-on-write\n Example: 2015-03-07T23:45:00Z\n \n\n :type UseLatestRestorableTime: boolean\n :param UseLatestRestorableTime: A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise.\n Default: false\n Constraints: Cannot be specified if RestoreToTime parameter is provided.\n \n\n :type Port: integer\n :param Port: The port number on which the new DB cluster accepts connections.\n Constraints: Value must be 1150-65535\n Default: The same port as the original DB cluster.\n \n\n :type DBSubnetGroupName: string\n :param DBSubnetGroupName: The DB subnet group name to use for the new DB cluster.\n Constraints: If supplied, must match the name of an existing DBSubnetGroup.\n Example: mySubnetgroup\n \n\n :type OptionGroupName: string\n :param OptionGroupName: The name of the option group for the new DB cluster.\n\n :type VpcSecurityGroupIds: list\n :param VpcSecurityGroupIds: A list of VPC security groups that the new DB cluster belongs to.\n (string) --\n \n\n :type Tags: list\n :param Tags: A list of tags. For more information, see Tagging Amazon Neptune Resources .\n (dict) --Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.\n Key (string) --A key is the required name of the tag. 
The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n \n \n\n :type KmsKeyId: string\n :param KmsKeyId: The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.\n The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.\n You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter.\n If you do not specify a value for the KmsKeyId parameter, then the following will occur:\n If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.\n If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.\n If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.\n \n\n :type EnableIAMDatabaseAuthentication: boolean\n :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.\n Default: false\n \n\n :rtype: dict\n :return: {\n 'DBCluster': {\n 'AllocatedStorage': 123,\n 'AvailabilityZones': [\n 'string',\n ],\n 'BackupRetentionPeriod': 123,\n 'CharacterSetName': 'string',\n 'DatabaseName': 'string',\n 'DBClusterIdentifier': 'string',\n 'DBClusterParameterGroup': 'string',\n 'DBSubnetGroup': 'string',\n 'Status': 'string',\n 'PercentProgress': 'string',\n 'EarliestRestorableTime': datetime(2015, 1, 1),\n 'Endpoint': 'string',\n 'ReaderEndpoint': 'string',\n 'MultiAZ': True|False,\n 'Engine': 'string',\n 'EngineVersion': 'string',\n 'LatestRestorableTime': datetime(2015, 1, 1),\n 'Port': 123,\n 'MasterUsername': 'string',\n 'DBClusterOptionGroupMemberships': [\n {\n 'DBClusterOptionGroupName': 'string',\n 'Status': 'string'\n },\n ],\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'ReplicationSourceIdentifier': 'string',\n 'ReadReplicaIdentifiers': [\n 'string',\n ],\n 'DBClusterMembers': [\n {\n 'DBInstanceIdentifier': 'string',\n 'IsClusterWriter': True|False,\n 'DBClusterParameterGroupStatus': 'string',\n 'PromotionTier': 123\n },\n ],\n 'VpcSecurityGroups': [\n {\n 'VpcSecurityGroupId': 'string',\n 'Status': 'string'\n },\n ],\n 'HostedZoneId': 'string',\n 'StorageEncrypted': True|False,\n 'KmsKeyId': 'string',\n 'DbClusterResourceId': 'string',\n 'DBClusterArn': 'string',\n 'AssociatedRoles': [\n {\n 'RoleArn': 'string',\n 'Status': 'string'\n },\n ],\n 'IAMDatabaseAuthenticationEnabled': True|False,\n 'CloneGroupId': 'string',\n 'ClusterCreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n 
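A minimal usage sketch (not part of the AWS documentation; the cluster identifiers are hypothetical, and UseLatestRestorableTime is set so that no RestoreToTime is required):

    import boto3

    neptune = boto3.client('neptune')
    # Restore a hypothetical source cluster to its latest restorable time.
    response = neptune.restore_db_cluster_to_point_in_time(
        DBClusterIdentifier='my-restored-cluster',
        SourceDBClusterIdentifier='my-source-cluster',
        UseLatestRestorableTime=True
    )
    print(response['DBCluster']['Status'])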
\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6979891061782837, "alphanum_fraction": 0.7000563740730286, "avg_line_length": 37, "blob_id": "4e96eb89745f7a5e2ff37d6ce41c7d6251857316", "content_id": "51e40e75b9a021f5b55506e1ca01cff47a88c6aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5321, "license_type": "permissive", "max_line_length": 221, "num_lines": 140, "path": "/pyboto3/sagemakerruntime.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef invoke_endpoint(EndpointName=None, Body=None, ContentType=None, Accept=None, CustomAttributes=None):\n \"\"\"\n After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.\n For an overview of Amazon SageMaker, see How It Works .\n Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.\n Calls to InvokeEndpoint are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference .\n See also: AWS API Documentation\n \n \n :example: response = client.invoke_endpoint(\n EndpointName='string',\n Body=b'bytes'|file,\n ContentType='string',\n Accept='string',\n CustomAttributes='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.\n \n\n :type Body: bytes or seekable file-like object\n :param Body: [REQUIRED]\n Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.\n For information about the format of the request body, see Common Data Formats Inference .\n \n\n :type ContentType: string\n :param ContentType: The MIME type of the input data in the request body.\n\n :type Accept: string\n :param Accept: The desired MIME type of the inference in the response.\n\n :type CustomAttributes: string\n :param CustomAttributes: \n\n :rtype: dict\n :return: {\n 'Body': StreamingBody(),\n 'ContentType': 'string',\n 'InvokedProductionVariant': 'string',\n 'CustomAttributes': 'string'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6203439235687256, "alphanum_fraction": 0.6232956647872925, "avg_line_length": 36.05208206176758, "blob_id": "9d6000d094b5153b55295b3707bf837469be183a", "content_id": "b01f18acb89f215db1e41e4eb68c293d30323751", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21343, "license_type": "permissive", "max_line_length": 570, "num_lines": 576, "path": "/pyboto3/fms.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_admin_account(AdminAccount=None):\n \"\"\"\n Sets the AWS Firewall Manager administrator account. AWS Firewall Manager must be associated with the master account of your AWS organization or associated with a member account that has the appropriate permissions. If the account ID that you submit is not an AWS Organizations master account, AWS Firewall Manager will set the appropriate permissions for the given member account.\n The account that you associate with AWS Firewall Manager is called the AWS Firewall Manager administrator account.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_admin_account(\n AdminAccount='string'\n )\n \n \n :type AdminAccount: string\n :param AdminAccount: [REQUIRED]\n The AWS account ID to associate with AWS Firewall Manager as the AWS Firewall Manager administrator account. This can be an AWS Organizations master account or a member account. For more information about AWS Organizations and master accounts, see Managing the AWS Accounts in Your Organization .\n \n\n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_notification_channel():\n \"\"\"\n Deletes an AWS Firewall Manager association with the IAM role and the Amazon Simple Notification Service (SNS) topic that is used to record AWS Firewall Manager SNS logs.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_notification_channel()\n \n \n \"\"\"\n pass\n\ndef delete_policy(PolicyId=None):\n \"\"\"\n Permanently deletes an AWS Firewall Manager policy.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_policy(\n PolicyId='string'\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The ID of the policy that you want to delete. PolicyId is returned by PutPolicy and by ListPolicies .\n \n\n \"\"\"\n pass\n\ndef disassociate_admin_account():\n \"\"\"\n Disassociates the account that has been set as the AWS Firewall Manager administrator account. You will need to submit an AssociateAdminAccount request to set a new account as the AWS Firewall administrator.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_admin_account()\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_admin_account():\n \"\"\"\n Returns the AWS Organizations master account that is associated with AWS Firewall Manager as the AWS Firewall Manager administrator.\n See also: AWS API Documentation\n \n \n :example: response = client.get_admin_account()\n \n \n :rtype: dict\n :return: {\n 'AdminAccount': 'string',\n 'RoleStatus': 'READY'|'CREATING'|'PENDING_DELETION'|'DELETING'|'DELETED'\n }\n \n \n \"\"\"\n pass\n\ndef get_compliance_detail(PolicyId=None, MemberAccount=None):\n \"\"\"\n Returns detailed compliance information about the specified member account. Details include resources that are in and out of compliance with the specified policy. Resources are considered non-compliant if the specified policy has not been applied to them.\n See also: AWS API Documentation\n \n \n :example: response = client.get_compliance_detail(\n PolicyId='string',\n MemberAccount='string'\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The ID of the policy that you want to get the details for. PolicyId is returned by PutPolicy and by ListPolicies .\n \n\n :type MemberAccount: string\n :param MemberAccount: [REQUIRED]\n The AWS account that owns the resources that you want to get the details for.\n \n\n :rtype: dict\n :return: {\n 'PolicyComplianceDetail': {\n 'PolicyOwner': 'string',\n 'PolicyId': 'string',\n 'MemberAccount': 'string',\n 'Violators': [\n {\n 'ResourceId': 'string',\n 'ViolationReason': 'WEB_ACL_MISSING_RULE_GROUP'|'RESOURCE_MISSING_WEB_ACL'|'RESOURCE_INCORRECT_WEB_ACL',\n 'ResourceType': 'string'\n },\n ],\n 'EvaluationLimitExceeded': True|False,\n 'ExpiredAt': datetime(2015, 1, 1),\n 'IssueInfoMap': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_notification_channel():\n \"\"\"\n Returns information about the Amazon Simple Notification Service (SNS) topic that is used to record AWS Firewall Manager SNS logs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_notification_channel()\n \n \n :rtype: dict\n :return: {\n 'SnsTopicArn': 'string',\n 'SnsRoleName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_policy(PolicyId=None):\n \"\"\"\n Returns information about the specified AWS Firewall Manager policy.\n See also: AWS API Documentation\n \n \n :example: response = client.get_policy(\n PolicyId='string'\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The ID of the AWS Firewall Manager policy that you want the details for.\n \n\n :rtype: dict\n :return: {\n 'Policy': {\n 'PolicyId': 'string',\n 'PolicyName': 'string',\n 'PolicyUpdateToken': 'string',\n 'SecurityServicePolicyData': {\n 'Type': 'WAF',\n 'ManagedServiceData': 'string'\n },\n 'ResourceType': 'string',\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'ExcludeResourceTags': True|False,\n 'RemediationEnabled': True|False,\n 'IncludeMap': {\n 'string': [\n 'string',\n ]\n },\n 'ExcludeMap': {\n 'string': [\n 'string',\n ]\n }\n },\n 'PolicyArn': 'string'\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_compliance_status(PolicyId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns an array of PolicyComplianceStatus objects in the response. Use PolicyComplianceStatus to get a summary of which member accounts are protected by the specified policy.\n See also: AWS API Documentation\n \n \n :example: response = client.list_compliance_status(\n PolicyId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type PolicyId: string\n :param PolicyId: [REQUIRED]\n The ID of the AWS Firewall Manager policy that you want the details for.\n \n\n :type NextToken: string\n :param NextToken: If you specify a value for MaxResults and you have more PolicyComplianceStatus objects than the number that you specify for MaxResults , AWS Firewall Manager returns a NextToken value in the response that allows you to list another group of PolicyComplianceStatus objects. For the second and subsequent ListComplianceStatus requests, specify the value of NextToken from the previous response to get information about another batch of PolicyComplianceStatus objects.\n\n :type MaxResults: integer\n :param MaxResults: Specifies the number of PolicyComplianceStatus objects that you want AWS Firewall Manager to return for this request. 
If you have more PolicyComplianceStatus objects than the number that you specify for MaxResults , the response includes a NextToken value that you can use to get another batch of PolicyComplianceStatus objects.\n\n :rtype: dict\n :return: {\n 'PolicyComplianceStatusList': [\n {\n 'PolicyOwner': 'string',\n 'PolicyId': 'string',\n 'PolicyName': 'string',\n 'MemberAccount': 'string',\n 'EvaluationResults': [\n {\n 'ComplianceStatus': 'COMPLIANT'|'NON_COMPLIANT',\n 'ViolatorCount': 123,\n 'EvaluationLimitExceeded': True|False\n },\n ],\n 'LastUpdated': datetime(2015, 1, 1),\n 'IssueInfoMap': {\n 'string': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_member_accounts(NextToken=None, MaxResults=None):\n \"\"\"\n Returns a MemberAccounts object that lists the member accounts in the administrator's AWS organization.\n The ListMemberAccounts must be submitted by the account that is set as the AWS Firewall Manager administrator.\n See also: AWS API Documentation\n \n \n :example: response = client.list_member_accounts(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: If you specify a value for MaxResults and you have more account IDs than the number that you specify for MaxResults , AWS Firewall Manager returns a NextToken value in the response that allows you to list another group of IDs. For the second and subsequent ListMemberAccountsRequest requests, specify the value of NextToken from the previous response to get information about another batch of member account IDs.\n\n :type MaxResults: integer\n :param MaxResults: Specifies the number of member account IDs that you want AWS Firewall Manager to return for this request. If you have more IDs than the number that you specify for MaxResults , the response includes a NextToken value that you can use to get another batch of member account IDs. The maximum value for MaxResults is 100.\n\n :rtype: dict\n :return: {\n 'MemberAccounts': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_policies(NextToken=None, MaxResults=None):\n \"\"\"\n Returns an array of PolicySummary objects in the response.\n See also: AWS API Documentation\n \n \n :example: response = client.list_policies(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: If you specify a value for MaxResults and you have more PolicySummary objects than the number that you specify for MaxResults , AWS Firewall Manager returns a NextToken value in the response that allows you to list another group of PolicySummary objects. For the second and subsequent ListPolicies requests, specify the value of NextToken from the previous response to get information about another batch of PolicySummary objects.\n\n :type MaxResults: integer\n :param MaxResults: Specifies the number of PolicySummary objects that you want AWS Firewall Manager to return for this request. 
If you have more PolicySummary objects than the number that you specify for MaxResults , the response includes a NextToken value that you can use to get another batch of PolicySummary objects.\n\n :rtype: dict\n :return: {\n 'PolicyList': [\n {\n 'PolicyArn': 'string',\n 'PolicyId': 'string',\n 'PolicyName': 'string',\n 'ResourceType': 'string',\n 'SecurityServiceType': 'WAF',\n 'RemediationEnabled': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_notification_channel(SnsTopicArn=None, SnsRoleName=None):\n \"\"\"\n Designates the IAM role and Amazon Simple Notification Service (SNS) topic that AWS Firewall Manager uses to record SNS logs.\n See also: AWS API Documentation\n \n \n :example: response = client.put_notification_channel(\n SnsTopicArn='string',\n SnsRoleName='string'\n )\n \n \n :type SnsTopicArn: string\n :param SnsTopicArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the SNS topic that collects notifications from AWS Firewall Manager.\n \n\n :type SnsRoleName: string\n :param SnsRoleName: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role that allows Amazon SNS to record AWS Firewall Manager activity.\n \n\n \"\"\"\n pass\n\ndef put_policy(Policy=None):\n \"\"\"\n Creates an AWS Firewall Manager policy.\n See also: AWS API Documentation\n \n \n :example: response = client.put_policy(\n Policy={\n 'PolicyId': 'string',\n 'PolicyName': 'string',\n 'PolicyUpdateToken': 'string',\n 'SecurityServicePolicyData': {\n 'Type': 'WAF',\n 'ManagedServiceData': 'string'\n },\n 'ResourceType': 'string',\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'ExcludeResourceTags': True|False,\n 'RemediationEnabled': True|False,\n 'IncludeMap': {\n 'string': [\n 'string',\n ]\n },\n 'ExcludeMap': {\n 'string': [\n 'string',\n ]\n }\n }\n )\n \n \n :type Policy: dict\n :param Policy: [REQUIRED]\n The details of the AWS Firewall Manager policy to be created.\n PolicyId (string) --The ID of the AWS Firewall Manager policy.\n PolicyName (string) -- [REQUIRED]The friendly name of the AWS Firewall Manager policy.\n PolicyUpdateToken (string) --A unique identifier for each update to the policy. When issuing a PutPolicy request, the PolicyUpdateToken in the request must match the PolicyUpdateToken of the current policy version. To get the PolicyUpdateToken of the current policy version, use a GetPolicy request.\n SecurityServicePolicyData (dict) -- [REQUIRED]Details about the security service that is being used to protect the resources.\n Type (string) -- [REQUIRED]The service that the policy is using to protect the resources. This value is WAF .\n ManagedServiceData (string) --Details about the service. This contains WAF data in JSON format, as shown in the following example:\n ManagedServiceData': '{\\'type\\': \\'WAF\\', \\'ruleGroups\\': [{\\'id\\': \\'12345678-1bcd-9012-efga-0987654321ab\\', \\'overrideAction\\' : {\\'type\\': \\'COUNT\\'}}], \\'defaultAction\\': {\\'type\\': \\'BLOCK\\'}}\n ResourceType (string) -- [REQUIRED]The type of resource to protect with the policy, either an Application Load Balancer or a CloudFront distribution. This is in the format shown in AWS Resource Types Reference . Valid values are AWS::ElasticLoadBalancingV2::LoadBalancer or AWS::CloudFront::Distribution .\n ResourceTags (list) --An array of ResourceTag objects.\n (dict) --The resource tags that AWS Firewall Manager uses to determine if a particular resource should be included or excluded from protection by the AWS Firewall Manager policy. 
Tags enable you to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. Tags are combined with an 'OR.' That is, if you add more than one tag and any of the tags matches, the resource is considered a match for the include or exclude. For more information, see Working with Tag Editor .\n Key (string) -- [REQUIRED]The resource tag key.\n Value (string) --The resource tag value.\n \n ExcludeResourceTags (boolean) -- [REQUIRED]If set to True , resources with the tags that are specified in the ResourceTag array are not protected by the policy. If set to False , and the ResourceTag array is not null, only resources with the specified tags are associated with the policy.\n RemediationEnabled (boolean) -- [REQUIRED]Indicates if the policy should be automatically applied to new resources.\n IncludeMap (dict) --Specifies the AWS account IDs to include in the policy. If IncludeMap is null, all accounts in the AWS Organization are included in the policy. If IncludeMap is not null, only values listed in IncludeMap will be included in the policy.\n The key to the map is ACCOUNT . For example, a valid IncludeMap would be { ACCOUNT : [ accountID1 , accountID2 ]} .\n (string) --\n (list) --\n (string) --\n \n ExcludeMap (dict) --Specifies the AWS account IDs to exclude from the policy. The IncludeMap values are evaluated first, with all of the appropriate account IDs added to the policy. Then the accounts listed in ExcludeMap are removed, resulting in the final list of accounts to add to the policy.\n The key to the map is ACCOUNT . For example, a valid ExcludeMap would be { ACCOUNT : [ accountID1 , accountID2 ]} .\n (string) --\n (list) --\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'Policy': {\n 'PolicyId': 'string',\n 'PolicyName': 'string',\n 'PolicyUpdateToken': 'string',\n 'SecurityServicePolicyData': {\n 'Type': 'WAF',\n 'ManagedServiceData': 'string'\n },\n 'ResourceType': 'string',\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'ExcludeResourceTags': True|False,\n 'RemediationEnabled': True|False,\n 'IncludeMap': {\n 'string': [\n 'string',\n ]\n },\n 'ExcludeMap': {\n 'string': [\n 'string',\n ]\n }\n },\n 'PolicyArn': 'string'\n }\n \n \n :returns: \n (string) --\n (list) --\n (string) --\n \n \n \n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6670491695404053, "alphanum_fraction": 0.6700429320335388, "avg_line_length": 36.60037612915039, "blob_id": "c6920d58950af1dac9bb2d20c3e65409596bd9df", "content_id": "2f67bf28b3955e606cea4244d45ed3de97199a22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20042, "license_type": "permissive", "max_line_length": 358, "num_lines": 533, "path": "/pyboto3/kinesisvideo.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the 
Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_stream(DeviceName=None, StreamName=None, MediaType=None, KmsKeyId=None, DataRetentionInHours=None):\n \"\"\"\n Creates a new Kinesis video stream.\n When you create a new stream, Kinesis Video Streams assigns it a version number. When you change the stream's metadata, Kinesis Video Streams updates the version.\n For information about how the service works, see How it Works .\n You must have permissions for the KinesisVideo:CreateStream action.\n See also: AWS API Documentation\n \n \n :example: response = client.create_stream(\n DeviceName='string',\n StreamName='string',\n MediaType='string',\n KmsKeyId='string',\n DataRetentionInHours=123\n )\n \n \n :type DeviceName: string\n :param DeviceName: The name of the device that is writing to the stream.\n Note\n In the current implementation, Kinesis Video Streams does not use this name.\n \n\n :type StreamName: string\n :param StreamName: [REQUIRED]\n A name for the stream that you are creating.\n The stream name is an identifier for the stream, and must be unique for each account and region.\n \n\n :type MediaType: string\n :param MediaType: The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see Media Types . If you choose to specify the MediaType , see Naming Requirements for guidelines.\n To play video on the console, the media must be H.264 encoded, and you need to specify this video type in this parameter as video/h264 .\n This parameter is optional; the default value is null (or empty in JSON).\n \n\n :type KmsKeyId: string\n :param KmsKeyId: The ID of the AWS Key Management Service (AWS KMS) key that you want Kinesis Video Streams to use to encrypt stream data.\n If no key ID is specified, the default, Kinesis Video-managed key (aws/kinesisvideo ) is used.\n For more information, see DescribeKey .\n \n\n :type DataRetentionInHours: integer\n :param DataRetentionInHours: The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream.\n The default value is 0, indicating that the stream does not persist data.\n When the DataRetentionInHours value is 0, consumers can still consume the fragments that remain in the service host buffer, which has a retention time limit of 5 minutes and a retention memory limit of 200 MB. 
Fragments are removed from the buffer when either limit is reached.\n \n\n :rtype: dict\n :return: {\n 'StreamARN': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_stream(StreamARN=None, CurrentVersion=None):\n \"\"\"\n Deletes a Kinesis video stream and the data contained in the stream.\n This method marks the stream for deletion, and makes the data in the stream inaccessible immediately.\n To ensure that you have the latest version of the stream before deleting it, you can specify the stream version. Kinesis Video Streams assigns a version to each stream. When you update a stream, Kinesis Video Streams assigns a new version number. To get the latest stream version, use the DescribeStream API.\n This operation requires permission for the KinesisVideo:DeleteStream action.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_stream(\n StreamARN='string',\n CurrentVersion='string'\n )\n \n \n :type StreamARN: string\n :param StreamARN: [REQUIRED]\n The Amazon Resource Name (ARN) of the stream that you want to delete.\n \n\n :type CurrentVersion: string\n :param CurrentVersion: Optional: The version of the stream that you want to delete.\n Specify the version as a safeguard to ensure that your are deleting the correct stream. To get the stream version, use the DescribeStream API.\n If not specified, only the CreationTime is checked before deleting the stream.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_stream(StreamName=None, StreamARN=None):\n \"\"\"\n Returns the most current information about the specified stream. You must specify either the StreamName or the StreamARN .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_stream(\n StreamName='string',\n StreamARN='string'\n )\n \n \n :type StreamName: string\n :param StreamName: The name of the stream.\n\n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the stream.\n\n :rtype: dict\n :return: {\n 'StreamInfo': {\n 'DeviceName': 'string',\n 'StreamName': 'string',\n 'StreamARN': 'string',\n 'MediaType': 'string',\n 'KmsKeyId': 'string',\n 'Version': 'string',\n 'Status': 'CREATING'|'ACTIVE'|'UPDATING'|'DELETING',\n 'CreationTime': datetime(2015, 1, 1),\n 'DataRetentionInHours': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_data_endpoint(StreamName=None, StreamARN=None, APIName=None):\n \"\"\"\n Gets an endpoint for a specified stream for either reading or writing. 
Use this endpoint in your application to read from the specified stream (using the GetMedia or GetMediaForFragmentList operations) or write to it (using the PutMedia operation).\n In the request, specify the stream either by StreamName or StreamARN .\n See also: AWS API Documentation\n \n \n :example: response = client.get_data_endpoint(\n StreamName='string',\n StreamARN='string',\n APIName='PUT_MEDIA'|'GET_MEDIA'|'LIST_FRAGMENTS'|'GET_MEDIA_FOR_FRAGMENT_LIST'|'GET_HLS_STREAMING_SESSION_URL'\n )\n \n \n :type StreamName: string\n :param StreamName: The name of the stream that you want to get the endpoint for. You must specify either this parameter or a StreamARN in the request.\n\n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the stream that you want to get the endpoint for. You must specify either this parameter or a StreamName in the request.\n\n :type APIName: string\n :param APIName: [REQUIRED]\n The name of the API action for which to get an endpoint.\n \n\n :rtype: dict\n :return: {\n 'DataEndpoint': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_streams(MaxResults=None, NextToken=None, StreamNameCondition=None):\n \"\"\"\n Returns an array of StreamInfo objects. Each object describes a stream. To retrieve only streams that satisfy a specific condition, you can specify a StreamNameCondition .\n See also: AWS API Documentation\n \n \n :example: response = client.list_streams(\n MaxResults=123,\n NextToken='string',\n StreamNameCondition={\n 'ComparisonOperator': 'BEGINS_WITH',\n 'ComparisonValue': 'string'\n }\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of streams to return in the response. The default is 10,000.\n\n :type NextToken: string\n :param NextToken: If you specify this parameter, when the result of a ListStreams operation is truncated, the call returns the NextToken in the response. To get another batch of streams, provide this token in your next request.\n\n :type StreamNameCondition: dict\n :param StreamNameCondition: Optional: Returns only streams that satisfy a specific condition. Currently, you can specify only the prefix of a stream name as a condition.\n ComparisonOperator (string) --A comparison operator. 
Currently, you can specify only the BEGINS_WITH operator, which finds streams whose names start with a given prefix.\n ComparisonValue (string) --A value to compare.\n \n\n :rtype: dict\n :return: {\n 'StreamInfoList': [\n {\n 'DeviceName': 'string',\n 'StreamName': 'string',\n 'StreamARN': 'string',\n 'MediaType': 'string',\n 'KmsKeyId': 'string',\n 'Version': 'string',\n 'Status': 'CREATING'|'ACTIVE'|'UPDATING'|'DELETING',\n 'CreationTime': datetime(2015, 1, 1),\n 'DataRetentionInHours': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_stream(NextToken=None, StreamARN=None, StreamName=None):\n \"\"\"\n Returns a list of tags associated with the specified stream.\n In the request, you must specify either the StreamName or the StreamARN .\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_stream(\n NextToken='string',\n StreamARN='string',\n StreamName='string'\n )\n \n \n :type NextToken: string\n :param NextToken: If you specify this parameter and the result of a ListTagsForStream call is truncated, the response includes a token that you can use in the next request to fetch the next batch of tags.\n\n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the stream that you want to list tags for.\n\n :type StreamName: string\n :param StreamName: The name of the stream that you want to list tags for.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef tag_stream(StreamARN=None, StreamName=None, Tags=None):\n \"\"\"\n Adds one or more tags to a stream. A tag is a key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n You must provide either the StreamName or the StreamARN .\n This operation requires permission for the KinesisVideo:TagStream action.\n Kinesis video streams support up to 50 tags.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_stream(\n StreamARN='string',\n StreamName='string',\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the resource that you want to add the tag or tags to.\n\n :type StreamName: string\n :param StreamName: The name of the stream that you want to add the tag or tags to.\n\n :type Tags: dict\n :param Tags: [REQUIRED]\n A list of tags to associate with the specified stream. Each tag is a key-value pair (the value is optional).\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_stream(StreamARN=None, StreamName=None, TagKeyList=None):\n \"\"\"\n Removes one or more tags from a stream. In the request, specify only a tag key or keys; don't specify the value. 
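For example, the following sketch (illustrative only; the stream name and tag keys are hypothetical) removes two tags by key:

    import boto3

    kv = boto3.client('kinesisvideo')
    # Pass only the keys; UntagStream never takes tag values.
    kv.untag_stream(
        StreamName='my-stream',
        TagKeyList=['project', 'owner']
    )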
If you specify a tag key that does not exist, it's ignored.\n In the request, you must provide the StreamName or StreamARN .\n See also: AWS API Documentation\n \n \n :example: response = client.untag_stream(\n StreamARN='string',\n StreamName='string',\n TagKeyList=[\n 'string',\n ]\n )\n \n \n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the stream that you want to remove tags from.\n\n :type StreamName: string\n :param StreamName: The name of the stream that you want to remove tags from.\n\n :type TagKeyList: list\n :param TagKeyList: [REQUIRED]\n A list of the keys of the tags that you want to remove.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_data_retention(StreamName=None, StreamARN=None, CurrentVersion=None, Operation=None, DataRetentionChangeInHours=None):\n \"\"\"\n Increases or decreases the stream's data retention period by the value that you specify. To indicate whether you want to increase or decrease the data retention period, specify the Operation parameter in the request body. In the request, you must specify either the StreamName or the StreamARN .\n This operation requires permission for the KinesisVideo:UpdateDataRetention action.\n Changing the data retention period affects the data in the stream as follows:\n See also: AWS API Documentation\n \n \n :example: response = client.update_data_retention(\n StreamName='string',\n StreamARN='string',\n CurrentVersion='string',\n Operation='INCREASE_DATA_RETENTION'|'DECREASE_DATA_RETENTION',\n DataRetentionChangeInHours=123\n )\n \n \n :type StreamName: string\n :param StreamName: The name of the stream whose retention period you want to change.\n\n :type StreamARN: string\n :param StreamARN: The Amazon Resource Name (ARN) of the stream whose retention period you want to change.\n\n :type CurrentVersion: string\n :param CurrentVersion: [REQUIRED]\n The version of the stream whose retention period you want to change. To get the version, call either the DescribeStream or the ListStreams API.\n \n\n :type Operation: string\n :param Operation: [REQUIRED]\n Indicates whether you want to increase or decrease the retention period.\n \n\n :type DataRetentionChangeInHours: integer\n :param DataRetentionChangeInHours: [REQUIRED]\n The retention period, in hours. The value you specify replaces the current value.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n StreamName (string) -- The name of the stream whose retention period you want to change.\n StreamARN (string) -- The Amazon Resource Name (ARN) of the stream whose retention period you want to change.\n CurrentVersion (string) -- [REQUIRED]\n The version of the stream whose retention period you want to change. To get the version, call either the DescribeStream or the ListStreams API.\n \n Operation (string) -- [REQUIRED]\n Indicates whether you want to increase or decrease the retention period.\n \n DataRetentionChangeInHours (integer) -- [REQUIRED]\n The retention period, in hours. The value you specify replaces the current value.\n \n \n \"\"\"\n pass\n\ndef update_stream(StreamName=None, StreamARN=None, CurrentVersion=None, DeviceName=None, MediaType=None):\n \"\"\"\n Updates stream metadata, such as the device name and media type.\n You must provide the stream name or the Amazon Resource Name (ARN) of the stream.\n To make sure that you have the latest version of the stream before updating it, you can specify the stream version. 
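A minimal sketch of that pattern (illustrative only; 'my-stream' is a hypothetical stream name):

    import boto3

    kv = boto3.client('kinesisvideo')
    # Fetch the current version first, because UpdateStream requires it.
    version = kv.describe_stream(StreamName='my-stream')['StreamInfo']['Version']
    kv.update_stream(
        StreamName='my-stream',
        CurrentVersion=version,
        MediaType='video/h264'
    )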
Kinesis Video Streams assigns a version to each stream. When you update a stream, Kinesis Video Streams assigns a new version number. To get the latest stream version, use the DescribeStream API.\n See also: AWS API Documentation\n \n \n :example: response = client.update_stream(\n StreamName='string',\n StreamARN='string',\n CurrentVersion='string',\n DeviceName='string',\n MediaType='string'\n )\n \n \n :type StreamName: string\n :param StreamName: The name of the stream whose metadata you want to update.\n The stream name is an identifier for the stream, and must be unique for each account and region.\n \n\n :type StreamARN: string\n :param StreamARN: The ARN of the stream whose metadata you want to update.\n\n :type CurrentVersion: string\n :param CurrentVersion: [REQUIRED]\n The version of the stream whose metadata you want to update.\n \n\n :type DeviceName: string\n :param DeviceName: The name of the device that is writing to the stream.\n Note\n In the current implementation, Kinesis Video Streams does not use this name.\n \n\n :type MediaType: string\n :param MediaType: The stream's media type. Use MediaType to specify the type of content that the stream contains to the consumers of the stream. For more information about media types, see Media Types . If you choose to specify the MediaType , see Naming Requirements .\n To play video on the console, you must specify the correct video type. For example, if the video in the stream is H.264, specify video/h264 as the MediaType .\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6497993469238281, "alphanum_fraction": 0.651265025138855, "avg_line_length": 36.30989456176758, "blob_id": "5cdbf1f90363cfc6a092ed9ff2d44a5559311a58", "content_id": "34a66dfab5d95d913222742a1ddca4a536c081bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28655, "license_type": "permissive", "max_line_length": 453, "num_lines": 768, "path": "/pyboto3/transfer.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_server(IdentityProviderDetails=None, IdentityProviderType=None, LoggingRole=None, Tags=None):\n \"\"\"\n Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. The call returns the ServerId property assigned by the service to the newly created server. Reference this ServerId property when you make updates to your server, or work with users.\n The response returns the ServerId value for the newly created server.\n See also: AWS API Documentation\n \n \n :example: response = client.create_server(\n IdentityProviderDetails={\n 'Url': 'string',\n 'InvocationRole': 'string'\n },\n IdentityProviderType='SERVICE_MANAGED'|'API_GATEWAY',\n LoggingRole='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type IdentityProviderDetails: dict\n :param IdentityProviderDetails: An array containing all of the information required to call a customer-supplied authentication API. This parameter is not required when the IdentityProviderType value of the server that is created uses the SERVICE_MANAGED authentication method.\n Url (string) --The IdentityProviderDetail parameter contains the location of the service endpoint used to authenticate users.\n InvocationRole (string) --The Role parameter provides the type of InvocationRole used to authenticate the user account.\n \n\n :type IdentityProviderType: string\n :param IdentityProviderType: The mode of authentication enabled for this service. The default value is SERVICE_MANAGED , which allows you to store and access SFTP user credentials within the service. An IdentityProviderType value of API_GATEWAY indicates that user authentication requires a call to an API Gateway endpoint URL provided by you to integrate an identity provider of your choice.\n\n :type LoggingRole: string\n :param LoggingRole: A value that allows the service to write your SFTP users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.\n\n :type Tags: list\n :param Tags: Key-value pairs that can be used to group and search for servers.\n (dict) --Creates a key-value pair for a specific resource. Tags are metadata that you can use to search for and group a resource for various purposes. You can apply tags to servers, users, and roles. A tag key can take more than one value. For example, to group servers for accounting purposes, you might create a tag called Group and assign the values Research and Accounting to that group.\n Key (string) -- [REQUIRED]The name assigned to the tag that you create.\n Value (string) -- [REQUIRED]This property contains one or more values that you assigned to the key name you create.\n \n \n\n :rtype: dict\n :return: {\n 'ServerId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_user(HomeDirectory=None, Policy=None, Role=None, ServerId=None, SshPublicKeyBody=None, Tags=None, UserName=None):\n \"\"\"\n Adds a user and associates them with an existing Secure File Transfer Protocol (SFTP) server. Using parameters for CreateUser , you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. 
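A minimal sketch (illustrative only; the server ID, IAM role ARN, and user name below are hypothetical):

    import boto3

    transfer = boto3.client('transfer')
    # Create an SFTP user with the required fields plus a home directory.
    response = transfer.create_user(
        ServerId='s-0123456789abcdef0',
        UserName='alice',
        Role='arn:aws:iam::111122223333:role/sftp-access',
        HomeDirectory='/home/alice'
    )
    print(response['ServerId'], response['UserName'])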
You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.\n The response returns the UserName and ServerId values of the new user for that server.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user(\n HomeDirectory='string',\n Policy='string',\n Role='string',\n ServerId='string',\n SshPublicKeyBody='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n UserName='string'\n )\n \n \n :type HomeDirectory: string\n :param HomeDirectory: The landing directory (folder) for a user when they log in to the server using their SFTP client. An example is ``/home/username``.\n\n :type Policy: string\n :param Policy: A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName} , ${Transfer:HomeDirectory} , and ${Transfer:HomeBucket} .\n\n :type Role: string\n :param Role: [REQUIRED]\n The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.\n \n\n :type ServerId: string\n :param ServerId: [REQUIRED]\n A system-assigned unique identifier for an SFTP server instance. This is the specific SFTP server that you added your user to.\n \n\n :type SshPublicKeyBody: string\n :param SshPublicKeyBody: The public portion of the Secure Shell (SSH) key used to authenticate the user to the SFTP server.\n\n :type Tags: list\n :param Tags: Key-value pairs that can be used to group and search for users. Tags are metadata attached to users for any purpose.\n (dict) --Creates a key-value pair for a specific resource. Tags are metadata that you can use to search for and group a resource for various purposes. You can apply tags to servers, users, and roles. A tag key can take more than one value. For example, to group servers for accounting purposes, you might create a tag called Group and assign the values Research and Accounting to that group.\n Key (string) -- [REQUIRED]The name assigned to the tag that you create.\n Value (string) -- [REQUIRED]This property contains one or more values that you assigned to the key name you create.\n \n \n\n :type UserName: string\n :param UserName: [REQUIRED]\n A unique string that identifies a user and is associated with a server as specified by the ServerId .\n \n\n :rtype: dict\n :return: {\n 'ServerId': 'string',\n 'UserName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_server(ServerId=None):\n \"\"\"\n Deletes the Secure File Transfer Protocol (SFTP) server that you specify. 
If you used SERVICE_MANAGED as your IdentityProviderType , you need to delete all users associated with this server before deleting the server itself.\n    No response returns from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_server(\n    ServerId='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A unique system-assigned identifier for an SFTP server instance.\n    \n\n    \"\"\"\n    pass\n\ndef delete_ssh_public_key(ServerId=None, SshPublicKeyId=None, UserName=None):\n    \"\"\"\n    Deletes a user's Secure Shell (SSH) public key.\n    No response is returned from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_ssh_public_key(\n    ServerId='string',\n    SshPublicKeyId='string',\n    UserName='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server instance that has the user assigned to it.\n    \n\n    :type SshPublicKeyId: string\n    :param SshPublicKeyId: [REQUIRED]\n    A unique identifier used to reference your user's specific SSH key.\n    \n\n    :type UserName: string\n    :param UserName: [REQUIRED]\n    A unique string that identifies a user whose public key is being deleted.\n    \n\n    \"\"\"\n    pass\n\ndef delete_user(ServerId=None, UserName=None):\n    \"\"\"\n    Deletes the user belonging to the server you specify.\n    No response returns from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_user(\n    ServerId='string',\n    UserName='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server instance that has the user assigned to it.\n    \n\n    :type UserName: string\n    :param UserName: [REQUIRED]\n    A unique string that identifies a user that is being deleted from the server.\n    \n\n    \"\"\"\n    pass\n\ndef describe_server(ServerId=None):\n    \"\"\"\n    Describes the server that you specify by passing the ServerId parameter.\n    The response contains a description of the server's properties.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_server(\n    ServerId='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server.\n    \n\n    :rtype: dict\n    :return: {\n        'Server': {\n            'Arn': 'string',\n            'IdentityProviderDetails': {\n                'Url': 'string',\n                'InvocationRole': 'string'\n            },\n            'IdentityProviderType': 'SERVICE_MANAGED'|'API_GATEWAY',\n            'LoggingRole': 'string',\n            'ServerId': 'string',\n            'State': 'OFFLINE'|'ONLINE'|'STARTING'|'STOPPING'|'START_FAILED'|'STOP_FAILED',\n            'Tags': [\n                {\n                    'Key': 'string',\n                    'Value': 'string'\n                },\n            ],\n            'UserCount': 123\n        }\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef describe_user(ServerId=None, UserName=None):\n    \"\"\"\n    Describes the user assigned to a specific server, as identified by its ServerId property.\n    The response from this call returns the properties of the user associated with the ServerId value that was specified.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_user(\n    ServerId='string',\n    UserName='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server that has this user assigned.\n    \n\n    :type UserName: string\n    :param UserName: [REQUIRED]\n    The name of the user assigned to one or more servers. 
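(Illustrative only, with invented identifiers; the response shape follows the :return: structure above:\n    import boto3\n    client = boto3.client('transfer')\n    user = client.describe_user(ServerId='s-0123456789abcdef0', UserName='alice')['User']\n    print(user['HomeDirectory'], user['Role'])\n    ) 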
User names are part of the sign-in credentials to use the AWS Transfer service and perform file transfer tasks.\n \n\n :rtype: dict\n :return: {\n 'ServerId': 'string',\n 'User': {\n 'Arn': 'string',\n 'HomeDirectory': 'string',\n 'Policy': 'string',\n 'Role': 'string',\n 'SshPublicKeys': [\n {\n 'DateImported': datetime(2015, 1, 1),\n 'SshPublicKeyBody': 'string',\n 'SshPublicKeyId': 'string'\n },\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'UserName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef import_ssh_public_key(ServerId=None, SshPublicKeyBody=None, UserName=None):\n \"\"\"\n Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to a specific server, identified by ServerId .\n The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId .\n See also: AWS API Documentation\n \n \n :example: response = client.import_ssh_public_key(\n ServerId='string',\n SshPublicKeyBody='string',\n UserName='string'\n )\n \n \n :type ServerId: string\n :param ServerId: [REQUIRED]\n A system-assigned unique identifier for an SFTP server.\n \n\n :type SshPublicKeyBody: string\n :param SshPublicKeyBody: [REQUIRED]\n The public key portion of an SSH key pair.\n \n\n :type UserName: string\n :param UserName: [REQUIRED]\n The name of the user account that is assigned to one or more servers.\n \n\n :rtype: dict\n :return: {\n 'ServerId': 'string',\n 'SshPublicKeyId': 'string',\n 'UserName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_servers(MaxResults=None, NextToken=None):\n \"\"\"\n Lists the Secure File Transfer Protocol (SFTP) servers that are associated with your AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_servers(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: Specifies the number of servers to return as a response to the ListServers query.\n\n :type NextToken: string\n :param NextToken: When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'Servers': [\n {\n 'Arn': 'string',\n 'IdentityProviderType': 'SERVICE_MANAGED'|'API_GATEWAY',\n 'LoggingRole': 'string',\n 'ServerId': 'string',\n 'State': 'OFFLINE'|'ONLINE'|'STARTING'|'STOPPING'|'START_FAILED'|'STOP_FAILED',\n 'UserCount': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_resource(Arn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists all of the tags associated with the Amazon Resource Number (ARN) you specify. The resource can be a user, server, or role.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n Arn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n Requests the tags associated with a particular Amazon Resource Name (ARN). 
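(A hedged sketch of paging through results with NextToken — the server ARN is invented, and the loop shape simply follows the NextToken convention described for the list operations in this module:\n    import boto3\n    client = boto3.client('transfer')\n    kwargs = {'Arn': 'arn:aws:transfer:us-east-1:111122223333:server/s-0123456789abcdef0'}\n    while True:\n        page = client.list_tags_for_resource(**kwargs)\n        for tag in page.get('Tags', []):\n            print(tag['Key'], tag['Value'])\n        if 'NextToken' not in page:\n            break\n        kwargs['NextToken'] = page['NextToken']\n    ) 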
An ARN is an identifier for a specific AWS resource, such as a server, user, or role.\n    \n\n    :type MaxResults: integer\n    :param MaxResults: Specifies the number of tags to return as a response to the ListTagsForResource request.\n\n    :type NextToken: string\n    :param NextToken: When additional results are obtained from the ListTagsForResource command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.\n\n    :rtype: dict\n    :return: {\n        'Arn': 'string',\n        'NextToken': 'string',\n        'Tags': [\n            {\n                'Key': 'string',\n                'Value': 'string'\n            },\n        ]\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_users(MaxResults=None, NextToken=None, ServerId=None):\n    \"\"\"\n    Lists the users for the server that you specify by passing the ServerId parameter.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_users(\n    MaxResults=123,\n    NextToken='string',\n    ServerId='string'\n    )\n    \n    \n    :type MaxResults: integer\n    :param MaxResults: Specifies the number of users to return as a response to the ListUsers request.\n\n    :type NextToken: string\n    :param NextToken: When additional results can be obtained from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional users.\n\n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server that has users assigned to it.\n    \n\n    :rtype: dict\n    :return: {\n        'NextToken': 'string',\n        'ServerId': 'string',\n        'Users': [\n            {\n                'Arn': 'string',\n                'HomeDirectory': 'string',\n                'Role': 'string',\n                'SshPublicKeyCount': 123,\n                'UserName': 'string'\n            },\n        ]\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef start_server(ServerId=None):\n    \"\"\"\n    Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE . It has no impact on an SFTP server that is already ONLINE . An ONLINE server can accept and process file transfer jobs.\n    The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.\n    No response is returned from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.start_server(\n    ServerId='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server that you start.\n    \n\n    \"\"\"\n    pass\n\ndef stop_server(ServerId=None):\n    \"\"\"\n    Changes the state of an SFTP server from ONLINE to OFFLINE . An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.\n    The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.\n    No response is returned from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_server(\n    ServerId='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server that you stopped.\n    \n\n    \"\"\"\n    pass\n\ndef tag_resource(Arn=None, Tags=None):\n    \"\"\"\n    Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). 
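(Hedged sketch — the ARN is invented, and the tag values reuse the Group/Research example given in the tag documentation below:\n    import boto3\n    client = boto3.client('transfer')\n    client.tag_resource(Arn='arn:aws:transfer:us-east-1:111122223333:server/s-0123456789abcdef0', Tags=[{'Key': 'Group', 'Value': 'Research'}])\n    ) 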
Resources are users, servers, roles, and other entities.\n    There is no response returned from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.tag_resource(\n    Arn='string',\n    Tags=[\n        {\n            'Key': 'string',\n            'Value': 'string'\n        },\n    ]\n    )\n    \n    \n    :type Arn: string\n    :param Arn: [REQUIRED]\n    An Amazon Resource Name (ARN) for a specific AWS resource, such as a server, user, or role.\n    \n\n    :type Tags: list\n    :param Tags: [REQUIRED]\n    Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to user accounts for any purpose.\n    (dict) --Creates a key-value pair for a specific resource. Tags are metadata that you can use to search for and group a resource for various purposes. You can apply tags to servers, users, and roles. A tag key can take more than one value. For example, to group servers for accounting purposes, you might create a tag called Group and assign the values Research and Accounting to that group.\n    Key (string) -- [REQUIRED]The name assigned to the tag that you create.\n    Value (string) -- [REQUIRED]This property contains one or more values that you assigned to the key name you create.\n    \n    \n\n    \"\"\"\n    pass\n\ndef test_identity_provider(ServerId=None, UserName=None, UserPassword=None):\n    \"\"\"\n    If the IdentityProviderType of the server is API_Gateway , tests whether your API Gateway is set up successfully. We highly recommend that you call this method to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.test_identity_provider(\n    ServerId='string',\n    UserName='string',\n    UserPassword='string'\n    )\n    \n    \n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.\n    \n\n    :type UserName: string\n    :param UserName: [REQUIRED]\n    This request parameter is the name of the user account to be tested.\n    \n\n    :type UserPassword: string\n    :param UserPassword: The password of the user account to be tested.\n\n    :rtype: dict\n    :return: {\n        'Message': 'string',\n        'StatusCode': 123,\n        'Url': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef untag_resource(Arn=None, TagKeys=None):\n    \"\"\"\n    Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.\n    No response is returned from this call.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.untag_resource(\n    Arn='string',\n    TagKeys=[\n        'string',\n    ]\n    )\n    \n    \n    :type Arn: string\n    :param Arn: [REQUIRED]\n    This is the value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.\n    \n\n    :type TagKeys: list\n    :param TagKeys: [REQUIRED]\n    TagKeys are key-value pairs assigned to ARNs that can be used to group and search for resources by type. 
This metadata can be attached to resources for any purpose.\n    (string) --\n    \n\n    \"\"\"\n    pass\n\ndef update_server(IdentityProviderDetails=None, LoggingRole=None, ServerId=None):\n    \"\"\"\n    Updates the server properties after that server has been created.\n    The UpdateServer call returns the ServerId of the Secure File Transfer Protocol (SFTP) server you updated.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_server(\n    IdentityProviderDetails={\n        'Url': 'string',\n        'InvocationRole': 'string'\n    },\n    LoggingRole='string',\n    ServerId='string'\n    )\n    \n    \n    :type IdentityProviderDetails: dict\n    :param IdentityProviderDetails: This response parameter is an array containing all of the information required to call a customer's authentication API method.\n    Url (string) --The IdentityProviderDetail parameter contains the location of the service endpoint used to authenticate users.\n    InvocationRole (string) --The Role parameter provides the type of InvocationRole used to authenticate the user account.\n    \n\n    :type LoggingRole: string\n    :param LoggingRole: Changes the AWS Identity and Access Management (IAM) role that allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging on or off.\n\n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.\n    \n\n    :rtype: dict\n    :return: {\n        'ServerId': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef update_user(HomeDirectory=None, Policy=None, Role=None, ServerId=None, UserName=None):\n    \"\"\"\n    Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the UserName and ServerId you specify.\n    The response returns the ServerId and the UserName for the updated user.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_user(\n    HomeDirectory='string',\n    Policy='string',\n    Role='string',\n    ServerId='string',\n    UserName='string'\n    )\n    \n    \n    :type HomeDirectory: string\n    :param HomeDirectory: The HomeDirectory parameter specifies the landing directory (folder) for a user when they log in to the server using their client. An example would be: ``/home/username`` .\n\n    :type Policy: string\n    :param Policy: Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName} , ${Transfer:HomeDirectory} , and ${Transfer:HomeBucket} .\n\n    :type Role: string\n    :param Role: The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the Secure File Transfer Protocol (SFTP) server to access your resources when servicing your SFTP user's transfer requests.\n\n    :type ServerId: string\n    :param ServerId: [REQUIRED]\n    A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.\n    \n\n    :type UserName: string\n    :param UserName: [REQUIRED]\n    A unique string that identifies a user and is associated with a server as specified by the ServerId. 
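(Illustrative only — the identifiers and the new home directory are invented:\n    import boto3\n    client = boto3.client('transfer')\n    client.update_user(ServerId='s-0123456789abcdef0', UserName='alice', HomeDirectory='/home/alice')\n    ) 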
This is the string that will be used by your user when they log in to your SFTP server.\n \n\n :rtype: dict\n :return: {\n 'ServerId': 'string',\n 'UserName': 'string'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6079519391059875, "alphanum_fraction": 0.6168060898780823, "avg_line_length": 58.68060302734375, "blob_id": "08c73e86c8bed3219052364f4ef7d86de4eb40e8", "content_id": "05fe34218d2a881a1916f922638779ea5a1ddcdf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71379, "license_type": "permissive", "max_line_length": 674, "num_lines": 1196, "path": "/pyboto3/costexplorer.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_cost_and_usage(TimePeriod=None, Granularity=None, Filter=None, Metrics=None, GroupBy=None, NextPageToken=None):\n \"\"\"\n Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric, such as BlendedCosts or Quantity , that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ , in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. 
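(Editor's sketch, not generated from the API model — the dates, metric, and grouping are illustrative choices, and 'ce' is the boto3 service name for Cost Explorer:\n    import boto3\n    ce = boto3.client('ce')\n    response = ce.get_cost_and_usage(\n        TimePeriod={'Start': '2017-01-01', 'End': '2017-05-01'},\n        Granularity='MONTHLY',\n        Metrics=['UnblendedCost'],\n        GroupBy=[{'Type': 'DIMENSION', 'Key': 'SERVICE'}]\n    )\n    for period in response['ResultsByTime']:\n        for group in period['Groups']:\n            print(group['Keys'], group['Metrics']['UnblendedCost']['Amount'])\n    ) 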
Master accounts in an organization in AWS Organizations have access to all member accounts.\n See also: AWS API Documentation\n \n \n :example: response = client.get_cost_and_usage(\n TimePeriod={\n 'Start': 'string',\n 'End': 'string'\n },\n Granularity='DAILY'|'MONTHLY'|'HOURLY',\n Filter={\n 'Or': [\n {'... recursive ...'},\n ],\n 'And': [\n {'... recursive ...'},\n ],\n 'Not': {'... recursive ...'},\n 'Dimensions': {\n 'Key': 'AZ'|'INSTANCE_TYPE'|'LINKED_ACCOUNT'|'OPERATION'|'PURCHASE_TYPE'|'REGION'|'SERVICE'|'USAGE_TYPE'|'USAGE_TYPE_GROUP'|'RECORD_TYPE'|'OPERATING_SYSTEM'|'TENANCY'|'SCOPE'|'PLATFORM'|'SUBSCRIPTION_ID'|'LEGAL_ENTITY_NAME'|'DEPLOYMENT_OPTION'|'DATABASE_ENGINE'|'CACHE_ENGINE'|'INSTANCE_TYPE_FAMILY'|'BILLING_ENTITY'|'RESERVATION_ID',\n 'Values': [\n 'string',\n ]\n },\n 'Tags': {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n }\n },\n Metrics=[\n 'string',\n ],\n GroupBy=[\n {\n 'Type': 'DIMENSION'|'TAG',\n 'Key': 'string'\n },\n ],\n NextPageToken='string'\n )\n \n \n :type TimePeriod: dict\n :param TimePeriod: Sets the start and end dates for retrieving AWS costs. The start date is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 and end is 2017-05-01 , then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01 .\n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n\n :type Granularity: string\n :param Granularity: Sets the AWS cost granularity to MONTHLY or DAILY . If Granularity isn't set, the response object doesn't include the Granularity , either MONTHLY or DAILY .\n The GetCostAndUsageRequest operation supports only DAILY and MONTHLY granularities.\n \n\n :type Filter: dict\n :param Filter: Filters AWS costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression .\n Or (list) --Return results that match either Dimension object.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . 
The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n And (list) --Return results that match both Dimension objects.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n Not (dict) --Return results that don't match a Dimension object.\n Dimensions (dict) --The specific Dimension to use for Expression .\n Key (string) --The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones.\n Values (list) --The metadata values that you can use to filter and group your results. You can use GetDimensionValues to find specific values.\n Valid values for the SERVICE dimension are Amazon Elastic Compute Cloud - Compute , Amazon Elasticsearch Service , Amazon ElastiCache , Amazon Redshift , and Amazon Relational Database Service .\n (string) --\n \n Tags (dict) --The specific Tag to use for Expression .\n Key (string) --The key for the tag.\n Values (list) --The specific value of the tag.\n (string) --\n \n \n\n :type Metrics: list\n :param Metrics: Which metrics are returned in the query. For more information about blended and unblended rates, see Why does the 'blended' annotation appear on some line items in my bill? .\n Valid values are AmortizedCost , BlendedCost , NetAmortizedCost , NetUnblendedCost , NormalizedUsageAmount , UnblendedCost , and UsageQuantity .\n Note\n If you return the UsageQuantity metric, the service aggregates all usage numbers without taking into account the units. 
For example, if you aggregate usageQuantity across all of Amazon EC2, the results aren't meaningful because Amazon EC2 compute hours and data transfer are measured in different units (for example, hours vs. GB). To get more meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups .\n Metrics is required for GetCostAndUsage requests.\n (string) --\n \n\n :type GroupBy: list\n :param GroupBy: You can group AWS costs using up to two different groups, either dimensions, tag keys, or both.\n When you group by tag key, you get all tag values, including empty strings.\n Valid values are AZ , INSTANCE_TYPE , LEGAL_ENTITY_NAME , LINKED_ACCOUNT , OPERATION , PLATFORM , PURCHASE_TYPE , SERVICE , TAGS , TENANCY , and USAGE_TYPE .\n (dict) --Represents a group when you specify a group by criteria or in the response to a query with a specific grouping.\n Type (string) --The string that represents the type of group.\n Key (string) --The string that represents a key for a specified group.\n \n \n\n :type NextPageToken: string\n :param NextPageToken: The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n\n :rtype: dict\n :return: {\n 'NextPageToken': 'string',\n 'GroupDefinitions': [\n {\n 'Type': 'DIMENSION'|'TAG',\n 'Key': 'string'\n },\n ],\n 'ResultsByTime': [\n {\n 'TimePeriod': {\n 'Start': 'string',\n 'End': 'string'\n },\n 'Total': {\n 'string': {\n 'Amount': 'string',\n 'Unit': 'string'\n }\n },\n 'Groups': [\n {\n 'Keys': [\n 'string',\n ],\n 'Metrics': {\n 'string': {\n 'Amount': 'string',\n 'Unit': 'string'\n }\n }\n },\n ],\n 'Estimated': True|False\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_cost_forecast(TimePeriod=None, Metric=None, Granularity=None, Filter=None, PredictionIntervalLevel=None):\n \"\"\"\n Retrieves a forecast for how much Amazon Web Services predicts that you will spend over the forecast time period that you select, based on your past costs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_cost_forecast(\n TimePeriod={\n 'Start': 'string',\n 'End': 'string'\n },\n Metric='BLENDED_COST'|'UNBLENDED_COST'|'AMORTIZED_COST'|'NET_UNBLENDED_COST'|'NET_AMORTIZED_COST'|'USAGE_QUANTITY'|'NORMALIZED_USAGE_AMOUNT',\n Granularity='DAILY'|'MONTHLY'|'HOURLY',\n Filter={\n 'Or': [\n {'... recursive ...'},\n ],\n 'And': [\n {'... recursive ...'},\n ],\n 'Not': {'... recursive ...'},\n 'Dimensions': {\n 'Key': 'AZ'|'INSTANCE_TYPE'|'LINKED_ACCOUNT'|'OPERATION'|'PURCHASE_TYPE'|'REGION'|'SERVICE'|'USAGE_TYPE'|'USAGE_TYPE_GROUP'|'RECORD_TYPE'|'OPERATING_SYSTEM'|'TENANCY'|'SCOPE'|'PLATFORM'|'SUBSCRIPTION_ID'|'LEGAL_ENTITY_NAME'|'DEPLOYMENT_OPTION'|'DATABASE_ENGINE'|'CACHE_ENGINE'|'INSTANCE_TYPE_FAMILY'|'BILLING_ENTITY'|'RESERVATION_ID',\n 'Values': [\n 'string',\n ]\n },\n 'Tags': {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n }\n },\n PredictionIntervalLevel=123\n )\n \n \n :type TimePeriod: dict\n :param TimePeriod: [REQUIRED]\n The period of time that you want the forecast to cover.\n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. 
For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n\n :type Metric: string\n :param Metric: [REQUIRED]\n Which metric Cost Explorer uses to create your forecast. For more information about blended and unblended rates, see Why does the 'blended' annotation appear on some line items in my bill? .\n Valid values for a GetCostForecast call are the following:\n AmortizedCost\n BlendedCost\n NetAmortizedCost\n NetUnblendedCost\n UnblendedCost\n \n\n :type Granularity: string\n :param Granularity: [REQUIRED]\n How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts.\n The GetCostForecast operation supports only DAILY and MONTHLY granularities.\n \n\n :type Filter: dict\n :param Filter: The filters that you want to use to filter your forecast. Cost Explorer API supports all of the Cost Explorer filters.\n Or (list) --Return results that match either Dimension object.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n And (list) --Return results that match both Dimension objects.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. 
For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n Not (dict) --Return results that don't match a Dimension object.\n Dimensions (dict) --The specific Dimension to use for Expression .\n Key (string) --The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones.\n Values (list) --The metadata values that you can use to filter and group your results. You can use GetDimensionValues to find specific values.\n Valid values for the SERVICE dimension are Amazon Elastic Compute Cloud - Compute , Amazon Elasticsearch Service , Amazon ElastiCache , Amazon Redshift , and Amazon Relational Database Service .\n (string) --\n \n Tags (dict) --The specific Tag to use for Expression .\n Key (string) --The key for the tag.\n Values (list) --The specific value of the tag.\n (string) --\n \n \n\n :type PredictionIntervalLevel: integer\n :param PredictionIntervalLevel: Cost Explorer always returns the mean forecast as a single point. You can request a prediction interval around the mean by specifying a confidence level. The higher the confidence level, the more confident Cost Explorer is about the actual value falling in the prediction interval. Higher confidence levels result in wider prediction intervals.\n\n :rtype: dict\n :return: {\n 'Total': {\n 'Amount': 'string',\n 'Unit': 'string'\n },\n 'ForecastResultsByTime': [\n {\n 'TimePeriod': {\n 'Start': 'string',\n 'End': 'string'\n },\n 'MeanValue': 'string',\n 'PredictionIntervalLowerBound': 'string',\n 'PredictionIntervalUpperBound': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_dimension_values(SearchString=None, TimePeriod=None, Dimension=None, Context=None, NextPageToken=None):\n \"\"\"\n Retrieves all available filter values for a specified filter over a period of time. You can search the dimension values for an arbitrary string.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dimension_values(\n SearchString='string',\n TimePeriod={\n 'Start': 'string',\n 'End': 'string'\n },\n Dimension='AZ'|'INSTANCE_TYPE'|'LINKED_ACCOUNT'|'OPERATION'|'PURCHASE_TYPE'|'REGION'|'SERVICE'|'USAGE_TYPE'|'USAGE_TYPE_GROUP'|'RECORD_TYPE'|'OPERATING_SYSTEM'|'TENANCY'|'SCOPE'|'PLATFORM'|'SUBSCRIPTION_ID'|'LEGAL_ENTITY_NAME'|'DEPLOYMENT_OPTION'|'DATABASE_ENGINE'|'CACHE_ENGINE'|'INSTANCE_TYPE_FAMILY'|'BILLING_ENTITY'|'RESERVATION_ID',\n Context='COST_AND_USAGE'|'RESERVATIONS',\n NextPageToken='string'\n )\n \n \n :type SearchString: string\n :param SearchString: The value that you want to search the filter values for.\n\n :type TimePeriod: dict\n :param TimePeriod: [REQUIRED]\n The start and end dates for retrieving the dimension values. The start date is inclusive, but the end date is exclusive. 
For example, if start is 2017-01-01 and end is 2017-05-01 , then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01 .\n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n\n :type Dimension: string\n :param Dimension: [REQUIRED]\n The name of the dimension. Each Dimension is available for a different Context . For more information, see Context .\n \n\n :type Context: string\n :param Context: The context for the call to GetDimensionValues . This can be RESERVATIONS or COST_AND_USAGE . The default value is COST_AND_USAGE . If the context is set to RESERVATIONS , the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE , the resulting dimension values can be used in the GetCostAndUsage operation.\n If you set the context to COST_AND_USAGE , you can use the following dimensions for searching:\n AZ - The Availability Zone. An example is us-east-1a .\n DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.\n INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge .\n LEGAL_ENTITY_NAME - The name of the organization that sells you AWS services, such as Amazon Web Services.\n LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the AWS ID of the member account.\n OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.\n OPERATION - The action performed. Examples include RunInstance and CreateBucket .\n PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.\n PURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand Instances and Standard Reserved Instances.\n SERVICE - The AWS service such as Amazon DynamoDB.\n USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs.\n USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch Alarms. The response for this operation includes a unit attribute.\n RECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.\n If you set the context to RESERVATIONS , you can use the following dimensions for searching:\n AZ - The Availability Zone. An example is us-east-1a .\n CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.\n DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ .\n INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge .\n LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the AWS ID of the member account.\n PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.\n REGION - The AWS Region.\n SCOPE (Utilization only) - The scope of a Reserved Instance (RI). 
Values are regional or a single Availability Zone.\n TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).\n TENANCY - The tenancy of a resource. Examples are shared or dedicated.\n \n\n :type NextPageToken: string\n :param NextPageToken: The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n\n :rtype: dict\n :return: {\n 'DimensionValues': [\n {\n 'Value': 'string',\n 'Attributes': {\n 'string': 'string'\n }\n },\n ],\n 'ReturnSize': 123,\n 'TotalSize': 123,\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n AZ - The Availability Zone. An example is us-east-1a .\n DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.\n INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge .\n LEGAL_ENTITY_NAME - The name of the organization that sells you AWS services, such as Amazon Web Services.\n LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the AWS ID of the member account.\n OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.\n OPERATION - The action performed. Examples include RunInstance and CreateBucket .\n PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.\n PURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand Instances and Standard Reserved Instances.\n SERVICE - The AWS service such as Amazon DynamoDB.\n USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs.\n USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch Alarms. The response for this operation includes a unit attribute.\n RECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_reservation_coverage(TimePeriod=None, GroupBy=None, Granularity=None, Filter=None, Metrics=None, NextPageToken=None):\n \"\"\"\n Retrieves the reservation coverage for your account. This enables you to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's master account can see the coverage of the associated member accounts. For any time period, you can filter data about reservation usage by the following dimensions:\n To determine valid values for a dimension, use the GetDimensionValues operation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_reservation_coverage(\n TimePeriod={\n 'Start': 'string',\n 'End': 'string'\n },\n GroupBy=[\n {\n 'Type': 'DIMENSION'|'TAG',\n 'Key': 'string'\n },\n ],\n Granularity='DAILY'|'MONTHLY'|'HOURLY',\n Filter={\n 'Or': [\n {'... recursive ...'},\n ],\n 'And': [\n {'... 
recursive ...'},\n ],\n 'Not': {'... recursive ...'},\n 'Dimensions': {\n 'Key': 'AZ'|'INSTANCE_TYPE'|'LINKED_ACCOUNT'|'OPERATION'|'PURCHASE_TYPE'|'REGION'|'SERVICE'|'USAGE_TYPE'|'USAGE_TYPE_GROUP'|'RECORD_TYPE'|'OPERATING_SYSTEM'|'TENANCY'|'SCOPE'|'PLATFORM'|'SUBSCRIPTION_ID'|'LEGAL_ENTITY_NAME'|'DEPLOYMENT_OPTION'|'DATABASE_ENGINE'|'CACHE_ENGINE'|'INSTANCE_TYPE_FAMILY'|'BILLING_ENTITY'|'RESERVATION_ID',\n 'Values': [\n 'string',\n ]\n },\n 'Tags': {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n }\n },\n Metrics=[\n 'string',\n ],\n NextPageToken='string'\n )\n \n \n :type TimePeriod: dict\n :param TimePeriod: [REQUIRED]\n The start and end dates of the period that you want to retrieve data about reservation coverage for. You can retrieve data for a maximum of 13 months: the last 12 months and the current month. The start date is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 and end is 2017-05-01 , then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01 .\n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n\n :type GroupBy: list\n :param GroupBy: You can group the data by the following attributes:\n AZ\n CACHE_ENGINE\n DATABASE_ENGINE\n DEPLOYMENT_OPTION\n INSTANCE_TYPE\n LINKED_ACCOUNT\n OPERATING_SYSTEM\n PLATFORM\n REGION\n TAG\n TENANCY\n (dict) --Represents a group when you specify a group by criteria or in the response to a query with a specific grouping.\n Type (string) --The string that represents the type of group.\n Key (string) --The string that represents a key for a specified group.\n \n \n\n :type Granularity: string\n :param Granularity: The granularity of the AWS cost data for the reservation. Valid values are MONTHLY and DAILY .\n If GroupBy is set, Granularity can't be set. If Granularity isn't set, the response object doesn't include Granularity , either MONTHLY or DAILY .\n The GetReservationCoverage operation supports only DAILY and MONTHLY granularities.\n \n\n :type Filter: dict\n :param Filter: Filters utilization data by dimensions. You can filter by the following dimensions:\n AZ\n CACHE_ENGINE\n DATABASE_ENGINE\n DEPLOYMENT_OPTION\n INSTANCE_TYPE\n LINKED_ACCOUNT\n OPERATING_SYSTEM\n PLATFORM\n REGION\n SERVICE\n TAG\n TENANCY\n GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.\n If you don't provide a SERVICE filter, Cost Explorer defaults to EC2.\n Or (list) --Return results that match either Dimension object.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . 
The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n And (list) --Return results that match both Dimension objects.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n Not (dict) --Return results that don't match a Dimension object.\n Dimensions (dict) --The specific Dimension to use for Expression .\n Key (string) --The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones.\n Values (list) --The metadata values that you can use to filter and group your results. 
You can use GetDimensionValues to find specific values.\n Valid values for the SERVICE dimension are Amazon Elastic Compute Cloud - Compute , Amazon Elasticsearch Service , Amazon ElastiCache , Amazon Redshift , and Amazon Relational Database Service .\n (string) --\n \n Tags (dict) --The specific Tag to use for Expression .\n Key (string) --The key for the tag.\n Values (list) --The specific value of the tag.\n (string) --\n \n \n\n :type Metrics: list\n :param Metrics: \n (string) --\n \n\n :type NextPageToken: string\n :param NextPageToken: The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n\n :rtype: dict\n :return: {\n 'CoveragesByTime': [\n {\n 'TimePeriod': {\n 'Start': 'string',\n 'End': 'string'\n },\n 'Groups': [\n {\n 'Attributes': {\n 'string': 'string'\n },\n 'Coverage': {\n 'CoverageHours': {\n 'OnDemandHours': 'string',\n 'ReservedHours': 'string',\n 'TotalRunningHours': 'string',\n 'CoverageHoursPercentage': 'string'\n },\n 'CoverageNormalizedUnits': {\n 'OnDemandNormalizedUnits': 'string',\n 'ReservedNormalizedUnits': 'string',\n 'TotalRunningNormalizedUnits': 'string',\n 'CoverageNormalizedUnitsPercentage': 'string'\n },\n 'CoverageCost': {\n 'OnDemandCost': 'string'\n }\n }\n },\n ],\n 'Total': {\n 'CoverageHours': {\n 'OnDemandHours': 'string',\n 'ReservedHours': 'string',\n 'TotalRunningHours': 'string',\n 'CoverageHoursPercentage': 'string'\n },\n 'CoverageNormalizedUnits': {\n 'OnDemandNormalizedUnits': 'string',\n 'ReservedNormalizedUnits': 'string',\n 'TotalRunningNormalizedUnits': 'string',\n 'CoverageNormalizedUnitsPercentage': 'string'\n },\n 'CoverageCost': {\n 'OnDemandCost': 'string'\n }\n }\n },\n ],\n 'Total': {\n 'CoverageHours': {\n 'OnDemandHours': 'string',\n 'ReservedHours': 'string',\n 'TotalRunningHours': 'string',\n 'CoverageHoursPercentage': 'string'\n },\n 'CoverageNormalizedUnits': {\n 'OnDemandNormalizedUnits': 'string',\n 'ReservedNormalizedUnits': 'string',\n 'TotalRunningNormalizedUnits': 'string',\n 'CoverageNormalizedUnitsPercentage': 'string'\n },\n 'CoverageCost': {\n 'OnDemandCost': 'string'\n }\n },\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n TimePeriod (dict) -- [REQUIRED]\n The start and end dates of the period that you want to retrieve data about reservation coverage for. You can retrieve data for a maximum of 13 months: the last 12 months and the current month. The start date is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 and end is 2017-05-01 , then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01 .\n \n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n \n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. 
For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n \n \n GroupBy (list) -- You can group the data by the following attributes:\n \n AZ\n CACHE_ENGINE\n DATABASE_ENGINE\n DEPLOYMENT_OPTION\n INSTANCE_TYPE\n LINKED_ACCOUNT\n OPERATING_SYSTEM\n PLATFORM\n REGION\n TAG\n TENANCY\n \n \n (dict) --Represents a group when you specify a group by criteria or in the response to a query with a specific grouping.\n \n Type (string) --The string that represents the type of group.\n \n Key (string) --The string that represents a key for a specified group.\n \n \n \n \n \n Granularity (string) -- The granularity of the AWS cost data for the reservation. Valid values are MONTHLY and DAILY .\n If GroupBy is set, Granularity can't be set. If Granularity isn't set, the response object doesn't include Granularity , either MONTHLY or DAILY .\n The GetReservationCoverage operation supports only DAILY and MONTHLY granularities.\n \n Filter (dict) -- Filters utilization data by dimensions. You can filter by the following dimensions:\n \n AZ\n CACHE_ENGINE\n DATABASE_ENGINE\n DEPLOYMENT_OPTION\n INSTANCE_TYPE\n LINKED_ACCOUNT\n OPERATING_SYSTEM\n PLATFORM\n REGION\n SERVICE\n TAG\n TENANCY\n \n \n GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.\n If you don't provide a SERVICE filter, Cost Explorer defaults to EC2.\n \n Or (list) --Return results that match either Dimension object.\n \n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n \n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { \"Dimensions\": { \"Key\": \"INSTANCE_TYPE\", \"Values\": [ \"m4.xlarge\", c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"INSTANCE_TYPE\", \"Values\": [ \"m4.x.large\", \"c4.large\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }\n \n \n Note\n \n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { \"And\": [ ... ], \"DimensionValues\": { \"Dimension\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }\n \n \n \n \n And (list) --Return results that match both Dimension objects.\n \n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n \n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. 
For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { \"Dimensions\": { \"Key\": \"INSTANCE_TYPE\", \"Values\": [ \"m4.xlarge\", c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"INSTANCE_TYPE\", \"Values\": [ \"m4.x.large\", \"c4.large\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }\n \n \n Note\n \n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { \"And\": [ ... ], \"DimensionValues\": { \"Dimension\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }\n \n \n \n \n Not (dict) --Return results that don't match a Dimension object.\n \n Dimensions (dict) --The specific Dimension to use for Expression .\n \n Key (string) --The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones.\n \n Values (list) --The metadata values that you can use to filter and group your results. You can use GetDimensionValues to find specific values.\n Valid values for the SERVICE dimension are Amazon Elastic Compute Cloud - Compute , Amazon Elasticsearch Service , Amazon ElastiCache , Amazon Redshift , and Amazon Relational Database Service .\n \n (string) --\n \n \n \n \n Tags (dict) --The specific Tag to use for Expression .\n \n Key (string) --The key for the tag.\n \n Values (list) --The specific value of the tag.\n \n (string) --\n \n \n \n \n \n \n Metrics (list) -- \n (string) --\n \n \n NextPageToken (string) -- The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n \n \"\"\"\n pass\n\ndef get_reservation_purchase_recommendation(AccountId=None, Service=None, AccountScope=None, LookbackPeriodInDays=None, TermInYears=None, PaymentOption=None, ServiceSpecification=None, PageSize=None, NextPageToken=None):\n \"\"\"\n Gets recommendations for which reservations to purchase. These recommendations could help you reduce your costs. Reservations provide a discounted hourly rate (up to 75%) compared to On-Demand pricing.\n AWS generates your recommendations by identifying your On-Demand usage during a specific time period and collecting your usage into categories that are eligible for a reservation. 
After AWS has these categories, it simulates every combination of reservations in each category of usage to identify the best number of each type of RI to purchase to maximize your estimated savings.\n For example, AWS automatically aggregates your Amazon EC2 Linux, shared tenancy, and c4 family usage in the US West (Oregon) Region and recommends that you buy size-flexible regional reservations to apply to the c4 family usage. AWS recommends the smallest size instance in an instance family. This makes it easier to purchase a size-flexible RI. AWS also shows the equal number of normalized units so that you can purchase any instance size that you want. For this example, your RI recommendation would be for c4.large because that is the smallest size instance in the c4 instance family.\n See also: AWS API Documentation\n \n \n :example: response = client.get_reservation_purchase_recommendation(\n AccountId='string',\n Service='string',\n AccountScope='PAYER'|'LINKED',\n LookbackPeriodInDays='SEVEN_DAYS'|'THIRTY_DAYS'|'SIXTY_DAYS',\n TermInYears='ONE_YEAR'|'THREE_YEARS',\n PaymentOption='NO_UPFRONT'|'PARTIAL_UPFRONT'|'ALL_UPFRONT'|'LIGHT_UTILIZATION'|'MEDIUM_UTILIZATION'|'HEAVY_UTILIZATION',\n ServiceSpecification={\n 'EC2Specification': {\n 'OfferingClass': 'STANDARD'|'CONVERTIBLE'\n }\n },\n PageSize=123,\n NextPageToken='string'\n )\n \n \n :type AccountId: string\n :param AccountId: The account ID that is associated with the recommendation.\n\n :type Service: string\n :param Service: [REQUIRED]\n The specific service that you want recommendations for.\n \n\n :type AccountScope: string\n :param AccountScope: The account scope that you want recommendations for. PAYER means that AWS includes the master account and any member accounts when it calculates its recommendations. 
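# Hedged usage sketch for the recommendation operation documented above.
# 'Amazon Elastic Compute Cloud - Compute' is the Cost Explorer service name
# for EC2; all other values are illustrative.
import boto3

ce = boto3.client('ce', region_name='us-east-1')
response = ce.get_reservation_purchase_recommendation(
    Service='Amazon Elastic Compute Cloud - Compute',
    LookbackPeriodInDays='SIXTY_DAYS',
    TermInYears='ONE_YEAR',
    PaymentOption='NO_UPFRONT',
)
for rec in response['Recommendations']:
    summary = rec['RecommendationSummary']
    print(summary['CurrencyCode'], summary['TotalEstimatedMonthlySavingsAmount'])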
LINKED means that AWS includes only member accounts when it calculates its recommendations.\n Valid values are PAYER and LINKED .\n \n\n :type LookbackPeriodInDays: string\n :param LookbackPeriodInDays: The number of previous days that you want AWS to consider when it calculates your recommendations.\n\n :type TermInYears: string\n :param TermInYears: The reservation term that you want recommendations for.\n\n :type PaymentOption: string\n :param PaymentOption: The reservation purchase option that you want recommendations for.\n\n :type ServiceSpecification: dict\n :param ServiceSpecification: The hardware specifications for the service instances that you want recommendations for, such as standard or convertible Amazon EC2 instances.\n EC2Specification (dict) --The Amazon EC2 hardware specifications that you want AWS to provide recommendations for.\n OfferingClass (string) --Whether you want a recommendation for standard or convertible reservations.\n \n \n\n :type PageSize: integer\n :param PageSize: The number of recommendations that you want returned in a single response object.\n\n :type NextPageToken: string\n :param NextPageToken: The pagination token that indicates the next set of results that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'Metadata': {\n 'RecommendationId': 'string',\n 'GenerationTimestamp': 'string'\n },\n 'Recommendations': [\n {\n 'AccountScope': 'PAYER'|'LINKED',\n 'LookbackPeriodInDays': 'SEVEN_DAYS'|'THIRTY_DAYS'|'SIXTY_DAYS',\n 'TermInYears': 'ONE_YEAR'|'THREE_YEARS',\n 'PaymentOption': 'NO_UPFRONT'|'PARTIAL_UPFRONT'|'ALL_UPFRONT'|'LIGHT_UTILIZATION'|'MEDIUM_UTILIZATION'|'HEAVY_UTILIZATION',\n 'ServiceSpecification': {\n 'EC2Specification': {\n 'OfferingClass': 'STANDARD'|'CONVERTIBLE'\n }\n },\n 'RecommendationDetails': [\n {\n 'AccountId': 'string',\n 'InstanceDetails': {\n 'EC2InstanceDetails': {\n 'Family': 'string',\n 'InstanceType': 'string',\n 'Region': 'string',\n 'AvailabilityZone': 'string',\n 'Platform': 'string',\n 'Tenancy': 'string',\n 'CurrentGeneration': True|False,\n 'SizeFlexEligible': True|False\n },\n 'RDSInstanceDetails': {\n 'Family': 'string',\n 'InstanceType': 'string',\n 'Region': 'string',\n 'DatabaseEngine': 'string',\n 'DatabaseEdition': 'string',\n 'DeploymentOption': 'string',\n 'LicenseModel': 'string',\n 'CurrentGeneration': True|False,\n 'SizeFlexEligible': True|False\n },\n 'RedshiftInstanceDetails': {\n 'Family': 'string',\n 'NodeType': 'string',\n 'Region': 'string',\n 'CurrentGeneration': True|False,\n 'SizeFlexEligible': True|False\n },\n 'ElastiCacheInstanceDetails': {\n 'Family': 'string',\n 'NodeType': 'string',\n 'Region': 'string',\n 'ProductDescription': 'string',\n 'CurrentGeneration': True|False,\n 'SizeFlexEligible': True|False\n },\n 'ESInstanceDetails': {\n 'InstanceClass': 'string',\n 'InstanceSize': 'string',\n 'Region': 'string',\n 'CurrentGeneration': True|False,\n 'SizeFlexEligible': True|False\n }\n },\n 'RecommendedNumberOfInstancesToPurchase': 'string',\n 'RecommendedNormalizedUnitsToPurchase': 'string',\n 'MinimumNumberOfInstancesUsedPerHour': 'string',\n 'MinimumNormalizedUnitsUsedPerHour': 'string',\n 'MaximumNumberOfInstancesUsedPerHour': 'string',\n 'MaximumNormalizedUnitsUsedPerHour': 'string',\n 'AverageNumberOfInstancesUsedPerHour': 'string',\n 'AverageNormalizedUnitsUsedPerHour': 'string',\n 'AverageUtilization': 'string',\n 'EstimatedBreakEvenInMonths': 'string',\n 'CurrencyCode': 'string',\n 'EstimatedMonthlySavingsAmount': 'string',\n 'EstimatedMonthlySavingsPercentage': 'string',\n 
'EstimatedMonthlyOnDemandCost': 'string',\n 'EstimatedReservationCostForLookbackPeriod': 'string',\n 'UpfrontCost': 'string',\n 'RecurringStandardMonthlyCost': 'string'\n },\n ],\n 'RecommendationSummary': {\n 'TotalEstimatedMonthlySavingsAmount': 'string',\n 'TotalEstimatedMonthlySavingsPercentage': 'string',\n 'CurrencyCode': 'string'\n }\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_reservation_utilization(TimePeriod=None, GroupBy=None, Granularity=None, Filter=None, NextPageToken=None):\n \"\"\"\n Retrieves the reservation utilization for your account. Master accounts in an organization have access to member accounts. You can filter data by dimensions in a time period. You can use GetDimensionValues to determine the possible dimension values. Currently, you can group only by SUBSCRIPTION_ID .\n See also: AWS API Documentation\n \n \n :example: response = client.get_reservation_utilization(\n TimePeriod={\n 'Start': 'string',\n 'End': 'string'\n },\n GroupBy=[\n {\n 'Type': 'DIMENSION'|'TAG',\n 'Key': 'string'\n },\n ],\n Granularity='DAILY'|'MONTHLY'|'HOURLY',\n Filter={\n 'Or': [\n {'... recursive ...'},\n ],\n 'And': [\n {'... recursive ...'},\n ],\n 'Not': {'... recursive ...'},\n 'Dimensions': {\n 'Key': 'AZ'|'INSTANCE_TYPE'|'LINKED_ACCOUNT'|'OPERATION'|'PURCHASE_TYPE'|'REGION'|'SERVICE'|'USAGE_TYPE'|'USAGE_TYPE_GROUP'|'RECORD_TYPE'|'OPERATING_SYSTEM'|'TENANCY'|'SCOPE'|'PLATFORM'|'SUBSCRIPTION_ID'|'LEGAL_ENTITY_NAME'|'DEPLOYMENT_OPTION'|'DATABASE_ENGINE'|'CACHE_ENGINE'|'INSTANCE_TYPE_FAMILY'|'BILLING_ENTITY'|'RESERVATION_ID',\n 'Values': [\n 'string',\n ]\n },\n 'Tags': {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n }\n },\n NextPageToken='string'\n )\n \n \n :type TimePeriod: dict\n :param TimePeriod: [REQUIRED]\n Sets the start and end dates for retrieving RI utilization. The start date is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 and end is 2017-05-01 , then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01 .\n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n\n :type GroupBy: list\n :param GroupBy: Groups only by SUBSCRIPTION_ID . Metadata is included.\n (dict) --Represents a group when you specify a group by criteria or in the response to a query with a specific grouping.\n Type (string) --The string that represents the type of group.\n Key (string) --The string that represents a key for a specified group.\n \n \n\n :type Granularity: string\n :param Granularity: If GroupBy is set, Granularity can't be set. If Granularity isn't set, the response object doesn't include Granularity , either MONTHLY or DAILY . If both GroupBy and Granularity aren't set, GetReservationUtilization defaults to DAILY .\n The GetReservationUtilization operation supports only DAILY and MONTHLY granularities.\n \n\n :type Filter: dict\n :param Filter: Filters utilization data by dimensions. 
You can filter by the following dimensions:\n AZ\n CACHE_ENGINE\n DATABASE_ENGINE\n DEPLOYMENT_OPTION\n INSTANCE_TYPE\n LINKED_ACCOUNT\n OPERATING_SYSTEM\n PLATFORM\n REGION\n SERVICE\n SCOPE\n TENANCY\n GetReservationUtilization uses the same Expression object as the other operations, but only AND is supported among each dimension, and nesting is supported up to only one level deep. If there are multiple values for a dimension, they are OR'd together.\n Or (list) --Return results that match either Dimension object.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n And (list) --Return results that match both Dimension objects.\n (dict) --Use Expression to filter by cost or by usage. There are two patterns:\n Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for INSTANCE_TYPE==m4.xlarge OR INSTANCE_TYPE==c4.large . The Expression for that looks like this: { 'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.xlarge', c4.large ] } } The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.\n Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((INSTANCE_TYPE == m4.large OR INSTANCE_TYPE == m3.large) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer) . The Expression for that looks like this: { 'And': [ {'Or': [ {'Dimensions': { 'Key': 'INSTANCE_TYPE', 'Values': [ 'm4.x.large', 'c4.large' ] }}, {'Tags': { 'Key': 'TagName', 'Values': ['Value1'] } } ]}, {'Not': {'Dimensions': { 'Key': 'USAGE_TYPE', 'Values': ['DataTransfer'] }}} ] }\n Note\n Because each Expression can have only one operator, the service returns an error if more than one is specified. 
The following example shows an Expression object that creates an error.\n { 'And': [ ... ], 'DimensionValues': { 'Dimension': 'USAGE_TYPE', 'Values': [ 'DataTransfer' ] } }\n \n Not (dict) --Return results that don't match a Dimension object.\n Dimensions (dict) --The specific Dimension to use for Expression .\n Key (string) --The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones.\n Values (list) --The metadata values that you can use to filter and group your results. You can use GetDimensionValues to find specific values.\n Valid values for the SERVICE dimension are Amazon Elastic Compute Cloud - Compute , Amazon Elasticsearch Service , Amazon ElastiCache , Amazon Redshift , and Amazon Relational Database Service .\n (string) --\n \n Tags (dict) --The specific Tag to use for Expression .\n Key (string) --The key for the tag.\n Values (list) --The specific value of the tag.\n (string) --\n \n \n\n :type NextPageToken: string\n :param NextPageToken: The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n\n :rtype: dict\n :return: {\n 'UtilizationsByTime': [\n {\n 'TimePeriod': {\n 'Start': 'string',\n 'End': 'string'\n },\n 'Groups': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Attributes': {\n 'string': 'string'\n },\n 'Utilization': {\n 'UtilizationPercentage': 'string',\n 'UtilizationPercentageInUnits': 'string',\n 'PurchasedHours': 'string',\n 'PurchasedUnits': 'string',\n 'TotalActualHours': 'string',\n 'TotalActualUnits': 'string',\n 'UnusedHours': 'string',\n 'UnusedUnits': 'string',\n 'OnDemandCostOfRIHoursUsed': 'string',\n 'NetRISavings': 'string',\n 'TotalPotentialRISavings': 'string',\n 'AmortizedUpfrontFee': 'string',\n 'AmortizedRecurringFee': 'string',\n 'TotalAmortizedFee': 'string'\n }\n },\n ],\n 'Total': {\n 'UtilizationPercentage': 'string',\n 'UtilizationPercentageInUnits': 'string',\n 'PurchasedHours': 'string',\n 'PurchasedUnits': 'string',\n 'TotalActualHours': 'string',\n 'TotalActualUnits': 'string',\n 'UnusedHours': 'string',\n 'UnusedUnits': 'string',\n 'OnDemandCostOfRIHoursUsed': 'string',\n 'NetRISavings': 'string',\n 'TotalPotentialRISavings': 'string',\n 'AmortizedUpfrontFee': 'string',\n 'AmortizedRecurringFee': 'string',\n 'TotalAmortizedFee': 'string'\n }\n },\n ],\n 'Total': {\n 'UtilizationPercentage': 'string',\n 'UtilizationPercentageInUnits': 'string',\n 'PurchasedHours': 'string',\n 'PurchasedUnits': 'string',\n 'TotalActualHours': 'string',\n 'TotalActualUnits': 'string',\n 'UnusedHours': 'string',\n 'UnusedUnits': 'string',\n 'OnDemandCostOfRIHoursUsed': 'string',\n 'NetRISavings': 'string',\n 'TotalPotentialRISavings': 'string',\n 'AmortizedUpfrontFee': 'string',\n 'AmortizedRecurringFee': 'string',\n 'TotalAmortizedFee': 'string'\n },\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_tags(SearchString=None, TimePeriod=None, TagKey=None, NextPageToken=None):\n \"\"\"\n Queries for available tag keys and tag values for a specified period. 
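# Sketch of the Expression rules documented above for
# get_reservation_utilization: nesting only one level deep, AND between
# dimensions, and values within a single dimension OR'd together. The region,
# instance types, and dates are placeholders.
import boto3

ce = boto3.client('ce', region_name='us-east-1')
response = ce.get_reservation_utilization(
    TimePeriod={'Start': '2017-01-01', 'End': '2017-02-01'},
    Filter={
        'And': [
            {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1']}},
            {'Dimensions': {'Key': 'INSTANCE_TYPE',
                            'Values': ['m4.large', 'm4.xlarge']}},  # OR'd
        ]
    },
)
print(response['Total']['UtilizationPercentage'])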
You can search the tag values for an arbitrary string.\n See also: AWS API Documentation\n \n \n :example: response = client.get_tags(\n SearchString='string',\n TimePeriod={\n 'Start': 'string',\n 'End': 'string'\n },\n TagKey='string',\n NextPageToken='string'\n )\n \n \n :type SearchString: string\n :param SearchString: The value that you want to search for.\n\n :type TimePeriod: dict\n :param TimePeriod: [REQUIRED]\n The start and end dates for retrieving the dimension values. The start date is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 and end is 2017-05-01 , then the cost and usage data is retrieved from 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01 .\n Start (string) -- [REQUIRED]The beginning of the time period that you want the usage and costs for. The start date is inclusive. For example, if start is 2017-01-01 , AWS retrieves cost and usage data starting at 2017-01-01 up to the end date.\n End (string) -- [REQUIRED]The end of the time period that you want the usage and costs for. The end date is exclusive. For example, if end is 2017-05-01 , AWS retrieves cost and usage data from the start date up to, but not including, 2017-05-01 .\n \n\n :type TagKey: string\n :param TagKey: The key of the tag that you want to return values for.\n\n :type NextPageToken: string\n :param NextPageToken: The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n\n :rtype: dict\n :return: {\n 'NextPageToken': 'string',\n 'Tags': [\n 'string',\n ],\n 'ReturnSize': 123,\n 'TotalSize': 123\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6716150641441345, "alphanum_fraction": 0.6826599836349487, "avg_line_length": 62.131526947021484, "blob_id": "52b4815e8de89c77facb78535412d56efd9e0280", "content_id": "a8e7142201de2d6d2962faf6817c2e9334a0dcb3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95519, "license_type": "permissive", "max_line_length": 724, "num_lines": 1513, "path": "/pyboto3/cloudwatch.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_alarms(AlarmNames=None):\n \"\"\"\n Deletes the specified alarms. In the event of an error, no alarms are deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_alarms(\n AlarmNames=[\n 'string',\n ]\n )\n \n \n :type AlarmNames: list\n :param AlarmNames: [REQUIRED]\n The alarms to be deleted.\n (string) --\n \n\n \"\"\"\n pass\n\ndef delete_dashboards(DashboardNames=None):\n \"\"\"\n Deletes all dashboards that you specify. You may specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_dashboards(\n DashboardNames=[\n 'string',\n ]\n )\n \n \n :type DashboardNames: list\n :param DashboardNames: [REQUIRED]\n The dashboards to be deleted. This parameter is required.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_alarm_history(AlarmName=None, HistoryItemType=None, StartDate=None, EndDate=None, MaxRecords=None, NextToken=None):\n \"\"\"\n Retrieves the history for the specified alarm. You can filter the results by date range or item type. If an alarm name is not specified, the histories for all alarms are returned.\n CloudWatch retains the history of an alarm even if you delete the alarm.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_alarm_history(\n AlarmName='string',\n HistoryItemType='ConfigurationUpdate'|'StateUpdate'|'Action',\n StartDate=datetime(2015, 1, 1),\n EndDate=datetime(2015, 1, 1),\n MaxRecords=123,\n NextToken='string'\n )\n \n \n :type AlarmName: string\n :param AlarmName: The name of the alarm.\n\n :type HistoryItemType: string\n :param HistoryItemType: The type of alarm histories to retrieve.\n\n :type StartDate: datetime\n :param StartDate: The starting date to retrieve alarm history.\n\n :type EndDate: datetime\n :param EndDate: The ending date to retrieve alarm history.\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of alarm history records to retrieve.\n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to indicate that there is more data available.\n\n :rtype: dict\n :return: {\n 'AlarmHistoryItems': [\n {\n 'AlarmName': 'string',\n 'Timestamp': datetime(2015, 1, 1),\n 'HistoryItemType': 'ConfigurationUpdate'|'StateUpdate'|'Action',\n 'HistorySummary': 'string',\n 'HistoryData': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_alarms(AlarmNames=None, AlarmNamePrefix=None, StateValue=None, ActionPrefix=None, MaxRecords=None, NextToken=None):\n \"\"\"\n Retrieves the specified alarms. If no alarms are specified, all alarms are returned. 
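# Minimal sketch of the alarm-history call documented above, assuming a
# CloudWatch client and an existing alarm; 'my-alarm' is a placeholder name.
import datetime
import boto3

cloudwatch = boto3.client('cloudwatch')
now = datetime.datetime.utcnow()
response = cloudwatch.describe_alarm_history(
    AlarmName='my-alarm',
    HistoryItemType='StateUpdate',
    StartDate=now - datetime.timedelta(days=1),
    EndDate=now,
    MaxRecords=100,
)
for item in response['AlarmHistoryItems']:
    print(item['Timestamp'], item['HistorySummary'])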
Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_alarms(\n AlarmNames=[\n 'string',\n ],\n AlarmNamePrefix='string',\n StateValue='OK'|'ALARM'|'INSUFFICIENT_DATA',\n ActionPrefix='string',\n MaxRecords=123,\n NextToken='string'\n )\n \n \n :type AlarmNames: list\n :param AlarmNames: The names of the alarms.\n (string) --\n \n\n :type AlarmNamePrefix: string\n :param AlarmNamePrefix: The alarm name prefix. If this parameter is specified, you cannot specify AlarmNames .\n\n :type StateValue: string\n :param StateValue: The state value to be used in matching alarms.\n\n :type ActionPrefix: string\n :param ActionPrefix: The action name prefix.\n\n :type MaxRecords: integer\n :param MaxRecords: The maximum number of alarm descriptions to retrieve.\n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to indicate that there is more data available.\n\n :rtype: dict\n :return: {\n 'MetricAlarms': [\n {\n 'AlarmName': 'string',\n 'AlarmArn': 'string',\n 'AlarmDescription': 'string',\n 'AlarmConfigurationUpdatedTimestamp': datetime(2015, 1, 1),\n 'ActionsEnabled': True|False,\n 'OKActions': [\n 'string',\n ],\n 'AlarmActions': [\n 'string',\n ],\n 'InsufficientDataActions': [\n 'string',\n ],\n 'StateValue': 'OK'|'ALARM'|'INSUFFICIENT_DATA',\n 'StateReason': 'string',\n 'StateReasonData': 'string',\n 'StateUpdatedTimestamp': datetime(2015, 1, 1),\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Statistic': 'SampleCount'|'Average'|'Sum'|'Minimum'|'Maximum',\n 'ExtendedStatistic': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Period': 123,\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None',\n 'EvaluationPeriods': 123,\n 'DatapointsToAlarm': 123,\n 'Threshold': 123.0,\n 'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold',\n 'TreatMissingData': 'string',\n 'EvaluateLowSampleCountPercentile': 'string',\n 'Metrics': [\n {\n 'Id': 'string',\n 'MetricStat': {\n 'Metric': {\n 'Namespace': 'string',\n 'MetricName': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'Period': 123,\n 'Stat': 'string',\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None'\n },\n 'Expression': 'string',\n 'Label': 'string',\n 'ReturnData': True|False\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_alarms_for_metric(MetricName=None, Namespace=None, Statistic=None, ExtendedStatistic=None, Dimensions=None, Period=None, Unit=None):\n \"\"\"\n Retrieves the alarms for the specified metric. 
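# Hedged sketch for describe_alarms as documented above: page through every
# alarm in the ALARM state whose name starts with a prefix, following the
# NextToken convention. 'prod-' is a placeholder prefix.
import boto3

cloudwatch = boto3.client('cloudwatch')
kwargs = {'AlarmNamePrefix': 'prod-', 'StateValue': 'ALARM'}
while True:
    response = cloudwatch.describe_alarms(**kwargs)
    for alarm in response['MetricAlarms']:
        print(alarm['AlarmName'], alarm['StateReason'])
    token = response.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token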
To filter the results, specify a statistic, period, or unit.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_alarms_for_metric(\n MetricName='string',\n Namespace='string',\n Statistic='SampleCount'|'Average'|'Sum'|'Minimum'|'Maximum',\n ExtendedStatistic='string',\n Dimensions=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n Period=123,\n Unit='Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None'\n )\n \n \n :type MetricName: string\n :param MetricName: [REQUIRED]\n The name of the metric.\n \n\n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace of the metric.\n \n\n :type Statistic: string\n :param Statistic: The statistic for the metric, other than percentiles. For percentile statistics, use ExtendedStatistics .\n\n :type ExtendedStatistic: string\n :param ExtendedStatistic: The percentile statistic for the metric. Specify a value between p0.0 and p100.\n\n :type Dimensions: list\n :param Dimensions: The dimensions associated with the metric. If the metric has any associated dimensions, you must specify them in order for the call to succeed.\n (dict) --Expands the identity of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n\n :type Period: integer\n :param Period: The period, in seconds, over which the statistic is applied.\n\n :type Unit: string\n :param Unit: The unit for the metric.\n\n :rtype: dict\n :return: {\n 'MetricAlarms': [\n {\n 'AlarmName': 'string',\n 'AlarmArn': 'string',\n 'AlarmDescription': 'string',\n 'AlarmConfigurationUpdatedTimestamp': datetime(2015, 1, 1),\n 'ActionsEnabled': True|False,\n 'OKActions': [\n 'string',\n ],\n 'AlarmActions': [\n 'string',\n ],\n 'InsufficientDataActions': [\n 'string',\n ],\n 'StateValue': 'OK'|'ALARM'|'INSUFFICIENT_DATA',\n 'StateReason': 'string',\n 'StateReasonData': 'string',\n 'StateUpdatedTimestamp': datetime(2015, 1, 1),\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Statistic': 'SampleCount'|'Average'|'Sum'|'Minimum'|'Maximum',\n 'ExtendedStatistic': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Period': 123,\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None',\n 'EvaluationPeriods': 123,\n 'DatapointsToAlarm': 123,\n 'Threshold': 123.0,\n 'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold',\n 'TreatMissingData': 'string',\n 'EvaluateLowSampleCountPercentile': 'string',\n 'Metrics': [\n {\n 'Id': 'string',\n 'MetricStat': {\n 'Metric': {\n 'Namespace': 'string',\n 'MetricName': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'Period': 123,\n 'Stat': 'string',\n 'Unit': 
'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None'\n },\n 'Expression': 'string',\n 'Label': 'string',\n 'ReturnData': True|False\n },\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef disable_alarm_actions(AlarmNames=None):\n \"\"\"\n Disables the actions for the specified alarms. When an alarm's actions are disabled, the alarm actions do not execute when the alarm state changes.\n See also: AWS API Documentation\n \n \n :example: response = client.disable_alarm_actions(\n AlarmNames=[\n 'string',\n ]\n )\n \n \n :type AlarmNames: list\n :param AlarmNames: [REQUIRED]\n The names of the alarms.\n (string) --\n \n\n \"\"\"\n pass\n\ndef enable_alarm_actions(AlarmNames=None):\n \"\"\"\n Enables the actions for the specified alarms.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_alarm_actions(\n AlarmNames=[\n 'string',\n ]\n )\n \n \n :type AlarmNames: list\n :param AlarmNames: [REQUIRED]\n The names of the alarms.\n (string) --\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_dashboard(DashboardName=None):\n \"\"\"\n Displays the details of the dashboard that you specify.\n To copy an existing dashboard, use GetDashboard , and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard to create the copy.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dashboard(\n DashboardName='string'\n )\n \n \n :type DashboardName: string\n :param DashboardName: [REQUIRED]\n The name of the dashboard to be described.\n \n\n :rtype: dict\n :return: {\n 'DashboardArn': 'string',\n 'DashboardBody': 'string',\n 'DashboardName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_metric_data(MetricDataQueries=None, StartTime=None, EndTime=None, NextToken=None, ScanBy=None, MaxDatapoints=None):\n \"\"\"\n You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics . 
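# Sketch of the Lambda error-rate expression mentioned above: two metrics are
# retrieved with ReturnData=False and divided by a math expression, so only
# the derived series comes back. 'my-function' is a placeholder.
import datetime
import boto3

cloudwatch = boto3.client('cloudwatch')
now = datetime.datetime.utcnow()
dims = [{'Name': 'FunctionName', 'Value': 'my-function'}]
response = cloudwatch.get_metric_data(
    MetricDataQueries=[
        {'Id': 'errors',
         'MetricStat': {'Metric': {'Namespace': 'AWS/Lambda',
                                   'MetricName': 'Errors',
                                   'Dimensions': dims},
                        'Period': 300, 'Stat': 'Sum'},
         'ReturnData': False},
        {'Id': 'invocations',
         'MetricStat': {'Metric': {'Namespace': 'AWS/Lambda',
                                   'MetricName': 'Invocations',
                                   'Dimensions': dims},
                        'Period': 300, 'Stat': 'Sum'},
         'ReturnData': False},
        {'Id': 'error_rate',
         'Expression': 'errors / invocations',
         'Label': 'ErrorRate'},
    ],
    StartTime=now - datetime.timedelta(hours=3),
    EndTime=now,
)
for result in response['MetricDataResults']:
    print(result['Label'], result['Values'])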
For more information about pricing, see Amazon CloudWatch Pricing .\n Amazon CloudWatch retains metric data as follows:\n Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.\n See also: AWS API Documentation\n \n \n :example: response = client.get_metric_data(\n MetricDataQueries=[\n {\n 'Id': 'string',\n 'MetricStat': {\n 'Metric': {\n 'Namespace': 'string',\n 'MetricName': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'Period': 123,\n 'Stat': 'string',\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None'\n },\n 'Expression': 'string',\n 'Label': 'string',\n 'ReturnData': True|False\n },\n ],\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n NextToken='string',\n ScanBy='TimestampDescending'|'TimestampAscending',\n MaxDatapoints=123\n )\n \n \n :type MetricDataQueries: list\n :param MetricDataQueries: [REQUIRED]\n The metric queries to be returned. A single GetMetricData call can include as many as 100 MetricDataQuery structures. Each of these structures can specify either a metric to retrieve, or a math expression to perform on retrieved data.\n (dict) --This structure is used in both GetMetricData and PutMetricAlarm . The supported use of this structure is different for those two operations.\n When used in GetMetricData , it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 100 MetricDataQuery structures.\n When used in PutMetricAlarm , it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.\n Id (string) -- [REQUIRED]A short name used to tie this object to the results in the response. This name must be unique within a single call to GetMetricData . If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. 
The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.\n MetricStat (dict) --The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data.\n Within one MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n Metric (dict) -- [REQUIRED]The metric to return, including the metric name, namespace, and dimensions.\n Namespace (string) --The namespace of the metric.\n MetricName (string) --The name of the metric. This is a required field.\n Dimensions (list) --The dimensions for the metric.\n (dict) --Expands the identity of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n Period (integer) -- [REQUIRED]The period, in seconds, to use when retrieving the metric.\n Stat (string) -- [REQUIRED]The statistic to return. It can include any CloudWatch statistic or extended statistic.\n Unit (string) --The unit to use for the returned data points.\n Expression (string) --The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n Label (string) --A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. If the metric or expression is shown in a CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch generates a default.\n ReturnData (boolean) --When used in GetMetricData , this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify False . If you omit this, the default of True is used.\n When used in PutMetricAlarm , specify True for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm operation, specify ReturnData as False.\n \n \n\n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The time stamp indicating the earliest data to be returned.\n For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch then setting 12:07 or 12:29 as the StartTime .\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The time stamp indicating the latest data to be returned.\n For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. 
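# A small helper sketching the alignment advice above: round a timestamp down
# to a multiple of the metric period so StartTime/EndTime sit on period
# boundaries.
import datetime

def align_to_period(ts, period_seconds):
    """Round a naive UTC datetime down to the nearest period boundary."""
    epoch = datetime.datetime(1970, 1, 1)
    seconds = int((ts - epoch).total_seconds())
    return epoch + datetime.timedelta(seconds=seconds - seconds % period_seconds)

end_time = align_to_period(datetime.datetime.utcnow(), 300)   # e.g. 12:30:00
start_time = end_time - datetime.timedelta(hours=1)           # e.g. 11:30:00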
For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch then setting 12:07 or 12:29 as the EndTime .\n \n\n :type NextToken: string\n :param NextToken: Include this value, if it was returned by the previous call, to get the next set of data points.\n\n :type ScanBy: string\n :param ScanBy: The order in which data points should be returned. TimestampDescending returns the newest data first and paginates when the MaxDatapoints limit is reached. TimestampAscending returns the oldest data first and paginates when the MaxDatapoints limit is reached.\n\n :type MaxDatapoints: integer\n :param MaxDatapoints: The maximum number of data points the request should return before paginating. If you omit this, the default of 100,800 is used.\n\n :rtype: dict\n :return: {\n 'MetricDataResults': [\n {\n 'Id': 'string',\n 'Label': 'string',\n 'Timestamps': [\n datetime(2015, 1, 1),\n ],\n 'Values': [\n 123.0,\n ],\n 'StatusCode': 'Complete'|'InternalError'|'PartialData',\n 'Messages': [\n {\n 'Code': 'string',\n 'Value': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n MetricDataQueries (list) -- [REQUIRED]\n The metric queries to be returned. A single GetMetricData call can include as many as 100 MetricDataQuery structures. Each of these structures can specify either a metric to retrieve, or a math expression to perform on retrieved data.\n \n (dict) --This structure is used in both GetMetricData and PutMetricAlarm . The supported use of this structure is different for those two operations.\n When used in GetMetricData , it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 100 MetricDataQuery structures.\n When used in PutMetricAlarm , it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.\n \n Id (string) -- [REQUIRED]A short name used to tie this object to the results in the response. This name must be unique within a single call to GetMetricData . If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.\n \n MetricStat (dict) --The metric to be returned, along with statistics, period, and units. 
Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data.\n Within one MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n \n Metric (dict) -- [REQUIRED]The metric to return, including the metric name, namespace, and dimensions.\n \n Namespace (string) --The namespace of the metric.\n \n MetricName (string) --The name of the metric. This is a required field.\n \n Dimensions (list) --The dimensions for the metric.\n \n (dict) --Expands the identity of a metric.\n \n Name (string) -- [REQUIRED]The name of the dimension.\n \n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n \n \n \n \n \n Period (integer) -- [REQUIRED]The period, in seconds, to use when retrieving the metric.\n \n Stat (string) -- [REQUIRED]The statistic to return. It can include any CloudWatch statistic or extended statistic.\n \n Unit (string) --The unit to use for the returned data points.\n \n \n \n Expression (string) --The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n \n Label (string) --A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. If the metric or expression is shown in a CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch generates a default.\n \n ReturnData (boolean) --When used in GetMetricData , this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify False . If you omit this, the default of True is used.\n When used in PutMetricAlarm , specify True for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm operation, specify ReturnData as False.\n \n \n \n \n \n StartTime (datetime) -- [REQUIRED]\n The time stamp indicating the earliest data to be returned.\n For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch then setting 12:07 or 12:29 as the StartTime .\n \n EndTime (datetime) -- [REQUIRED]\n The time stamp indicating the latest data to be returned.\n For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch then setting 12:07 or 12:29 as the EndTime .\n \n NextToken (string) -- Include this value, if it was returned by the previous call, to get the next set of data points.\n ScanBy (string) -- The order in which data points should be returned. 
TimestampDescending returns the newest data first and paginates when the MaxDatapoints limit is reached. TimestampAscending returns the oldest data first and paginates when the MaxDatapoints limit is reached.\n MaxDatapoints (integer) -- The maximum number of data points the request should return before paginating. If you omit this, the default of 100,800 is used.\n \n \"\"\"\n pass\n\ndef get_metric_statistics(Namespace=None, MetricName=None, Dimensions=None, StartTime=None, EndTime=None, Period=None, Statistics=None, ExtendedStatistics=None, Unit=None):\n \"\"\"\n Gets statistics for the specified metric.\n The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.\n CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.\n CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:\n Percentile statistics are not available for metrics when any of the metric values are negative numbers.\n Amazon CloudWatch retains metric data as follows:\n Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.\n CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.\n For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.get_metric_statistics(\n Namespace='string',\n MetricName='string',\n Dimensions=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n Period=123,\n Statistics=[\n 'SampleCount'|'Average'|'Sum'|'Minimum'|'Maximum',\n ],\n ExtendedStatistics=[\n 'string',\n ],\n Unit='Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None'\n )\n \n \n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace of the metric, with or without spaces.\n \n\n :type MetricName: string\n :param MetricName: [REQUIRED]\n The name of the metric, with or without spaces.\n \n\n :type Dimensions: list\n :param Dimensions: The dimensions. 
If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. If a specific combination of dimensions was not published, you can't retrieve statistics for it. You must specify the same dimensions that were used when the metrics were created. For an example, see Dimension Combinations in the Amazon CloudWatch User Guide . For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide .\n (dict) --Expands the identity of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n\n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.\n The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).\n CloudWatch rounds the specified time stamp as follows:\n Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.\n Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.\n Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.\n If you set Period to 5, 10, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The time stamp that determines the last data point to return.\n The value specified is exclusive; results include data points up to the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).\n \n\n :type Period: integer\n :param Period: [REQUIRED]\n The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.\n If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:\n Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).\n Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).\n Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).\n \n\n :type Statistics: list\n :param Statistics: The metric statistics, other than percentile. For percentile statistics, use ExtendedStatistics . 
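# Hedged sketch of get_metric_statistics for a built-in EC2 metric. Only
# Statistics is passed (not ExtendedStatistics), and the instance ID is a
# placeholder. Data points are not returned in chronological order, hence
# the sort.
import datetime
import boto3

cloudwatch = boto3.client('cloudwatch')
now = datetime.datetime.utcnow()
response = cloudwatch.get_metric_statistics(
    Namespace='AWS/EC2',
    MetricName='CPUUtilization',
    Dimensions=[{'Name': 'InstanceId', 'Value': 'i-0123456789abcdef0'}],
    StartTime=now - datetime.timedelta(hours=1),
    EndTime=now,
    Period=300,
    Statistics=['Average', 'Maximum'],
    Unit='Percent',
)
for point in sorted(response['Datapoints'], key=lambda p: p['Timestamp']):
    print(point['Timestamp'], point['Average'], point['Maximum'])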
When calling GetMetricStatistics , you must specify either Statistics or ExtendedStatistics , but not both.\n (string) --\n \n\n :type ExtendedStatistics: list\n :param ExtendedStatistics: The percentile statistics. Specify values between p0.0 and p100. When calling GetMetricStatistics , you must specify either Statistics or ExtendedStatistics , but not both. Percentile statistics are not available for metrics when any of the metric values are negative numbers.\n (string) --\n \n\n :type Unit: string\n :param Unit: The unit for a given metric. Metrics may be reported in multiple units. Not supplying a unit results in all units being returned. If you specify only a unit that the metric does not report, the results of the call are null.\n\n :rtype: dict\n :return: {\n 'Label': 'string',\n 'Datapoints': [\n {\n 'Timestamp': datetime(2015, 1, 1),\n 'SampleCount': 123.0,\n 'Average': 123.0,\n 'Sum': 123.0,\n 'Minimum': 123.0,\n 'Maximum': 123.0,\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None',\n 'ExtendedStatistics': {\n 'string': 123.0\n }\n },\n ]\n }\n \n \n :returns: \n Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.\n Data points with a period of 60 seconds (1-minute) are available for 15 days.\n Data points with a period of 300 seconds (5-minute) are available for 63 days.\n Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).\n \n \"\"\"\n pass\n\ndef get_metric_widget_image(MetricWidget=None, OutputFormat=None):\n \"\"\"\n You can use the GetMetricWidgetImage API to retrieve a snapshot graph of one or more Amazon CloudWatch metrics as a bitmap image. You can then embed this image into your services and products, such as wiki pages, reports, and documents. You could also retrieve images regularly, such as every minute, and create your own custom live dashboard.\n The graph you retrieve can include all CloudWatch metric graph features, including metric math and horizontal and vertical annotations.\n There is a limit of 20 transactions per second for this API. Each GetMetricWidgetImage action has the following limits:\n See also: AWS API Documentation\n \n \n :example: response = client.get_metric_widget_image(\n MetricWidget='string',\n OutputFormat='string'\n )\n \n \n :type MetricWidget: string\n :param MetricWidget: [REQUIRED]\n A JSON string that defines the bitmap graph to be retrieved. The string includes the metrics to include in the graph, statistics, annotations, title, axis limits, and so on. You can include only one MetricWidget parameter in each GetMetricWidgetImage call.\n For more information about the syntax of MetricWidget see CloudWatch-Metric-Widget-Structure .\n If any metric on the graph could not load all the requested data points, an orange triangle with an exclamation point appears next to the graph legend.\n \n\n :type OutputFormat: string\n :param OutputFormat: The format of the resulting image. Only PNG images are supported.\n The default is png . 
If you specify png , the API returns an HTTP response with the content-type set to text/xml . The image data is in a MetricWidgetImage field. For example:\n <GetMetricWidgetImageResponse xmlns='http://monitoring.amazonaws.com/doc/2010-08-01/'><GetMetricWidgetImageResult>\n <MetricWidgetImage>\n iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...\n </MetricWidgetImage>\n </GetMetricWidgetImageResult>\n <ResponseMetadata>\n <RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>\n </ResponseMetadata>\n </GetMetricWidgetImageResponse>\n The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an AWS SDK, you should use png . If you specify image/png , the HTTP response has a content-type set to image/png , and the body of the response is a PNG image.\n \n\n :rtype: dict\n :return: {\n 'MetricWidgetImage': b'bytes'\n }\n \n \n :returns: \n MetricWidget (string) -- [REQUIRED]\n A JSON string that defines the bitmap graph to be retrieved. The string includes the metrics to include in the graph, statistics, annotations, title, axis limits, and so on. You can include only one MetricWidget parameter in each GetMetricWidgetImage call.\n For more information about the syntax of MetricWidget see CloudWatch-Metric-Widget-Structure .\n If any metric on the graph could not load all the requested data points, an orange triangle with an exclamation point appears next to the graph legend.\n \n OutputFormat (string) -- The format of the resulting image. Only PNG images are supported.\n The default is png . If you specify png , the API returns an HTTP response with the content-type set to text/xml . The image data is in a MetricWidgetImage field. For example:\n \n <GetMetricWidgetImageResponse xmlns=\"http://monitoring.amazonaws.com/doc/2010-08-01/\"><GetMetricWidgetImageResult>\n <MetricWidgetImage>\n iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...\n </MetricWidgetImage>\n </GetMetricWidgetImageResult>\n <ResponseMetadata>\n <RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>\n </ResponseMetadata>\n </GetMetricWidgetImageResponse>\n \n The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an AWS SDK, you should use png . If you specify image/png , the HTTP response has a content-type set to image/png , and the body of the response is a PNG image.\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_dashboards(DashboardNamePrefix=None, NextToken=None):\n \"\"\"\n Returns a list of the dashboards for your account. If you include DashboardNamePrefix , only those dashboards with names starting with the prefix are listed. 
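# A minimal hand-written sketch (not part of the generated stub) of rendering
# a metric graph with get_metric_widget_image and saving the returned bytes as
# a PNG, per the response shape described above. The widget definition is an
# illustrative assumption; see CloudWatch-Metric-Widget-Structure for the
# full syntax.
import json
import boto3

cloudwatch = boto3.client('cloudwatch')
widget = {
    'metrics': [['AWS/EC2', 'CPUUtilization', 'InstanceId', 'i-0123456789abcdef0']],  # hypothetical
    'width': 600,
    'height': 400,
    'period': 300,
}
response = cloudwatch.get_metric_widget_image(MetricWidget=json.dumps(widget))
with open('graph.png', 'wb') as f:
    f.write(response['MetricWidgetImage'])  # raw PNG bytes in the default output format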
Otherwise, all dashboards in your account are listed.\n See also: AWS API Documentation\n \n \n :example: response = client.list_dashboards(\n DashboardNamePrefix='string',\n NextToken='string'\n )\n \n \n :type DashboardNamePrefix: string\n :param DashboardNamePrefix: If you specify this parameter, only the dashboards with names starting with the specified string are listed. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, '.', '-', and '_'.\n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to indicate that there is more data available.\n\n :rtype: dict\n :return: {\n 'DashboardEntries': [\n {\n 'DashboardName': 'string',\n 'DashboardArn': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'Size': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_metrics(Namespace=None, MetricName=None, Dimensions=None, NextToken=None):\n \"\"\"\n List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to obtain statistical data.\n Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.\n After you create a metric, allow up to fifteen minutes before the metric appears. Statistics about the metric, however, are available sooner using GetMetricData or GetMetricStatistics .\n See also: AWS API Documentation\n \n \n :example: response = client.list_metrics(\n Namespace='string',\n MetricName='string',\n Dimensions=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n NextToken='string'\n )\n \n \n :type Namespace: string\n :param Namespace: The namespace to filter against.\n\n :type MetricName: string\n :param MetricName: The name of the metric to filter against.\n\n :type Dimensions: list\n :param Dimensions: The dimensions to filter against.\n (dict) --Represents filters for a dimension.\n Name (string) -- [REQUIRED]The dimension name to be matched.\n Value (string) --The value of the dimension to be matched.\n \n \n\n :type NextToken: string\n :param NextToken: The token returned by a previous call to indicate that there is more data available.\n\n :rtype: dict\n :return: {\n 'Metrics': [\n {\n 'Namespace': 'string',\n 'MetricName': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_dashboard(DashboardName=None, DashboardBody=None):\n \"\"\"\n Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.\n There is no limit to the number of dashboards in your account. All dashboards in your account are global, not region-specific.\n A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard , and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard .\n When you create a dashboard with PutDashboard , a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. 
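# A minimal hand-written sketch (not part of the generated stub) of paging
# through list_metrics with NextToken, since at most 500 results come back per
# call. The namespace filter is an illustrative assumption; omit it to list
# every metric in the account.
import boto3

cloudwatch = boto3.client('cloudwatch')
metrics = []
kwargs = {'Namespace': 'AWS/EC2'}  # hypothetical filter
while True:
    response = cloudwatch.list_metrics(**kwargs)
    metrics.extend(response['Metrics'])
    if 'NextToken' not in response:
        break
    kwargs['NextToken'] = response['NextToken']
print('found', len(metrics), 'metrics')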
This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.\n See also: AWS API Documentation\n \n \n :example: response = client.put_dashboard(\n DashboardName='string',\n DashboardBody='string'\n )\n \n \n :type DashboardName: string\n :param DashboardName: [REQUIRED]\n The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, '-', and '_'. This parameter is required.\n \n\n :type DashboardBody: string\n :param DashboardBody: [REQUIRED]\n The detailed information about the dashboard in JSON format, including the widgets to include and their location on the dashboard. This parameter is required.\n For more information about the syntax, see CloudWatch-Dashboard-Body-Structure .\n \n\n :rtype: dict\n :return: {\n 'DashboardValidationMessages': [\n {\n 'DataPath': 'string',\n 'Message': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef put_metric_alarm(AlarmName=None, AlarmDescription=None, ActionsEnabled=None, OKActions=None, AlarmActions=None, InsufficientDataActions=None, MetricName=None, Namespace=None, Statistic=None, ExtendedStatistic=None, Dimensions=None, Period=None, Unit=None, EvaluationPeriods=None, DatapointsToAlarm=None, Threshold=None, ComparisonOperator=None, TreatMissingData=None, EvaluateLowSampleCountPercentile=None, Metrics=None):\n \"\"\"\n Creates or updates an alarm and associates it with the specified metric or metric math expression.\n When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA . The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.\n When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.\n If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:\n If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.\n If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.\n If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.\n The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents . 
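# A minimal hand-written sketch (not part of the generated stub) of the copy
# pattern described above: fetch an existing dashboard with get_dashboard and
# reuse its body as the template for a new one. Both dashboard names are
# hypothetical placeholders.
import boto3

cloudwatch = boto3.client('cloudwatch')
source = cloudwatch.get_dashboard(DashboardName='prod-overview')  # hypothetical name
result = cloudwatch.put_dashboard(
    DashboardName='prod-overview-copy',     # hypothetical name
    DashboardBody=source['DashboardBody'],  # JSON string defining the widgets
)
# An empty DashboardValidationMessages list means the body was accepted as-is.
print(result['DashboardValidationMessages'])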
For more information, see AWS service-linked role .\n See also: AWS API Documentation\n \n \n :example: response = client.put_metric_alarm(\n AlarmName='string',\n AlarmDescription='string',\n ActionsEnabled=True|False,\n OKActions=[\n 'string',\n ],\n AlarmActions=[\n 'string',\n ],\n InsufficientDataActions=[\n 'string',\n ],\n MetricName='string',\n Namespace='string',\n Statistic='SampleCount'|'Average'|'Sum'|'Minimum'|'Maximum',\n ExtendedStatistic='string',\n Dimensions=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n Period=123,\n Unit='Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None',\n EvaluationPeriods=123,\n DatapointsToAlarm=123,\n Threshold=123.0,\n ComparisonOperator='GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold',\n TreatMissingData='string',\n EvaluateLowSampleCountPercentile='string',\n Metrics=[\n {\n 'Id': 'string',\n 'MetricStat': {\n 'Metric': {\n 'Namespace': 'string',\n 'MetricName': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'Period': 123,\n 'Stat': 'string',\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None'\n },\n 'Expression': 'string',\n 'Label': 'string',\n 'ReturnData': True|False\n },\n ]\n )\n \n \n :type AlarmName: string\n :param AlarmName: [REQUIRED]\n The name for the alarm. This name must be unique within your AWS account.\n \n\n :type AlarmDescription: string\n :param AlarmDescription: The description for the alarm.\n\n :type ActionsEnabled: boolean\n :param ActionsEnabled: Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE.\n\n :type OKActions: list\n :param OKActions: The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).\n Valid Values: arn:aws:automate:*region* :ec2:stop | arn:aws:automate:*region* :ec2:terminate | arn:aws:automate:*region* :ec2:recover | arn:aws:automate:*region* :ec2:reboot | ``arn:aws:sns:region :account-id :sns-topic-name `` | ``arn:aws:autoscaling:region :account-id :scalingPolicy:policy-id autoScalingGroupName/group-friendly-name :policyName/policy-friendly-name ``\n Valid Values (for use with IAM roles): arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Reboot/1.0\n (string) --\n \n\n :type AlarmActions: list\n :param AlarmActions: The actions to execute when this alarm transitions to the ALARM state from any other state. 
Each action is specified as an Amazon Resource Name (ARN).\n Valid Values: arn:aws:automate:*region* :ec2:stop | arn:aws:automate:*region* :ec2:terminate | arn:aws:automate:*region* :ec2:recover | ``arn:aws:sns:region :account-id :sns-topic-name `` | ``arn:aws:autoscaling:region :account-id :scalingPolicy:policy-id autoScalingGroupName/group-friendly-name :policyName/policy-friendly-name ``\n Valid Values (for use with IAM roles): arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Reboot/1.0\n (string) --\n \n\n :type InsufficientDataActions: list\n :param InsufficientDataActions: The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).\n Valid Values: arn:aws:automate:*region* :ec2:stop | arn:aws:automate:*region* :ec2:terminate | arn:aws:automate:*region* :ec2:recover | ``arn:aws:sns:region :account-id :sns-topic-name `` | ``arn:aws:autoscaling:region :account-id :scalingPolicy:policy-id autoScalingGroupName/group-friendly-name :policyName/policy-friendly-name ``\n Valid Values (for use with IAM roles): arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Reboot/1.0\n (string) --\n \n\n :type MetricName: string\n :param MetricName: The name for the metric associated with the alarm.\n If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the Dimensions , Period , Namespace , Statistic , or ExtendedStatistic parameters. Instead, you specify all this information in the Metrics array.\n \n\n :type Namespace: string\n :param Namespace: The namespace for the metric specified in MetricName .\n\n :type Statistic: string\n :param Statistic: The statistic for the metric specified in MetricName , other than percentile. For percentile statistics, use ExtendedStatistic . When you call PutMetricAlarm and specify a MetricName , you must specify either Statistic or ExtendedStatistic, but not both.\n\n :type ExtendedStatistic: string\n :param ExtendedStatistic: The percentile statistic for the metric specified in MetricName . Specify a value between p0.0 and p100. When you call PutMetricAlarm and specify a MetricName , you must specify either Statistic or ExtendedStatistic, but not both.\n\n :type Dimensions: list\n :param Dimensions: The dimensions for the metric specified in MetricName .\n (dict) --Expands the identity of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n\n :type Period: integer\n :param Period: The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.\n Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify.
In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm may often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing .\n An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.\n \n\n :type Unit: string\n :param Unit: The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.\n If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the CloudWatch alarm can get stuck in the INSUFFICIENT_DATA state.\n \n\n :type EvaluationPeriods: integer\n :param EvaluationPeriods: [REQUIRED]\n The number of periods over which data is compared to the specified threshold. If you are setting an alarm that requires that a number of consecutive data points be breaching to trigger the alarm, this value specifies that number. If you are setting an 'M out of N' alarm, this value is the N.\n An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period cannot be more than 86,400 seconds.\n \n\n :type DatapointsToAlarm: integer\n :param DatapointsToAlarm: The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an 'M out of N' alarm. In that case, this value is the M. For more information, see Evaluating an Alarm in the Amazon CloudWatch User Guide .\n\n :type Threshold: float\n :param Threshold: [REQUIRED]\n The value against which the specified statistic is compared.\n \n\n :type ComparisonOperator: string\n :param ComparisonOperator: [REQUIRED]\n The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.\n \n\n :type TreatMissingData: string\n :param TreatMissingData: Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treat Missing Data .\n Valid Values: breaching | notBreaching | ignore | missing\n \n\n :type EvaluateLowSampleCountPercentile: string\n :param EvaluateLowSampleCountPercentile: Used only for alarms based on percentiles. If you specify ignore , the alarm state does not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm is always evaluated and possibly changes state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples .\n Valid Values: evaluate | ignore\n \n\n :type Metrics: list\n :param Metrics: An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression.
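# A minimal hand-written sketch (not part of the generated stub) of a
# single-metric alarm using the parameters described above: Statistic rather
# than ExtendedStatistic, a Period that is a multiple of 60, and
# Period * EvaluationPeriods well under 86,400 seconds. The alarm name, SNS
# topic ARN, and instance ID are hypothetical placeholders.
import boto3

cloudwatch = boto3.client('cloudwatch')
cloudwatch.put_metric_alarm(
    AlarmName='high-cpu',                                              # hypothetical
    AlarmActions=['arn:aws:sns:us-east-1:123456789012:ops-alerts'],    # hypothetical topic
    MetricName='CPUUtilization',
    Namespace='AWS/EC2',
    Statistic='Average',
    Dimensions=[{'Name': 'InstanceId', 'Value': 'i-0123456789abcdef0'}],
    Period=300,
    EvaluationPeriods=3,          # 3 x 300s = 900s of evaluation, under one day
    Threshold=80.0,
    ComparisonOperator='GreaterThanThreshold',
    TreatMissingData='notBreaching',
)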
Each item in the Metrics array either retrieves a metric or performs a math expression.\n If you use the Metrics parameter, you cannot include the MetricName , Dimensions , Period , Namespace , Statistic , or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.\n (dict) --This structure is used in both GetMetricData and PutMetricAlarm . The supported use of this structure is different for those two operations.\n When used in GetMetricData , it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 100 MetricDataQuery structures.\n When used in PutMetricAlarm , it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.\n Id (string) -- [REQUIRED]A short name used to tie this object to the results in the response. This name must be unique within a single call to GetMetricData . If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.\n MetricStat (dict) --The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data.\n Within one MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n Metric (dict) -- [REQUIRED]The metric to return, including the metric name, namespace, and dimensions.\n Namespace (string) --The namespace of the metric.\n MetricName (string) --The name of the metric. This is a required field.\n Dimensions (list) --The dimensions for the metric.\n (dict) --Expands the identity of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n Period (integer) -- [REQUIRED]The period, in seconds, to use when retrieving the metric.\n Stat (string) -- [REQUIRED]The statistic to return. It can include any CloudWatch statistic or extended statistic.\n Unit (string) --The unit to use for the returned data points.\n Expression (string) --The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. 
For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n Label (string) --A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. If the metric or expression is shown in a CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch generates a default.\n ReturnData (boolean) --When used in GetMetricData , this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify False . If you omit this, the default of True is used.\n When used in PutMetricAlarm , specify True for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm operation, specify ReturnData as False.\n \n \n\n :returns: \n AlarmName (string) -- [REQUIRED]\n The name for the alarm. This name must be unique within your AWS account.\n \n AlarmDescription (string) -- The description for the alarm.\n ActionsEnabled (boolean) -- Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE.\n OKActions (list) -- The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).\n Valid Values: arn:aws:automate:*region* :ec2:stop | arn:aws:automate:*region* :ec2:terminate | arn:aws:automate:*region* :ec2:recover | arn:aws:automate:*region* :ec2:reboot | ``arn:aws:sns:region :account-id :sns-topic-name `` | ``arn:aws:autoscaling:region :account-id :scalingPolicy:policy-id autoScalingGroupName/group-friendly-name :policyName/policy-friendly-name ``\n Valid Values (for use with IAM roles): arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Reboot/1.0\n \n (string) --\n \n \n AlarmActions (list) -- The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).\n Valid Values: arn:aws:automate:*region* :ec2:stop | arn:aws:automate:*region* :ec2:terminate | arn:aws:automate:*region* :ec2:recover | ``arn:aws:sns:region :account-id :sns-topic-name `` | ``arn:aws:autoscaling:region :account-id :scalingPolicy:policy-id autoScalingGroupName/group-friendly-name :policyName/policy-friendly-name ``\n Valid Values (for use with IAM roles): arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Reboot/1.0\n \n (string) --\n \n \n InsufficientDataActions (list) -- The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. 
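# A minimal hand-written sketch (not part of the generated stub) of an alarm
# on a metric math expression, following the MetricDataQuery rules above: one
# MetricStat entry retrieves the metric with ReturnData=False, and the single
# Expression entry (ReturnData=True) is the time series the alarm evaluates.
# The alarm name, expression, and dimension values are hypothetical.
import boto3

cloudwatch = boto3.client('cloudwatch')
cloudwatch.put_metric_alarm(
    AlarmName='cpu-gap-filled',   # hypothetical
    EvaluationPeriods=3,
    Threshold=90.0,
    ComparisonOperator='GreaterThanThreshold',
    Metrics=[
        {
            'Id': 'e1',
            'Expression': 'FILL(m1, 0)',  # must return a single time series
            'Label': 'CPU with gaps filled',
            'ReturnData': True,           # exactly one entry returns data for the alarm
        },
        {
            'Id': 'm1',
            'MetricStat': {
                'Metric': {
                    'Namespace': 'AWS/EC2',
                    'MetricName': 'CPUUtilization',
                    'Dimensions': [{'Name': 'InstanceId', 'Value': 'i-0123456789abcdef0'}],
                },
                'Period': 300,
                'Stat': 'Average',
            },
            'ReturnData': False,
        },
    ],
)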
Each action is specified as an Amazon Resource Name (ARN).\n Valid Values: arn:aws:automate:*region* :ec2:stop | arn:aws:automate:*region* :ec2:terminate | arn:aws:automate:*region* :ec2:recover | ``arn:aws:sns:region :account-id :sns-topic-name `` | ``arn:aws:autoscaling:region :account-id :scalingPolicy:policy-id autoScalingGroupName/group-friendly-name :policyName/policy-friendly-name ``\n Valid Values (for use with IAM roles): arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:*region* :*account-id* :action/actions/AWS_EC2.InstanceId.Reboot/1.0\n \n (string) --\n \n \n MetricName (string) -- The name for the metric associated with the alarm.\n If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the Dimensions , Period , Namespace , Statistic , or ExtendedStatistic parameters. Instead, you specify all this information in the Metrics array.\n \n Namespace (string) -- The namespace for the metric specified in MetricName .\n Statistic (string) -- The statistic for the metric specified in MetricName , other than percentile. For percentile statistics, use ExtendedStatistic . When you call PutMetricAlarm and specify a MetricName , you must specify either Statistic or ExtendedStatistic, but not both.\n ExtendedStatistic (string) -- The percentile statistic for the metric specified in MetricName . Specify a value between p0.0 and p100. When you call PutMetricAlarm and specify a MetricName , you must specify either Statistic or ExtendedStatistic, but not both.\n Dimensions (list) -- The dimensions for the metric specified in MetricName .\n \n (dict) --Expands the identity of a metric.\n \n Name (string) -- [REQUIRED]The name of the dimension.\n \n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n \n \n \n Period (integer) -- The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.\n Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm may often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing .\n An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.\n \n Unit (string) -- The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.\n If you specify a unit, you must use a unit that is appropriate for the metric.
Otherwise, the CloudWatch alarm can get stuck in the INSUFFICIENT_DATA state.\n \n EvaluationPeriods (integer) -- [REQUIRED]\n The number of periods over which data is compared to the specified threshold. If you are setting an alarm that requires that a number of consecutive data points be breaching to trigger the alarm, this value specifies that number. If you are setting an \"M out of N\" alarm, this value is the N.\n An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period cannot be more than 86,400 seconds.\n \n DatapointsToAlarm (integer) -- The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an \"M out of N\" alarm. In that case, this value is the M. For more information, see Evaluating an Alarm in the Amazon CloudWatch User Guide .\n Threshold (float) -- [REQUIRED]\n The value against which the specified statistic is compared.\n \n ComparisonOperator (string) -- [REQUIRED]\n The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.\n \n TreatMissingData (string) -- Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treat Missing Data .\n Valid Values: breaching | notBreaching | ignore | missing\n \n EvaluateLowSampleCountPercentile (string) -- Used only for alarms based on percentiles. If you specify ignore , the alarm state does not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm is always evaluated and possibly changes state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples .\n Valid Values: evaluate | ignore\n \n Metrics (list) -- An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. Each item in the Metrics array either retrieves a metric or performs a math expression.\n If you use the Metrics parameter, you cannot include the MetricName , Dimensions , Period , Namespace , Statistic , or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.\n \n (dict) --This structure is used in both GetMetricData and PutMetricAlarm . The supported use of this structure is different for those two operations.\n When used in GetMetricData , it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 100 MetricDataQuery structures.\n When used in PutMetricAlarm , it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Any expression used in a PutMetricAlarm operation must return a single time series.
For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.\n \n Id (string) -- [REQUIRED]A short name used to tie this object to the results in the response. This name must be unique within a single call to GetMetricData . If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.\n \n MetricStat (dict) --The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data.\n Within one MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n \n Metric (dict) -- [REQUIRED]The metric to return, including the metric name, namespace, and dimensions.\n \n Namespace (string) --The namespace of the metric.\n \n MetricName (string) --The name of the metric. This is a required field.\n \n Dimensions (list) --The dimensions for the metric.\n \n (dict) --Expands the identity of a metric.\n \n Name (string) -- [REQUIRED]The name of the dimension.\n \n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n \n \n \n \n \n Period (integer) -- [REQUIRED]The period, in seconds, to use when retrieving the metric.\n \n Stat (string) -- [REQUIRED]The statistic to return. It can include any CloudWatch statistic or extended statistic.\n \n Unit (string) --The unit to use for the returned data points.\n \n \n \n Expression (string) --The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide .\n Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.\n \n Label (string) --A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. If the metric or expression is shown in a CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch generates a default.\n \n ReturnData (boolean) --When used in GetMetricData , this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify False . If you omit this, the default of True is used.\n When used in PutMetricAlarm , specify True for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm operation, specify ReturnData as False.\n \n \n \n \n \n \n \"\"\"\n pass\n\ndef put_metric_data(Namespace=None, MetricData=None):\n \"\"\"\n Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. 
When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics .\n You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.\n Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.\n Although the Value parameter accepts numbers of type Double , CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.\n You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide .\n Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted.\n CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:\n See also: AWS API Documentation\n \n \n :example: response = client.put_metric_data(\n Namespace='string',\n MetricData=[\n {\n 'MetricName': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Timestamp': datetime(2015, 1, 1),\n 'Value': 123.0,\n 'StatisticValues': {\n 'SampleCount': 123.0,\n 'Sum': 123.0,\n 'Minimum': 123.0,\n 'Maximum': 123.0\n },\n 'Values': [\n 123.0,\n ],\n 'Counts': [\n 123.0,\n ],\n 'Unit': 'Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None',\n 'StorageResolution': 123\n },\n ]\n )\n \n \n :type Namespace: string\n :param Namespace: [REQUIRED]\n The namespace for the metric data.\n You cannot specify a namespace that begins with 'AWS/'. Namespaces that begin with 'AWS/' are reserved for use by Amazon Web Services products.\n \n\n :type MetricData: list\n :param MetricData: [REQUIRED]\n The data for the metric. 
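# A minimal hand-written sketch (not part of the generated stub) of publishing
# a custom metric with the Values/Counts form described above: each unique
# value appears once in Values and its frequency in Counts, and the two arrays
# must be the same length. The namespace and metric name are hypothetical;
# custom namespaces must not start with "AWS/".
import boto3

cloudwatch = boto3.client('cloudwatch')
cloudwatch.put_metric_data(
    Namespace='MyApp',            # hypothetical custom namespace
    MetricData=[{
        'MetricName': 'RequestLatency',
        'Values': [12.0, 35.0, 250.0],   # up to 150 unique values per datum
        'Counts': [40.0, 8.0, 1.0],      # how often each value occurred
        'Unit': 'Milliseconds',
        'StorageResolution': 60,         # regular resolution; use 1 for high-resolution
    }],
)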
The array can include no more than 20 metrics per call.\n (dict) --Encapsulates the information sent to either create a metric or add new values to be aggregated into an existing metric.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Dimensions (list) --The dimensions associated with the metric.\n (dict) --Expands the identity of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n Timestamp (datetime) --The time the metric data was received, expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.\n Value (float) --The value for the metric.\n Although the parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.\n StatisticValues (dict) --The statistical values for the metric.\n SampleCount (float) -- [REQUIRED]The number of samples used for the statistic set.\n Sum (float) -- [REQUIRED]The sum of values for the sample set.\n Minimum (float) -- [REQUIRED]The minimum value of the sample set.\n Maximum (float) -- [REQUIRED]The maximum value of the sample set.\n Values (list) --Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to 150 unique values in each PutMetricData action that specifies a Values array.\n Although the Values array accepts numbers of type Double , CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.\n (float) --\n Counts (list) --Array of numbers that is used along with the Values array. Each number in the Counts array is the number of times the corresponding value in the Values array occurred during the period.\n If you omit the Counts array, the default of 1 is used as the value for each count. If you include a Counts array, it must include the same number of values as the Values array.\n (float) --\n Unit (string) --The unit of the metric.\n StorageResolution (integer) --Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide .\n This field is optional; if you do not specify it, the default of 60 is used.\n \n \n\n :returns: \n Namespace (string) -- [REQUIRED]\n The namespace for the metric data.\n You cannot specify a namespace that begins with \"AWS/\". Namespaces that begin with \"AWS/\" are reserved for use by Amazon Web Services products.\n \n MetricData (list) -- [REQUIRED]\n The data for the metric.
The array can include no more than 20 metrics per call.\n \n (dict) --Encapsulates the information sent to either create a metric or add new values to be aggregated into an existing metric.\n \n MetricName (string) -- [REQUIRED]The name of the metric.\n \n Dimensions (list) --The dimensions associated with the metric.\n \n (dict) --Expands the identity of a metric.\n \n Name (string) -- [REQUIRED]The name of the dimension.\n \n Value (string) -- [REQUIRED]The value representing the dimension measurement.\n \n \n \n \n \n Timestamp (datetime) --The time the metric data was received, expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.\n \n Value (float) --The value for the metric.\n Although the parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.\n \n StatisticValues (dict) --The statistical values for the metric.\n \n SampleCount (float) -- [REQUIRED]The number of samples used for the statistic set.\n \n Sum (float) -- [REQUIRED]The sum of values for the sample set.\n \n Minimum (float) -- [REQUIRED]The minimum value of the sample set.\n \n Maximum (float) -- [REQUIRED]The maximum value of the sample set.\n \n \n \n Values (list) --Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to 150 unique values in each PutMetricData action that specifies a Values array.\n Although the Values array accepts numbers of type Double , CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.\n \n (float) --\n \n \n Counts (list) --Array of numbers that is used along with the Values array. Each number in the Counts array is the number of times the corresponding value in the Values array occurred during the period.\n If you omit the Counts array, the default of 1 is used as the value for each count. If you include a Counts array, it must include the same number of values as the Values array.\n \n (float) --\n \n \n Unit (string) --The unit of the metric.\n \n StorageResolution (integer) --Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide .\n This field is optional; if you do not specify it, the default of 60 is used.\n \n \n \n \n \n \n \"\"\"\n pass\n\ndef set_alarm_state(AlarmName=None, StateValue=None, StateReason=None, StateReasonData=None):\n \"\"\"\n Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked.
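# A minimal hand-written sketch (not part of the generated stub) of the
# testing flow described here: force an existing alarm into ALARM with
# set_alarm_state to verify that its actions fire. The alarm name is a
# hypothetical placeholder; CloudWatch re-evaluates and restores the real
# state shortly afterward.
import boto3

cloudwatch = boto3.client('cloudwatch')
cloudwatch.set_alarm_state(
    AlarmName='high-cpu',                        # hypothetical existing alarm
    StateValue='ALARM',
    StateReason='Manual test of alarm actions',
)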
For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an SNS message. The alarm returns to its actual state (often within seconds). Because the alarm state change happens quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory .\n See also: AWS API Documentation\n \n \n :example: response = client.set_alarm_state(\n AlarmName='string',\n StateValue='OK'|'ALARM'|'INSUFFICIENT_DATA',\n StateReason='string',\n StateReasonData='string'\n )\n \n \n :type AlarmName: string\n :param AlarmName: [REQUIRED]\n The name for the alarm. This name must be unique within the AWS account. The maximum length is 255 characters.\n \n\n :type StateValue: string\n :param StateValue: [REQUIRED]\n The value of the state.\n \n\n :type StateReason: string\n :param StateReason: [REQUIRED]\n The reason that this alarm is set to this specific state, in text format.\n \n\n :type StateReasonData: string\n :param StateReasonData: The reason that this alarm is set to this specific state, in JSON format.\n\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5808055996894836, "alphanum_fraction": 0.5854290723800659, "avg_line_length": 32.90498733520508, "blob_id": "6c015eacc07bbcaf4e858145ca6bd93d899d8cf5", "content_id": "42e0bed7f49b1d87d17a78cce66138c397f11d90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14275, "license_type": "permissive", "max_line_length": 320, "num_lines": 421, "path": "/pyboto3/kafka.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_cluster(BrokerNodeGroupInfo=None, ClusterName=None, EncryptionInfo=None, EnhancedMonitoring=None, KafkaVersion=None, NumberOfBrokerNodes=None):\n \"\"\"\n Creates a new MSK cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.create_cluster(\n BrokerNodeGroupInfo={\n 'BrokerAZDistribution': 'DEFAULT',\n 'ClientSubnets': [\n 'string',\n ],\n 'InstanceType': 'string',\n 'SecurityGroups': [\n 'string',\n ],\n 'StorageInfo': {\n 'EbsStorageInfo': {\n 'VolumeSize': 123\n }\n }\n },\n ClusterName='string',\n EncryptionInfo={\n 'EncryptionAtRest': {\n 'DataVolumeKMSKeyId': 'string'\n }\n },\n EnhancedMonitoring='DEFAULT'|'PER_BROKER'|'PER_TOPIC_PER_BROKER',\n KafkaVersion='string',\n NumberOfBrokerNodes=123\n )\n \n \n :type BrokerNodeGroupInfo: dict\n :param BrokerNodeGroupInfo: [REQUIRED]\n Information about the broker nodes in the cluster.\n BrokerAZDistribution (string) --The distribution of broker nodes across Availability Zones.\n ClientSubnets (list) -- [REQUIRED]The list of subnets to connect to in the client virtual private cloud (VPC). AWS creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data. Client subnets can't be in Availability Zone us-east-1e.\n (string) --\n InstanceType (string) -- [REQUIRED]The type of Amazon EC2 instances to use for Kafka brokers. The following instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge.\n SecurityGroups (list) --The AWS security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster.\n (string) --\n StorageInfo (dict) --Contains information about storage volumes attached to MSK broker nodes.\n EbsStorageInfo (dict) --EBS volume information.\n VolumeSize (integer) --The size in GiB of the EBS volume for the data drive on each broker node.\n \n \n\n :type ClusterName: string\n :param ClusterName: [REQUIRED]\n The name of the cluster.\n \n\n :type EncryptionInfo: dict\n :param EncryptionInfo: Includes all encryption-related information.\n EncryptionAtRest (dict) --The data volume encryption details.\n DataVolumeKMSKeyId (string) -- [REQUIRED]The AWS KMS key used for data encryption.\n \n \n\n :type EnhancedMonitoring: string\n :param EnhancedMonitoring: Specifies the level of monitoring for the MSK cluster. 
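# A minimal hand-written sketch (not part of the generated stub) of creating
# an MSK cluster with the broker-group settings described above. The cluster
# name, subnet IDs, and Kafka version are illustrative assumptions; substitute
# real subnets from your VPC.
import boto3

kafka = boto3.client('kafka')
response = kafka.create_cluster(
    ClusterName='demo-cluster',                                       # hypothetical
    KafkaVersion='2.2.1',                                             # assumed supported version
    NumberOfBrokerNodes=3,
    BrokerNodeGroupInfo={
        'InstanceType': 'kafka.m5.large',
        'ClientSubnets': ['subnet-0aaa', 'subnet-0bbb', 'subnet-0ccc'],  # hypothetical subnets
        'StorageInfo': {'EbsStorageInfo': {'VolumeSize': 100}},          # GiB per broker
    },
    EnhancedMonitoring='DEFAULT',
)
print(response['ClusterArn'], response['State'])  # State starts as CREATING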
The possible values are DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER.\n\n :type KafkaVersion: string\n :param KafkaVersion: [REQUIRED]\n The version of Apache Kafka.\n \n\n :type NumberOfBrokerNodes: integer\n :param NumberOfBrokerNodes: [REQUIRED]\n The number of Kafka broker nodes in the Amazon MSK cluster.\n \n\n :rtype: dict\n :return: {\n 'ClusterArn': 'string',\n 'ClusterName': 'string',\n 'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED'\n }\n \n \n \"\"\"\n pass\n\ndef delete_cluster(ClusterArn=None, CurrentVersion=None):\n \"\"\"\n Deletes the MSK cluster specified by the Amazon Resource Name (ARN) in the request.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_cluster(\n ClusterArn='string',\n CurrentVersion='string'\n )\n \n \n :type ClusterArn: string\n :param ClusterArn: [REQUIRED]\n The Amazon Resource Name (ARN) that uniquely identifies the cluster.\n \n\n :type CurrentVersion: string\n :param CurrentVersion: The current version of the MSK cluster.\n\n :rtype: dict\n :return: {\n 'ClusterArn': 'string',\n 'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED'\n }\n \n \n \"\"\"\n pass\n\ndef describe_cluster(ClusterArn=None):\n \"\"\"\n Returns a description of the MSK cluster whose Amazon Resource Name (ARN) is specified in the request.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_cluster(\n ClusterArn='string'\n )\n \n \n :type ClusterArn: string\n :param ClusterArn: [REQUIRED]\n The Amazon Resource Name (ARN) that uniquely identifies the cluster.\n \n\n :rtype: dict\n :return: {\n 'ClusterInfo': {\n 'BrokerNodeGroupInfo': {\n 'BrokerAZDistribution': 'DEFAULT',\n 'ClientSubnets': [\n 'string',\n ],\n 'InstanceType': 'string',\n 'SecurityGroups': [\n 'string',\n ],\n 'StorageInfo': {\n 'EbsStorageInfo': {\n 'VolumeSize': 123\n }\n }\n },\n 'ClusterArn': 'string',\n 'ClusterName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'CurrentBrokerSoftwareInfo': {\n 'ConfigurationArn': 'string',\n 'ConfigurationRevision': 'string',\n 'KafkaVersion': 'string'\n },\n 'CurrentVersion': 'string',\n 'EncryptionInfo': {\n 'EncryptionAtRest': {\n 'DataVolumeKMSKeyId': 'string'\n }\n },\n 'EnhancedMonitoring': 'DEFAULT'|'PER_BROKER'|'PER_TOPIC_PER_BROKER',\n 'NumberOfBrokerNodes': 123,\n 'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED',\n 'ZookeeperConnectString': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_bootstrap_brokers(ClusterArn=None):\n \"\"\"\n A list of brokers that a client application can use to bootstrap.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bootstrap_brokers(\n ClusterArn='string'\n )\n \n \n :type ClusterArn: string\n :param ClusterArn: [REQUIRED]\n The Amazon Resource Name (ARN) that uniquely identifies the cluster.\n \n\n :rtype: dict\n :return: {\n 'BootstrapBrokerString': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_clusters(ClusterNameFilter=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of clusters in an account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_clusters(\n ClusterNameFilter='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ClusterNameFilter: string\n :param ClusterNameFilter: Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of clusters to return in the response. If there are more clusters, the response includes a NextToken parameter.\n\n :type NextToken: string\n :param NextToken: The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. 
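# A minimal hand-written sketch (not part of the generated stub) of fetching
# the connection strings a Kafka client needs: get_bootstrap_brokers for the
# broker list and describe_cluster for the ZooKeeper string. The cluster ARN
# is a hypothetical placeholder.
import boto3

kafka = boto3.client('kafka')
arn = 'arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster/abc-123'  # hypothetical
brokers = kafka.get_bootstrap_brokers(ClusterArn=arn)['BootstrapBrokerString']
zookeeper = kafka.describe_cluster(ClusterArn=arn)['ClusterInfo']['ZookeeperConnectString']
print('bootstrap.servers =', brokers)
print('zookeeper.connect =', zookeeper)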
To get another batch of clusters, provide this token in your next request.\n\n :rtype: dict\n :return: {\n 'ClusterInfoList': [\n {\n 'BrokerNodeGroupInfo': {\n 'BrokerAZDistribution': 'DEFAULT',\n 'ClientSubnets': [\n 'string',\n ],\n 'InstanceType': 'string',\n 'SecurityGroups': [\n 'string',\n ],\n 'StorageInfo': {\n 'EbsStorageInfo': {\n 'VolumeSize': 123\n }\n }\n },\n 'ClusterArn': 'string',\n 'ClusterName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'CurrentBrokerSoftwareInfo': {\n 'ConfigurationArn': 'string',\n 'ConfigurationRevision': 'string',\n 'KafkaVersion': 'string'\n },\n 'CurrentVersion': 'string',\n 'EncryptionInfo': {\n 'EncryptionAtRest': {\n 'DataVolumeKMSKeyId': 'string'\n }\n },\n 'EnhancedMonitoring': 'DEFAULT'|'PER_BROKER'|'PER_TOPIC_PER_BROKER',\n 'NumberOfBrokerNodes': 123,\n 'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED',\n 'ZookeeperConnectString': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_nodes(ClusterArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of the broker nodes in the cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.list_nodes(\n ClusterArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ClusterArn: string\n :param ClusterArn: [REQUIRED]\n The Amazon Resource Name (ARN) that uniquely identifies the cluster.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of clusters to return in the response. If there are more clusters, the response includes a NextToken parameter.\n\n :type NextToken: string\n :param NextToken: The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. 
To get another batch of clusters, provide this token in your next request.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'NodeInfoList': [\n {\n 'AddedToClusterTime': 'string',\n 'BrokerNodeInfo': {\n 'AttachedENIId': 'string',\n 'BrokerId': 123.0,\n 'ClientSubnet': 'string',\n 'ClientVpcIpAddress': 'string',\n 'CurrentBrokerSoftwareInfo': {\n 'ConfigurationArn': 'string',\n 'ConfigurationRevision': 'string',\n 'KafkaVersion': 'string'\n }\n },\n 'InstanceType': 'string',\n 'NodeARN': 'string',\n 'NodeType': 'BROKER',\n 'ZookeeperNodeInfo': {\n 'AttachedENIId': 'string',\n 'ClientVpcIpAddress': 'string',\n 'ZookeeperId': 123.0,\n 'ZookeeperVersion': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5917829275131226, "alphanum_fraction": 0.5959793925285339, "avg_line_length": 33.65373992919922, "blob_id": "2bf11dad70c04c0ceecc8854540b66138083c83d", "content_id": "41f8aba73ee5fc1530b25b0329949152be909f0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25021, "license_type": "permissive", "max_line_length": 399, "num_lines": 722, "path": "/pyboto3/cloudhsmv2.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
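A short sketch of how can_paginate is typically used before requesting a paginator, assuming a configured cloudhsmv2 client; describe_clusters is assumed to be registered as paginated:

import boto3

client = boto3.client('cloudhsmv2')

# Only build a paginator when the operation actually supports pagination.
if client.can_paginate('describe_clusters'):
    for page in client.get_paginator('describe_clusters').paginate():
        print(page['Clusters'])
else:
    # Fall back to a single call for non-paginated operations.
    print(client.describe_clusters()['Clusters'])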
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef copy_backup_to_region(DestinationRegion=None, BackupId=None):\n \"\"\"\n Copy an AWS CloudHSM cluster backup to a different region.\n See also: AWS API Documentation\n \n \n :example: response = client.copy_backup_to_region(\n DestinationRegion='string',\n BackupId='string'\n )\n \n \n :type DestinationRegion: string\n :param DestinationRegion: [REQUIRED]\n The AWS region that will contain your copied CloudHSM cluster backup.\n \n\n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup that will be copied to the destination region.\n \n\n :rtype: dict\n :return: {\n 'DestinationBackup': {\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'SourceRegion': 'string',\n 'SourceBackup': 'string',\n 'SourceCluster': 'string'\n }\n }\n \n \n :returns: \n CreateTimestamp (datetime) --\n SourceRegion (string) --\n SourceBackup (string) --\n SourceCluster (string) --\n \n \"\"\"\n pass\n\ndef create_cluster(SubnetIds=None, HsmType=None, SourceBackupId=None):\n \"\"\"\n Creates a new AWS CloudHSM cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.create_cluster(\n SubnetIds=[\n 'string',\n ],\n HsmType='string',\n SourceBackupId='string'\n )\n \n \n :type SubnetIds: list\n :param SubnetIds: [REQUIRED]\n The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria:\n All subnets must be in the same virtual private cloud (VPC).\n You can specify only one subnet per Availability Zone.\n (string) --\n \n\n :type HsmType: string\n :param HsmType: [REQUIRED]\n The type of HSM to use in the cluster. Currently the only allowed value is hsm1.medium .\n \n\n :type SourceBackupId: string\n :param SourceBackupId: The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. 
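A minimal create_cluster sketch grounded in the parameters documented here; the subnet IDs are hypothetical placeholders, and hsm1.medium is the only HSM type the docs allow:

import boto3

client = boto3.client('cloudhsmv2')

# Subnets must share one VPC, with at most one subnet per Availability Zone.
response = client.create_cluster(
    SubnetIds=['subnet-11111111', 'subnet-22222222'],  # hypothetical IDs
    HsmType='hsm1.medium',  # currently the only allowed value per the docs
)
print(response['Cluster']['ClusterId'], response['Cluster']['State'])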
To find the backup ID, use DescribeBackups .\n\n :rtype: dict\n :return: {\n 'Cluster': {\n 'BackupPolicy': 'DEFAULT',\n 'ClusterId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'Hsms': [\n {\n 'AvailabilityZone': 'string',\n 'ClusterId': 'string',\n 'SubnetId': 'string',\n 'EniId': 'string',\n 'EniIp': 'string',\n 'HsmId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'ACTIVE'|'DEGRADED'|'DELETE_IN_PROGRESS'|'DELETED',\n 'StateMessage': 'string'\n },\n ],\n 'HsmType': 'string',\n 'PreCoPassword': 'string',\n 'SecurityGroup': 'string',\n 'SourceBackupId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'UNINITIALIZED'|'INITIALIZE_IN_PROGRESS'|'INITIALIZED'|'ACTIVE'|'UPDATE_IN_PROGRESS'|'DELETE_IN_PROGRESS'|'DELETED'|'DEGRADED',\n 'StateMessage': 'string',\n 'SubnetMapping': {\n 'string': 'string'\n },\n 'VpcId': 'string',\n 'Certificates': {\n 'ClusterCsr': 'string',\n 'HsmCertificate': 'string',\n 'AwsHardwareCertificate': 'string',\n 'ManufacturerHardwareCertificate': 'string',\n 'ClusterCertificate': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_hsm(ClusterId=None, AvailabilityZone=None, IpAddress=None):\n \"\"\"\n Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.create_hsm(\n ClusterId='string',\n AvailabilityZone='string',\n IpAddress='string'\n )\n \n \n :type ClusterId: string\n :param ClusterId: [REQUIRED]\n The identifier (ID) of the HSM's cluster. To find the cluster ID, use DescribeClusters .\n \n\n :type AvailabilityZone: string\n :param AvailabilityZone: [REQUIRED]\n The Availability Zone where you are creating the HSM. To find the cluster's Availability Zones, use DescribeClusters .\n \n\n :type IpAddress: string\n :param IpAddress: The HSM's IP address. If you specify an IP address, use an available address from the subnet that maps to the Availability Zone where you are creating the HSM. If you don't specify an IP address, one is chosen for you from that subnet.\n\n :rtype: dict\n :return: {\n 'Hsm': {\n 'AvailabilityZone': 'string',\n 'ClusterId': 'string',\n 'SubnetId': 'string',\n 'EniId': 'string',\n 'EniIp': 'string',\n 'HsmId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'ACTIVE'|'DEGRADED'|'DELETE_IN_PROGRESS'|'DELETED',\n 'StateMessage': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_backup(BackupId=None):\n \"\"\"\n Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request. For more information on restoring a backup, see RestoreBackup\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to be deleted. To find the ID of a backup, use the DescribeBackups operation.\n \n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupId': 'string',\n 'BackupState': 'CREATE_IN_PROGRESS'|'READY'|'DELETED'|'PENDING_DELETION',\n 'ClusterId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'CopyTimestamp': datetime(2015, 1, 1),\n 'SourceRegion': 'string',\n 'SourceBackup': 'string',\n 'SourceCluster': 'string',\n 'DeleteTimestamp': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_cluster(ClusterId=None):\n \"\"\"\n Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters . 
To delete an HSM, use DeleteHsm .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_cluster(\n ClusterId='string'\n )\n \n \n :type ClusterId: string\n :param ClusterId: [REQUIRED]\n The identifier (ID) of the cluster that you are deleting. To find the cluster ID, use DescribeClusters .\n \n\n :rtype: dict\n :return: {\n 'Cluster': {\n 'BackupPolicy': 'DEFAULT',\n 'ClusterId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'Hsms': [\n {\n 'AvailabilityZone': 'string',\n 'ClusterId': 'string',\n 'SubnetId': 'string',\n 'EniId': 'string',\n 'EniIp': 'string',\n 'HsmId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'ACTIVE'|'DEGRADED'|'DELETE_IN_PROGRESS'|'DELETED',\n 'StateMessage': 'string'\n },\n ],\n 'HsmType': 'string',\n 'PreCoPassword': 'string',\n 'SecurityGroup': 'string',\n 'SourceBackupId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'UNINITIALIZED'|'INITIALIZE_IN_PROGRESS'|'INITIALIZED'|'ACTIVE'|'UPDATE_IN_PROGRESS'|'DELETE_IN_PROGRESS'|'DELETED'|'DEGRADED',\n 'StateMessage': 'string',\n 'SubnetMapping': {\n 'string': 'string'\n },\n 'VpcId': 'string',\n 'Certificates': {\n 'ClusterCsr': 'string',\n 'HsmCertificate': 'string',\n 'AwsHardwareCertificate': 'string',\n 'ManufacturerHardwareCertificate': 'string',\n 'ClusterCertificate': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_hsm(ClusterId=None, HsmId=None, EniId=None, EniIp=None):\n \"\"\"\n Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_hsm(\n ClusterId='string',\n HsmId='string',\n EniId='string',\n EniIp='string'\n )\n \n \n :type ClusterId: string\n :param ClusterId: [REQUIRED]\n The identifier (ID) of the cluster that contains the HSM that you are deleting.\n \n\n :type HsmId: string\n :param HsmId: The identifier (ID) of the HSM that you are deleting.\n\n :type EniId: string\n :param EniId: The identifier (ID) of the elastic network interface (ENI) of the HSM that you are deleting.\n\n :type EniIp: string\n :param EniIp: The IP address of the elastic network interface (ENI) of the HSM that you are deleting.\n\n :rtype: dict\n :return: {\n 'HsmId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_backups(NextToken=None, MaxResults=None, Filters=None, SortAscending=None):\n \"\"\"\n Gets information about backups of AWS CloudHSM clusters.\n This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n NextToken='string',\n MaxResults=123,\n Filters={\n 'string': [\n 'string',\n ]\n },\n SortAscending=True|False\n )\n \n \n :type NextToken: string\n :param NextToken: The NextToken value that you received in the previous response. Use this value to get more backups.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of backups to return in the response. 
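A sketch of paging through the backups of a single cluster using the clusterIds filter documented below; the cluster ID is a placeholder, and describe_backups is assumed to be registered as paginated:

import boto3

client = boto3.client('cloudhsmv2')

# Filters map a filter name to a list of values.
paginator = client.get_paginator('describe_backups')
pages = paginator.paginate(Filters={'clusterIds': ['cluster-abcdefg']})  # hypothetical ID
for page in pages:
    for backup in page['Backups']:
        print(backup['BackupId'], backup['BackupState'])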
When there are more backups than the number you specify, the response contains a NextToken value.\n\n :type Filters: dict\n :param Filters: One or more filters to limit the items returned in the response.\n Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).\n Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation.\n Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).\n Use the states filter to return only backups that match the specified state.\n (string) --\n (list) --\n (string) --\n \n \n\n :type SortAscending: boolean\n :param SortAscending: \n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupId': 'string',\n 'BackupState': 'CREATE_IN_PROGRESS'|'READY'|'DELETED'|'PENDING_DELETION',\n 'ClusterId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'CopyTimestamp': datetime(2015, 1, 1),\n 'SourceRegion': 'string',\n 'SourceBackup': 'string',\n 'SourceCluster': 'string',\n 'DeleteTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_clusters(Filters=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets information about AWS CloudHSM clusters.\n This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_clusters(\n Filters={\n 'string': [\n 'string',\n ]\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: dict\n :param Filters: One or more filters to limit the items returned in the response.\n Use the clusterIds filter to return only the specified clusters. Specify clusters by their cluster identifier (ID).\n Use the vpcIds filter to return only the clusters in the specified virtual private clouds (VPCs). Specify VPCs by their VPC identifier (ID).\n Use the states filter to return only clusters that match the specified state.\n (string) --\n (list) --\n (string) --\n \n \n\n :type NextToken: string\n :param NextToken: The NextToken value that you received in the previous response. Use this value to get more clusters.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of clusters to return in the response. 
When there are more clusters than the number you specify, the response contains a NextToken value.\n\n :rtype: dict\n :return: {\n 'Clusters': [\n {\n 'BackupPolicy': 'DEFAULT',\n 'ClusterId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'Hsms': [\n {\n 'AvailabilityZone': 'string',\n 'ClusterId': 'string',\n 'SubnetId': 'string',\n 'EniId': 'string',\n 'EniIp': 'string',\n 'HsmId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'ACTIVE'|'DEGRADED'|'DELETE_IN_PROGRESS'|'DELETED',\n 'StateMessage': 'string'\n },\n ],\n 'HsmType': 'string',\n 'PreCoPassword': 'string',\n 'SecurityGroup': 'string',\n 'SourceBackupId': 'string',\n 'State': 'CREATE_IN_PROGRESS'|'UNINITIALIZED'|'INITIALIZE_IN_PROGRESS'|'INITIALIZED'|'ACTIVE'|'UPDATE_IN_PROGRESS'|'DELETE_IN_PROGRESS'|'DELETED'|'DEGRADED',\n 'StateMessage': 'string',\n 'SubnetMapping': {\n 'string': 'string'\n },\n 'VpcId': 'string',\n 'Certificates': {\n 'ClusterCsr': 'string',\n 'HsmCertificate': 'string',\n 'AwsHardwareCertificate': 'string',\n 'ManufacturerHardwareCertificate': 'string',\n 'ClusterCertificate': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef initialize_cluster(ClusterId=None, SignedCert=None, TrustAnchor=None):\n \"\"\"\n Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters .\n See also: AWS API Documentation\n \n \n :example: response = client.initialize_cluster(\n ClusterId='string',\n SignedCert='string',\n TrustAnchor='string'\n )\n \n \n :type ClusterId: string\n :param ClusterId: [REQUIRED]\n The identifier (ID) of the cluster that you are claiming. 
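A sketch of the claim flow for initialize_cluster: fetch the cluster CSR via describe_clusters (the Certificates.ClusterCsr field shown in the return shape above), sign it with your issuing CA out of band, then submit the signed certificate and trust anchor. The cluster ID and file names are placeholders:

import boto3

client = boto3.client('cloudhsmv2')

cluster_id = 'cluster-abcdefg'  # hypothetical ID
info = client.describe_clusters(Filters={'clusterIds': [cluster_id]})
csr = info['Clusters'][0]['Certificates']['ClusterCsr']
# ... sign the CSR with your issuing CA outside of this script ...

with open('signed_cert.pem') as f, open('ca_chain.pem') as g:  # hypothetical files
    client.initialize_cluster(
        ClusterId=cluster_id,
        SignedCert=f.read(),
        TrustAnchor=g.read(),
    )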
To find the cluster ID, use DescribeClusters .\n \n\n :type SignedCert: string\n :param SignedCert: [REQUIRED]\n The cluster certificate issued (signed) by your issuing certificate authority (CA). The certificate must be in PEM format and can contain a maximum of 5000 characters.\n \n\n :type TrustAnchor: string\n :param TrustAnchor: [REQUIRED]\n The issuing certificate of the issuing certificate authority (CA) that issued (signed) the cluster certificate. This can be a root (self-signed) certificate or a certificate chain that begins with the certificate that issued the cluster certificate and ends with a root certificate. The certificate or certificate chain must be in PEM format and can contain a maximum of 5000 characters.\n \n\n :rtype: dict\n :return: {\n 'State': 'CREATE_IN_PROGRESS'|'UNINITIALIZED'|'INITIALIZE_IN_PROGRESS'|'INITIALIZED'|'ACTIVE'|'UPDATE_IN_PROGRESS'|'DELETE_IN_PROGRESS'|'DELETED'|'DEGRADED',\n 'StateMessage': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags(ResourceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of tags for the specified AWS CloudHSM cluster.\n This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags(\n ResourceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The cluster identifier (ID) for the cluster whose tags you are getting. To find the cluster ID, use DescribeClusters .\n \n\n :type NextToken: string\n :param NextToken: The NextToken value that you received in the previous response. Use this value to get more tags.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of tags to return in the response. When there are more tags than the number you specify, the response contains a NextToken value.\n\n :rtype: dict\n :return: {\n 'TagList': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef restore_backup(BackupId=None):\n \"\"\"\n Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For more information on deleting a backup, see DeleteBackup .\n See also: AWS API Documentation\n \n \n :example: response = client.restore_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to be restored. 
To find the ID of a backup, use the DescribeBackups operation.\n \n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupId': 'string',\n 'BackupState': 'CREATE_IN_PROGRESS'|'READY'|'DELETED'|'PENDING_DELETION',\n 'ClusterId': 'string',\n 'CreateTimestamp': datetime(2015, 1, 1),\n 'CopyTimestamp': datetime(2015, 1, 1),\n 'SourceRegion': 'string',\n 'SourceBackup': 'string',\n 'SourceCluster': 'string',\n 'DeleteTimestamp': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(ResourceId=None, TagList=None):\n \"\"\"\n Adds or overwrites one or more tags for the specified AWS CloudHSM cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n ResourceId='string',\n TagList=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The cluster identifier (ID) for the cluster that you are tagging. To find the cluster ID, use DescribeClusters .\n \n\n :type TagList: list\n :param TagList: [REQUIRED]\n A list of one or more tags.\n (dict) --Contains a tag. A tag is a key-value pair.\n Key (string) -- [REQUIRED]The key of the tag.\n Value (string) -- [REQUIRED]The value of the tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(ResourceId=None, TagKeyList=None):\n \"\"\"\n Removes the specified tag or tags from the specified AWS CloudHSM cluster.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n ResourceId='string',\n TagKeyList=[\n 'string',\n ]\n )\n \n \n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The cluster identifier (ID) for the cluster whose tags you are removing. To find the cluster ID, use DescribeClusters .\n \n\n :type TagKeyList: list\n :param TagKeyList: [REQUIRED]\n A list of one or more tag keys for the tags that you are removing. Specify only the tag keys, not the tag values.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5903278589248657, "alphanum_fraction": 0.5947273373603821, "avg_line_length": 34.24275588989258, "blob_id": "0d8da7aec7885182e70ca07a00f006c953d5aa57", "content_id": "cd9852700b65707d4b8a61756a6d81db0626e89b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90011, "license_type": "permissive", "max_line_length": 979, "num_lines": 2554, "path": "/pyboto3/appstream.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_fleet(FleetName=None, StackName=None):\n \"\"\"\n Associates the specified fleet with the specified stack.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_fleet(\n FleetName='string',\n StackName='string'\n )\n \n \n :type FleetName: string\n :param FleetName: [REQUIRED]\n The name of the fleet.\n \n\n :type StackName: string\n :param StackName: [REQUIRED]\n The name of the stack.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef batch_associate_user_stack(UserStackAssociations=None):\n \"\"\"\n Associates the specified users with the specified stacks. Users in a user pool cannot be assigned to stacks with fleets that are joined to an Active Directory domain.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_associate_user_stack(\n UserStackAssociations=[\n {\n 'StackName': 'string',\n 'UserName': 'string',\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL',\n 'SendEmailNotification': True|False\n },\n ]\n )\n \n \n :type UserStackAssociations: list\n :param UserStackAssociations: [REQUIRED]\n The list of UserStackAssociation objects.\n (dict) --Describes a user in the user pool and the associated stack.\n StackName (string) -- [REQUIRED]The name of the stack that is associated with the user.\n UserName (string) -- [REQUIRED]The email address of the user who is associated with the stack.\n AuthenticationType (string) -- [REQUIRED]The authentication type for the user.\n SendEmailNotification (boolean) --Specifies whether a welcome email is sent to a user after the user is created in the user pool.\n \n \n\n :rtype: dict\n :return: {\n 'errors': [\n {\n 'UserStackAssociation': {\n 'StackName': 'string',\n 'UserName': 'string',\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL',\n 'SendEmailNotification': True|False\n },\n 'ErrorCode': 'STACK_NOT_FOUND'|'USER_NAME_NOT_FOUND'|'INTERNAL_ERROR',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_disassociate_user_stack(UserStackAssociations=None):\n \"\"\"\n Disassociates the specified users from the specified stacks.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_disassociate_user_stack(\n UserStackAssociations=[\n {\n 'StackName': 'string',\n 'UserName': 'string',\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL',\n 'SendEmailNotification': True|False\n },\n ]\n )\n \n \n :type UserStackAssociations: list\n :param UserStackAssociations: [REQUIRED]\n The list of UserStackAssociation objects.\n (dict) --Describes a user in the user pool and the associated stack.\n StackName (string) -- [REQUIRED]The name of the stack that is associated with the user.\n UserName (string) -- [REQUIRED]The email address of the user who is associated with the stack.\n AuthenticationType (string) -- [REQUIRED]The authentication type for the user.\n SendEmailNotification (boolean) --Specifies whether a welcome email is sent to a user after the user is created in the user pool.\n \n \n\n :rtype: dict\n :return: {\n 'errors': [\n {\n 'UserStackAssociation': {\n 'StackName': 'string',\n 'UserName': 'string',\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL',\n 'SendEmailNotification': True|False\n },\n 'ErrorCode': 
'STACK_NOT_FOUND'|'USER_NAME_NOT_FOUND'|'INTERNAL_ERROR',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef copy_image(SourceImageName=None, DestinationImageName=None, DestinationRegion=None, DestinationImageDescription=None):\n \"\"\"\n Copies the image within the same region or to a new region within the same AWS account. Note that any tags you added to the image will not be copied.\n See also: AWS API Documentation\n \n \n :example: response = client.copy_image(\n SourceImageName='string',\n DestinationImageName='string',\n DestinationRegion='string',\n DestinationImageDescription='string'\n )\n \n \n :type SourceImageName: string\n :param SourceImageName: [REQUIRED]\n The name of the image to copy.\n \n\n :type DestinationImageName: string\n :param DestinationImageName: [REQUIRED]\n The name that the image will have when it is copied to the destination.\n \n\n :type DestinationRegion: string\n :param DestinationRegion: [REQUIRED]\n The destination region to which the image will be copied. This parameter is required, even if you are copying an image within the same region.\n \n\n :type DestinationImageDescription: string\n :param DestinationImageDescription: The description that the image will have when it is copied to the destination.\n\n :rtype: dict\n :return: {\n 'DestinationImageName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_directory_config(DirectoryName=None, OrganizationalUnitDistinguishedNames=None, ServiceAccountCredentials=None):\n \"\"\"\n Creates a Directory Config object in AppStream 2.0. This object includes the information required to join streaming instances to an Active Directory domain.\n See also: AWS API Documentation\n \n \n :example: response = client.create_directory_config(\n DirectoryName='string',\n OrganizationalUnitDistinguishedNames=[\n 'string',\n ],\n ServiceAccountCredentials={\n 'AccountName': 'string',\n 'AccountPassword': 'string'\n }\n )\n \n \n :type DirectoryName: string\n :param DirectoryName: [REQUIRED]\n The fully qualified name of the directory (for example, corp.example.com).\n \n\n :type OrganizationalUnitDistinguishedNames: list\n :param OrganizationalUnitDistinguishedNames: [REQUIRED]\n The distinguished names of the organizational units for computer accounts.\n (string) --\n \n\n :type ServiceAccountCredentials: dict\n :param ServiceAccountCredentials: [REQUIRED]\n The credentials for the service account used by the streaming instance to connect to the directory.\n AccountName (string) -- [REQUIRED]The user name of the account. 
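A minimal sketch of registering Active Directory join information via create_directory_config; the OU, service account, and password are hypothetical placeholders, not values from this document:

import boto3

client = boto3.client('appstream')

response = client.create_directory_config(
    DirectoryName='corp.example.com',
    OrganizationalUnitDistinguishedNames=[
        'OU=AppStream,DC=corp,DC=example,DC=com',  # hypothetical OU
    ],
    ServiceAccountCredentials={
        'AccountName': 'corp\\appstream_svc',  # hypothetical service account
        'AccountPassword': 'REPLACE_ME',       # placeholder, never hard-code secrets
    },
)
print(response['DirectoryConfig']['DirectoryName'])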
This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified.\n AccountPassword (string) -- [REQUIRED]The password for the account.\n \n\n :rtype: dict\n :return: {\n 'DirectoryConfig': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedNames': [\n 'string',\n ],\n 'ServiceAccountCredentials': {\n 'AccountName': 'string',\n 'AccountPassword': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_fleet(Name=None, ImageName=None, ImageArn=None, InstanceType=None, FleetType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None, DomainJoinInfo=None):\n \"\"\"\n Creates a fleet. A fleet consists of streaming instances that run a specified image.\n See also: AWS API Documentation\n \n \n :example: response = client.create_fleet(\n Name='string',\n ImageName='string',\n ImageArn='string',\n InstanceType='string',\n FleetType='ALWAYS_ON'|'ON_DEMAND',\n ComputeCapacity={\n 'DesiredInstances': 123\n },\n VpcConfig={\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n MaxUserDurationInSeconds=123,\n DisconnectTimeoutInSeconds=123,\n Description='string',\n DisplayName='string',\n EnableDefaultInternetAccess=True|False,\n DomainJoinInfo={\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n A unique name for the fleet.\n \n\n :type ImageName: string\n :param ImageName: The name of the image used to create the fleet.\n\n :type ImageArn: string\n :param ImageArn: The ARN of the public, private, or shared image to use.\n\n :type InstanceType: string\n :param InstanceType: [REQUIRED]\n The instance type to use when launching fleet instances. The following instance types are available:\n stream.standard.medium\n stream.standard.large\n stream.compute.large\n stream.compute.xlarge\n stream.compute.2xlarge\n stream.compute.4xlarge\n stream.compute.8xlarge\n stream.memory.large\n stream.memory.xlarge\n stream.memory.2xlarge\n stream.memory.4xlarge\n stream.memory.8xlarge\n stream.graphics-design.large\n stream.graphics-design.xlarge\n stream.graphics-design.2xlarge\n stream.graphics-design.4xlarge\n stream.graphics-desktop.2xlarge\n stream.graphics-pro.4xlarge\n stream.graphics-pro.8xlarge\n stream.graphics-pro.16xlarge\n \n\n :type FleetType: string\n :param FleetType: The fleet type.\n ALWAYS_ON\n Provides users with instant-on access to their apps. You are charged for all running instances in your fleet, even if no users are streaming apps.\n ON_DEMAND\n Provide users with access to applications after they connect, which takes one to two minutes. 
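A hedged create_fleet sketch for an ON_DEMAND fleet, using only parameters documented here; the fleet name, image name, and subnet ID are hypothetical placeholders:

import boto3

client = boto3.client('appstream')

response = client.create_fleet(
    Name='example-fleet',          # hypothetical name
    ImageName='example-image',     # hypothetical image
    InstanceType='stream.standard.medium',  # from the documented list
    FleetType='ON_DEMAND',
    ComputeCapacity={'DesiredInstances': 2},
    VpcConfig={'SubnetIds': ['subnet-11111111']},  # hypothetical subnet
)
print(response['Fleet']['State'])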
You are charged for instance streaming when users are connected and a small hourly fee for instances that are not streaming apps.\n \n\n :type ComputeCapacity: dict\n :param ComputeCapacity: [REQUIRED]\n The desired capacity for the fleet.\n DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances.\n \n\n :type VpcConfig: dict\n :param VpcConfig: The VPC configuration for the fleet.\n SubnetIds (list) --The subnets to which a network interface is established from the fleet instance.\n (string) --\n SecurityGroupIds (list) --The security groups for the fleet.\n (string) --\n \n\n :type MaxUserDurationInSeconds: integer\n :param MaxUserDurationInSeconds: The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.\n\n :type DisconnectTimeoutInSeconds: integer\n :param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.\n\n :type Description: string\n :param Description: The description for display.\n\n :type DisplayName: string\n :param DisplayName: The fleet name for display.\n\n :type EnableDefaultInternetAccess: boolean\n :param EnableDefaultInternetAccess: Enables or disables default internet access for the fleet.\n\n :type DomainJoinInfo: dict\n :param DomainJoinInfo: The information needed to join a Microsoft Active Directory domain.\n DirectoryName (string) --The fully qualified name of the directory (for example, corp.example.com).\n OrganizationalUnitDistinguishedName (string) --The distinguished name of the organizational unit for computer accounts.\n \n\n :rtype: dict\n :return: {\n 'Fleet': {\n 'Arn': 'string',\n 'Name': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'ImageName': 'string',\n 'ImageArn': 'string',\n 'InstanceType': 'string',\n 'FleetType': 'ALWAYS_ON'|'ON_DEMAND',\n 'ComputeCapacityStatus': {\n 'Desired': 123,\n 'Running': 123,\n 'InUse': 123,\n 'Available': 123\n },\n 'MaxUserDurationInSeconds': 123,\n 'DisconnectTimeoutInSeconds': 123,\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'FleetErrors': [\n {\n 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string'\n },\n ],\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n 
}\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_image_builder(Name=None, ImageName=None, ImageArn=None, InstanceType=None, Description=None, DisplayName=None, VpcConfig=None, EnableDefaultInternetAccess=None, DomainJoinInfo=None, AppstreamAgentVersion=None):\n \"\"\"\n Creates an image builder. An image builder is a virtual machine that is used to create an image.\n The initial state of the builder is PENDING . When it is ready, the state is RUNNING .\n See also: AWS API Documentation\n \n \n :example: response = client.create_image_builder(\n Name='string',\n ImageName='string',\n ImageArn='string',\n InstanceType='string',\n Description='string',\n DisplayName='string',\n VpcConfig={\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n EnableDefaultInternetAccess=True|False,\n DomainJoinInfo={\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n AppstreamAgentVersion='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n A unique name for the image builder.\n \n\n :type ImageName: string\n :param ImageName: The name of the image used to create the builder.\n\n :type ImageArn: string\n :param ImageArn: The ARN of the public, private, or shared image to use.\n\n :type InstanceType: string\n :param InstanceType: [REQUIRED]\n The instance type to use when launching the image builder.\n \n\n :type Description: string\n :param Description: The description for display.\n\n :type DisplayName: string\n :param DisplayName: The image builder name for display.\n\n :type VpcConfig: dict\n :param VpcConfig: The VPC configuration for the image builder. You can specify only one subnet.\n SubnetIds (list) --The subnets to which a network interface is established from the fleet instance.\n (string) --\n SecurityGroupIds (list) --The security groups for the fleet.\n (string) --\n \n\n :type EnableDefaultInternetAccess: boolean\n :param EnableDefaultInternetAccess: Enables or disables default internet access for the image builder.\n\n :type DomainJoinInfo: dict\n :param DomainJoinInfo: The information needed to join a Microsoft Active Directory domain.\n DirectoryName (string) --The fully qualified name of the directory (for example, corp.example.com).\n OrganizationalUnitDistinguishedName (string) --The distinguished name of the organizational unit for computer accounts.\n \n\n :type AppstreamAgentVersion: string\n :param AppstreamAgentVersion: The version of the AppStream 2.0 agent to use for this image builder. 
To use the latest version of the AppStream 2.0 agent, specify [LATEST].\n\n :rtype: dict\n :return: {\n 'ImageBuilder': {\n 'Name': 'string',\n 'Arn': 'string',\n 'ImageArn': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'InstanceType': 'string',\n 'Platform': 'WINDOWS',\n 'State': 'PENDING'|'UPDATING_AGENT'|'RUNNING'|'STOPPING'|'STOPPED'|'REBOOTING'|'SNAPSHOTTING'|'DELETING'|'FAILED',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_UNAVAILABLE',\n 'Message': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n 'ImageBuilderErrors': [\n {\n 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string',\n 'ErrorTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'AppstreamAgentVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_image_builder_streaming_url(Name=None, Validity=None):\n \"\"\"\n Creates a URL to start an image builder streaming session.\n See also: AWS API Documentation\n \n \n :example: response = client.create_image_builder_streaming_url(\n Name='string',\n Validity=123\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the image builder.\n \n\n :type Validity: integer\n :param Validity: The time that the streaming URL will be valid, in seconds. Specify a value between 1 and 604800 seconds. The default is 3600 seconds.\n\n :rtype: dict\n :return: {\n 'StreamingURL': 'string',\n 'Expires': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef create_stack(Name=None, Description=None, DisplayName=None, StorageConnectors=None, RedirectURL=None, FeedbackURL=None, UserSettings=None, ApplicationSettings=None):\n \"\"\"\n Creates a stack to start streaming applications to users. 
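A sketch that creates a stack and then associates it with an existing fleet via associate_fleet (documented earlier in this module); both names are hypothetical placeholders:

import boto3

client = boto3.client('appstream')

# Only Name is required; Description is optional display text.
client.create_stack(
    Name='example-stack',  # hypothetical name
    Description='Demo stack',
)
# A stack streams applications through a fleet once the two are associated.
client.associate_fleet(FleetName='example-fleet', StackName='example-stack')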
A stack consists of an associated fleet, user access policies, and storage configurations.\n See also: AWS API Documentation\n \n \n :example: response = client.create_stack(\n Name='string',\n Description='string',\n DisplayName='string',\n StorageConnectors=[\n {\n 'ConnectorType': 'HOMEFOLDERS'|'GOOGLE_DRIVE'|'ONE_DRIVE',\n 'ResourceIdentifier': 'string',\n 'Domains': [\n 'string',\n ]\n },\n ],\n RedirectURL='string',\n FeedbackURL='string',\n UserSettings=[\n {\n 'Action': 'CLIPBOARD_COPY_FROM_LOCAL_DEVICE'|'CLIPBOARD_COPY_TO_LOCAL_DEVICE'|'FILE_UPLOAD'|'FILE_DOWNLOAD'|'PRINTING_TO_LOCAL_DEVICE',\n 'Permission': 'ENABLED'|'DISABLED'\n },\n ],\n ApplicationSettings={\n 'Enabled': True|False,\n 'SettingsGroup': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the stack.\n \n\n :type Description: string\n :param Description: The description for display.\n\n :type DisplayName: string\n :param DisplayName: The stack name for display.\n\n :type StorageConnectors: list\n :param StorageConnectors: The storage connectors to enable.\n (dict) --Describes a connector to enable persistent storage for users.\n ConnectorType (string) -- [REQUIRED]The type of storage connector.\n ResourceIdentifier (string) --The ARN of the storage connector.\n Domains (list) --The names of the domains for the G Suite account.\n (string) -- GSuite domain for GDrive integration.\n \n \n\n :type RedirectURL: string\n :param RedirectURL: The URL that users are redirected to after their streaming session ends.\n\n :type FeedbackURL: string\n :param FeedbackURL: The URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed.\n\n :type UserSettings: list\n :param UserSettings: The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.\n (dict) --Describes an action and whether the action is enabled or disabled for users during their streaming sessions.\n Action (string) -- [REQUIRED]The action that is enabled or disabled.\n Permission (string) -- [REQUIRED]Indicates whether the action is enabled or disabled.\n \n \n\n :type ApplicationSettings: dict\n :param ApplicationSettings: The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.\n Enabled (boolean) -- [REQUIRED]Enables or disables persistent application settings for users during their streaming sessions.\n SettingsGroup (string) --The path prefix for the S3 bucket where users persistent application settings are stored. 
You can allow the same persistent application settings to be used across multiple stacks by specifying the same settings group for each stack.\n \n\n :rtype: dict\n :return: {\n 'Stack': {\n 'Arn': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'StorageConnectors': [\n {\n 'ConnectorType': 'HOMEFOLDERS'|'GOOGLE_DRIVE'|'ONE_DRIVE',\n 'ResourceIdentifier': 'string',\n 'Domains': [\n 'string',\n ]\n },\n ],\n 'RedirectURL': 'string',\n 'FeedbackURL': 'string',\n 'StackErrors': [\n {\n 'ErrorCode': 'STORAGE_CONNECTOR_ERROR'|'INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string'\n },\n ],\n 'UserSettings': [\n {\n 'Action': 'CLIPBOARD_COPY_FROM_LOCAL_DEVICE'|'CLIPBOARD_COPY_TO_LOCAL_DEVICE'|'FILE_UPLOAD'|'FILE_DOWNLOAD'|'PRINTING_TO_LOCAL_DEVICE',\n 'Permission': 'ENABLED'|'DISABLED'\n },\n ],\n 'ApplicationSettings': {\n 'Enabled': True|False,\n 'SettingsGroup': 'string',\n 'S3BucketName': 'string'\n }\n }\n }\n \n \n :returns: \n (string) -- GSuite domain for GDrive integration.\n \n \"\"\"\n pass\n\ndef create_streaming_url(StackName=None, FleetName=None, UserId=None, ApplicationId=None, Validity=None, SessionContext=None):\n \"\"\"\n Creates a temporary URL to start an AppStream 2.0 streaming session for the specified user. A streaming URL enables application streaming to be tested without user setup.\n See also: AWS API Documentation\n \n \n :example: response = client.create_streaming_url(\n StackName='string',\n FleetName='string',\n UserId='string',\n ApplicationId='string',\n Validity=123,\n SessionContext='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name of the stack.\n \n\n :type FleetName: string\n :param FleetName: [REQUIRED]\n The name of the fleet.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The ID of the user.\n \n\n :type ApplicationId: string\n :param ApplicationId: The name of the application to launch after the session starts. This is the name that you specified as Name in the Image Assistant.\n\n :type Validity: integer\n :param Validity: The time that the streaming URL will be valid, in seconds. Specify a value between 1 and 604800 seconds. The default is 60 seconds.\n\n :type SessionContext: string\n :param SessionContext: The session context. For more information, see Session Context in the Amazon AppStream 2.0 Developer Guide .\n\n :rtype: dict\n :return: {\n 'StreamingURL': 'string',\n 'Expires': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n \"\"\"\n Creates a new user in the user pool.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user(\n UserName='string',\n MessageAction='SUPPRESS'|'RESEND',\n FirstName='string',\n LastName='string',\n AuthenticationType='API'|'SAML'|'USERPOOL'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The email address of the user.\n \n\n :type MessageAction: string\n :param MessageAction: The action to take for the welcome email that is sent to a user after the user is created in the user pool. If you specify SUPPRESS, no email is sent. If you specify RESEND, do not specify the first name or last name of the user. If the value is null, the email is sent.\n Note\n The temporary password in the welcome email is valid for only 7 days. 
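A sketch of creating a USERPOOL user and assigning it to a stack with batch_associate_user_stack (documented earlier in this module); the email address and stack name are hypothetical placeholders:

import boto3

client = boto3.client('appstream')

client.create_user(
    UserName='user@example.com',  # hypothetical address
    FirstName='Jane',
    LastName='Doe',
    AuthenticationType='USERPOOL',
)
# Associate the new user with a stack so they can stream from it.
client.batch_associate_user_stack(UserStackAssociations=[{
    'StackName': 'example-stack',  # hypothetical name
    'UserName': 'user@example.com',
    'AuthenticationType': 'USERPOOL',
    'SendEmailNotification': True,
}])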
If users don t set their passwords within 7 days, you must send them a new welcome email.\n \n\n :type FirstName: string\n :param FirstName: The first name, or given name, of the user.\n\n :type LastName: string\n :param LastName: The last name, or surname, of the user.\n\n :type AuthenticationType: string\n :param AuthenticationType: [REQUIRED]\n The authentication type for the user. You must specify USERPOOL.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_directory_config(DirectoryName=None):\n \"\"\"\n Deletes the specified Directory Config object from AppStream 2.0. This object includes the information required to join streaming instances to an Active Directory domain.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_directory_config(\n DirectoryName='string'\n )\n \n \n :type DirectoryName: string\n :param DirectoryName: [REQUIRED]\n The name of the directory configuration.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_fleet(Name=None):\n \"\"\"\n Deletes the specified fleet.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_fleet(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the fleet.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_image(Name=None):\n \"\"\"\n Deletes the specified image. You cannot delete an image when it is in use. After you delete an image, you cannot provision new capacity using the image.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_image(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the image.\n \n\n :rtype: dict\n :return: {\n 'Image': {\n 'Name': 'string',\n 'Arn': 'string',\n 'BaseImageArn': 'string',\n 'DisplayName': 'string',\n 'State': 'PENDING'|'AVAILABLE'|'FAILED'|'COPYING'|'DELETING',\n 'Visibility': 'PUBLIC'|'PRIVATE'|'SHARED',\n 'ImageBuilderSupported': True|False,\n 'Platform': 'WINDOWS',\n 'Description': 'string',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_BUILDER_NOT_AVAILABLE'|'IMAGE_COPY_FAILURE',\n 'Message': 'string'\n },\n 'Applications': [\n {\n 'Name': 'string',\n 'DisplayName': 'string',\n 'IconURL': 'string',\n 'LaunchPath': 'string',\n 'LaunchParameters': 'string',\n 'Enabled': True|False,\n 'Metadata': {\n 'string': 'string'\n }\n },\n ],\n 'CreatedTime': datetime(2015, 1, 1),\n 'PublicBaseImageReleasedDate': datetime(2015, 1, 1),\n 'AppstreamAgentVersion': 'string',\n 'ImagePermissions': {\n 'allowFleet': True|False,\n 'allowImageBuilder': True|False\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_image_builder(Name=None):\n \"\"\"\n Deletes the specified image builder and releases the capacity.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_image_builder(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the image builder.\n \n\n :rtype: dict\n :return: {\n 'ImageBuilder': {\n 'Name': 'string',\n 'Arn': 'string',\n 'ImageArn': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'InstanceType': 'string',\n 'Platform': 'WINDOWS',\n 'State': 'PENDING'|'UPDATING_AGENT'|'RUNNING'|'STOPPING'|'STOPPED'|'REBOOTING'|'SNAPSHOTTING'|'DELETING'|'FAILED',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_UNAVAILABLE',\n 'Message': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 
'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n 'ImageBuilderErrors': [\n {\n 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string',\n 'ErrorTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'AppstreamAgentVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_image_permissions(Name=None, SharedAccountId=None):\n \"\"\"\n Deletes permissions for the specified private image. After you delete permissions for an image, AWS accounts to which you previously granted these permissions can no longer use the image.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_image_permissions(\n Name='string',\n SharedAccountId='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the private image.\n \n\n :type SharedAccountId: string\n :param SharedAccountId: [REQUIRED]\n The 12-digit ID of the AWS account for which to delete image permissions.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_stack(Name=None):\n \"\"\"\n Deletes the specified stack. After the stack is deleted, the application streaming environment provided by the stack is no longer available to users. Also, any reservations made for application streaming sessions for the stack are released.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_stack(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the stack.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_user(UserName=None, AuthenticationType=None):\n \"\"\"\n Deletes a user from the user pool.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user(\n UserName='string',\n AuthenticationType='API'|'SAML'|'USERPOOL'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The email address of the user.\n \n\n :type AuthenticationType: string\n :param AuthenticationType: [REQUIRED]\n The authentication type for the user. You must specify USERPOOL.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_directory_configs(DirectoryNames=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes one or more specified Directory Config objects for AppStream 2.0, if the names for these objects are provided. Otherwise, all Directory Config objects in the account are described. 
These objects include the information required to join streaming instances to an Active Directory domain.\n Although the response syntax in this topic includes the account password, this password is not returned in the actual response.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_directory_configs(\n DirectoryNames=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type DirectoryNames: list\n :param DirectoryNames: The directory names.\n (string) --\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum size of each page of results.\n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'DirectoryConfigs': [\n {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedNames': [\n 'string',\n ],\n 'ServiceAccountCredentials': {\n 'AccountName': 'string',\n 'AccountPassword': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_fleets(Names=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes one or more specified fleets, if the fleet names are provided. Otherwise, all fleets in the account are described.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_fleets(\n Names=[\n 'string',\n ],\n NextToken='string'\n )\n \n \n :type Names: list\n :param Names: The names of the fleets to describe.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'Fleets': [\n {\n 'Arn': 'string',\n 'Name': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'ImageName': 'string',\n 'ImageArn': 'string',\n 'InstanceType': 'string',\n 'FleetType': 'ALWAYS_ON'|'ON_DEMAND',\n 'ComputeCapacityStatus': {\n 'Desired': 123,\n 'Running': 123,\n 'InUse': 123,\n 'Available': 123\n },\n 'MaxUserDurationInSeconds': 123,\n 'DisconnectTimeoutInSeconds': 123,\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'FleetErrors': [\n {\n 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string'\n },\n ],\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 
'OrganizationalUnitDistinguishedName': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_image_builders(Names=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes one or more specified image builders, if the image builder names are provided. Otherwise, all image builders in the account are described.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_image_builders(\n Names=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Names: list\n :param Names: The names of the image builders to describe.\n (string) --\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum size of each page of results.\n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'ImageBuilders': [\n {\n 'Name': 'string',\n 'Arn': 'string',\n 'ImageArn': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'InstanceType': 'string',\n 'Platform': 'WINDOWS',\n 'State': 'PENDING'|'UPDATING_AGENT'|'RUNNING'|'STOPPING'|'STOPPED'|'REBOOTING'|'SNAPSHOTTING'|'DELETING'|'FAILED',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_UNAVAILABLE',\n 'Message': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n 'ImageBuilderErrors': [\n {\n 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string',\n 'ErrorTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'AppstreamAgentVersion': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_image_permissions(Name=None, MaxResults=None, SharedAwsAccountIds=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes the permissions for shared AWS account IDs on a private image that you own.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_image_permissions(\n Name='string',\n MaxResults=123,\n SharedAwsAccountIds=[\n 'string',\n ],\n NextToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the private image for which to describe permissions. 
The image must be one that you own.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum size of each page of results.\n\n :type SharedAwsAccountIds: list\n :param SharedAwsAccountIds: The 12-digit ID of one or more AWS accounts with which the image is shared.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'Name': 'string',\n 'SharedImagePermissionsList': [\n {\n 'sharedAccountId': 'string',\n 'imagePermissions': {\n 'allowFleet': True|False,\n 'allowImageBuilder': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_images(Names=None, Arns=None, Type=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves a list that describes one or more specified images, if the image names or image ARNs are provided. Otherwise, all images in the account are described.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_images(\n Names=[\n 'string',\n ],\n Arns=[\n 'string',\n ],\n Type='PUBLIC'|'PRIVATE'|'SHARED',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Names: list\n :param Names: The names of the public or private images to describe.\n (string) --\n \n\n :type Arns: list\n :param Arns: The ARNs of the public, private, and shared images to describe.\n (string) --\n \n\n :type Type: string\n :param Type: The type of image (public, private, or shared) to describe.\n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of each page of results.\n\n :rtype: dict\n :return: {\n 'Images': [\n {\n 'Name': 'string',\n 'Arn': 'string',\n 'BaseImageArn': 'string',\n 'DisplayName': 'string',\n 'State': 'PENDING'|'AVAILABLE'|'FAILED'|'COPYING'|'DELETING',\n 'Visibility': 'PUBLIC'|'PRIVATE'|'SHARED',\n 'ImageBuilderSupported': True|False,\n 'Platform': 'WINDOWS',\n 'Description': 'string',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_BUILDER_NOT_AVAILABLE'|'IMAGE_COPY_FAILURE',\n 'Message': 'string'\n },\n 'Applications': [\n {\n 'Name': 'string',\n 'DisplayName': 'string',\n 'IconURL': 'string',\n 'LaunchPath': 'string',\n 'LaunchParameters': 'string',\n 'Enabled': True|False,\n 'Metadata': {\n 'string': 'string'\n }\n },\n ],\n 'CreatedTime': datetime(2015, 1, 1),\n 'PublicBaseImageReleasedDate': datetime(2015, 1, 1),\n 'AppstreamAgentVersion': 'string',\n 'ImagePermissions': {\n 'allowFleet': True|False,\n 'allowImageBuilder': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_sessions(StackName=None, FleetName=None, UserId=None, NextToken=None, Limit=None, AuthenticationType=None):\n \"\"\"\n Retrieves a list that describes the streaming sessions for a specified stack and fleet. If a user ID is provided for the stack and fleet, only streaming sessions for that user are described. 
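For illustration only, a sketch (assuming a boto3 'appstream' client and existing stack and fleet names; every literal below is a placeholder) that lists active sessions and ends them with the expire_session operation documented later in this module:\n import boto3\n client = boto3.client('appstream')\n resp = client.describe_sessions(StackName='example-stack', FleetName='example-fleet')\n for session in resp.get('Sessions', []):\n if session['State'] == 'ACTIVE': # other documented states: PENDING, EXPIRED\n client.expire_session(SessionId=session['Id'])\n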
If an authentication type is not provided, the default is to authenticate users using a streaming URL.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_sessions(\n StackName='string',\n FleetName='string',\n UserId='string',\n NextToken='string',\n Limit=123,\n AuthenticationType='API'|'SAML'|'USERPOOL'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name of the stack. This value is case-sensitive.\n \n\n :type FleetName: string\n :param FleetName: [REQUIRED]\n The name of the fleet. This value is case-sensitive.\n \n\n :type UserId: string\n :param UserId: The user ID.\n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :type Limit: integer\n :param Limit: The size of each page of results. The default value is 20 and the maximum value is 50.\n\n :type AuthenticationType: string\n :param AuthenticationType: The authentication method. Specify API for a user authenticated using a streaming URL or SAML for a SAML federated user. The default is to authenticate users using a streaming URL.\n\n :rtype: dict\n :return: {\n 'Sessions': [\n {\n 'Id': 'string',\n 'UserId': 'string',\n 'StackName': 'string',\n 'FleetName': 'string',\n 'State': 'ACTIVE'|'PENDING'|'EXPIRED',\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL',\n 'NetworkAccessConfiguration': {\n 'EniPrivateIpAddress': 'string',\n 'EniId': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_stacks(Names=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_stacks(\n Names=[\n 'string',\n ],\n NextToken='string'\n )\n \n \n :type Names: list\n :param Names: The names of the stacks to describe.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'Stacks': [\n {\n 'Arn': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'StorageConnectors': [\n {\n 'ConnectorType': 'HOMEFOLDERS'|'GOOGLE_DRIVE'|'ONE_DRIVE',\n 'ResourceIdentifier': 'string',\n 'Domains': [\n 'string',\n ]\n },\n ],\n 'RedirectURL': 'string',\n 'FeedbackURL': 'string',\n 'StackErrors': [\n {\n 'ErrorCode': 'STORAGE_CONNECTOR_ERROR'|'INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string'\n },\n ],\n 'UserSettings': [\n {\n 'Action': 'CLIPBOARD_COPY_FROM_LOCAL_DEVICE'|'CLIPBOARD_COPY_TO_LOCAL_DEVICE'|'FILE_UPLOAD'|'FILE_DOWNLOAD'|'PRINTING_TO_LOCAL_DEVICE',\n 'Permission': 'ENABLED'|'DISABLED'\n },\n ],\n 'ApplicationSettings': {\n 'Enabled': True|False,\n 'SettingsGroup': 'string',\n 'S3BucketName': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) -- GSuite domain for GDrive integration.\n \n \"\"\"\n pass\n\ndef describe_user_stack_associations(StackName=None, UserName=None, AuthenticationType=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes the UserStackAssociation objects. 
You must specify either or both of the following:\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user_stack_associations(\n StackName='string',\n UserName='string',\n AuthenticationType='API'|'SAML'|'USERPOOL',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type StackName: string\n :param StackName: The name of the stack that is associated with the user.\n\n :type UserName: string\n :param UserName: The email address of the user who is associated with the stack.\n\n :type AuthenticationType: string\n :param AuthenticationType: The authentication type for the user who is associated with the stack. You must specify USERPOOL.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of each page of results.\n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'UserStackAssociations': [\n {\n 'StackName': 'string',\n 'UserName': 'string',\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL',\n 'SendEmailNotification': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n StackName (string) -- The name of the stack that is associated with the user.\n UserName (string) -- The email address of the user who is associated with the stack.\n AuthenticationType (string) -- The authentication type for the user who is associated with the stack. You must specify USERPOOL.\n MaxResults (integer) -- The maximum size of each page of results.\n NextToken (string) -- The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n \n \"\"\"\n pass\n\ndef describe_users(AuthenticationType=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves a list that describes one or more specified users in the user pool, if user names are provided. Otherwise, all users in the user pool are described.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_users(\n AuthenticationType='API'|'SAML'|'USERPOOL',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type AuthenticationType: string\n :param AuthenticationType: [REQUIRED]\n The authentication type for the users in the user pool to describe. You must specify USERPOOL.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum size of each page of results.\n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'Users': [\n {\n 'Arn': 'string',\n 'UserName': 'string',\n 'Enabled': True|False,\n 'Status': 'string',\n 'FirstName': 'string',\n 'LastName': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'AuthenticationType': 'API'|'SAML'|'USERPOOL'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n UNCONFIRMED The user is created but not confirmed.\n CONFIRMED The user is confirmed.\n ARCHIVED The user is no longer active.\n COMPROMISED The user is disabled because of a potential security threat.\n UNKNOWN The user status is not known.\n \n \"\"\"\n pass\n\ndef disable_user(UserName=None, AuthenticationType=None):\n \"\"\"\n Disables the specified user in the user pool. Users can't sign in to AppStream 2.0 until they are re-enabled. 
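A minimal sketch (assuming a boto3 'appstream' client; the email address is a placeholder):\n import boto3\n client = boto3.client('appstream')\n client.disable_user(UserName='user@example.com', AuthenticationType='USERPOOL')\n # The documented enable_user operation reverses this:\n client.enable_user(UserName='user@example.com', AuthenticationType='USERPOOL')\n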
This action does not delete the user.\n See also: AWS API Documentation\n \n \n :example: response = client.disable_user(\n UserName='string',\n AuthenticationType='API'|'SAML'|'USERPOOL'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The email address of the user.\n \n\n :type AuthenticationType: string\n :param AuthenticationType: [REQUIRED]\n The authentication type for the user. You must specify USERPOOL.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_fleet(FleetName=None, StackName=None):\n \"\"\"\n Disassociates the specified fleet from the specified stack.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_fleet(\n FleetName='string',\n StackName='string'\n )\n \n \n :type FleetName: string\n :param FleetName: [REQUIRED]\n The name of the fleet.\n \n\n :type StackName: string\n :param StackName: [REQUIRED]\n The name of the stack.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef enable_user(UserName=None, AuthenticationType=None):\n \"\"\"\n Enables a user in the user pool. After being enabled, users can sign in to AppStream 2.0 and open applications from the stacks to which they are assigned.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_user(\n UserName='string',\n AuthenticationType='API'|'SAML'|'USERPOOL'\n )\n \n \n :type UserName: string\n :param UserName: [REQUIRED]\n The email address of the user.\n \n\n :type AuthenticationType: string\n :param AuthenticationType: [REQUIRED]\n The authentication type for the user. You must specify USERPOOL.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef expire_session(SessionId=None):\n \"\"\"\n Immediately stops the specified streaming session.\n See also: AWS API Documentation\n \n \n :example: response = client.expire_session(\n SessionId='string'\n )\n \n \n :type SessionId: string\n :param SessionId: [REQUIRED]\n The ID of the streaming session.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_associated_fleets(StackName=None, NextToken=None):\n \"\"\"\n Retrieves the name of the fleet that is associated with the specified stack.\n See also: AWS API Documentation\n \n \n :example: response = client.list_associated_fleets(\n StackName='string',\n NextToken='string'\n )\n \n \n :type StackName: string\n :param StackName: [REQUIRED]\n The name of the stack.\n \n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'Names': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_associated_stacks(FleetName=None, NextToken=None):\n \"\"\"\n Retrieves the name of the stack with which the specified fleet is associated.\n See also: AWS API Documentation\n \n \n :example: response = client.list_associated_stacks(\n FleetName='string',\n NextToken='string'\n )\n \n \n :type FleetName: string\n :param FleetName: [REQUIRED]\n The name of the fleet.\n \n\n :type NextToken: string\n :param NextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.\n\n :rtype: dict\n :return: {\n 'Names': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_tags_for_resource(ResourceArn=None):\n \"\"\"\n Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.\n For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n ResourceArn='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource.\n \n\n :rtype: dict\n :return: {\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef start_fleet(Name=None):\n \"\"\"\n Starts the specified fleet.\n See also: AWS API Documentation\n \n \n :example: response = client.start_fleet(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the fleet.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef start_image_builder(Name=None, AppstreamAgentVersion=None):\n \"\"\"\n Starts the specified image builder.\n See also: AWS API Documentation\n \n \n :example: response = client.start_image_builder(\n Name='string',\n AppstreamAgentVersion='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the image builder.\n \n\n :type AppstreamAgentVersion: string\n :param AppstreamAgentVersion: The version of the AppStream 2.0 agent to use for this image builder. 
To use the latest version of the AppStream 2.0 agent, specify [LATEST].\n\n :rtype: dict\n :return: {\n 'ImageBuilder': {\n 'Name': 'string',\n 'Arn': 'string',\n 'ImageArn': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'InstanceType': 'string',\n 'Platform': 'WINDOWS',\n 'State': 'PENDING'|'UPDATING_AGENT'|'RUNNING'|'STOPPING'|'STOPPED'|'REBOOTING'|'SNAPSHOTTING'|'DELETING'|'FAILED',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_UNAVAILABLE',\n 'Message': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n 'ImageBuilderErrors': [\n {\n 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string',\n 'ErrorTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'AppstreamAgentVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef stop_fleet(Name=None):\n \"\"\"\n Stops the specified fleet.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_fleet(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the fleet.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef stop_image_builder(Name=None):\n \"\"\"\n Stops the specified image builder.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_image_builder(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the image builder.\n \n\n :rtype: dict\n :return: {\n 'ImageBuilder': {\n 'Name': 'string',\n 'Arn': 'string',\n 'ImageArn': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'InstanceType': 'string',\n 'Platform': 'WINDOWS',\n 'State': 'PENDING'|'UPDATING_AGENT'|'RUNNING'|'STOPPING'|'STOPPED'|'REBOOTING'|'SNAPSHOTTING'|'DELETING'|'FAILED',\n 'StateChangeReason': {\n 'Code': 'INTERNAL_ERROR'|'IMAGE_UNAVAILABLE',\n 'Message': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n 'ImageBuilderErrors': [\n {\n 'ErrorCode': 
'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string',\n 'ErrorTimestamp': datetime(2015, 1, 1)\n },\n ],\n 'AppstreamAgentVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef tag_resource(ResourceArn=None, Tags=None):\n \"\"\"\n Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.\n Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.\n To list the current tags for your resources, use ListTagsForResource . To disassociate tags from your resources, use UntagResource .\n For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n ResourceArn='string',\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource.\n \n\n :type Tags: dict\n :param Tags: [REQUIRED]\n The tags to associate. A tag is a key-value pair (the value is optional). For example, Environment=Test , or, if you do not specify a value, Environment= .\n If you do not specify a value, we set the value to an empty string.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(ResourceArn=None, TagKeys=None):\n \"\"\"\n Disassociates one or more specified tags from the specified AppStream 2.0 resource.\n To list the current tags for your resources, use ListTagsForResource .\n For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n ResourceArn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n The tag keys for the tags to disassociate.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_directory_config(DirectoryName=None, OrganizationalUnitDistinguishedNames=None, ServiceAccountCredentials=None):\n \"\"\"\n Updates the specified Directory Config object in AppStream 2.0. 
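As a sketch (assuming a boto3 'appstream' client; the directory name and credentials are placeholders, and a real password should come from a secret store rather than source code), the service account credentials can be rotated with the documented parameters:\n import boto3\n client = boto3.client('appstream')\n client.update_directory_config(\n DirectoryName='corp.example.com',\n ServiceAccountCredentials={\n 'AccountName': 'appstream-svc', # placeholder service account\n 'AccountPassword': 'rotated-password-placeholder'\n }\n )\n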
This object includes the information required to join streaming instances to an Active Directory domain.\n See also: AWS API Documentation\n \n \n :example: response = client.update_directory_config(\n DirectoryName='string',\n OrganizationalUnitDistinguishedNames=[\n 'string',\n ],\n ServiceAccountCredentials={\n 'AccountName': 'string',\n 'AccountPassword': 'string'\n }\n )\n \n \n :type DirectoryName: string\n :param DirectoryName: [REQUIRED]\n The name of the Directory Config object.\n \n\n :type OrganizationalUnitDistinguishedNames: list\n :param OrganizationalUnitDistinguishedNames: The distinguished names of the organizational units for computer accounts.\n (string) --\n \n\n :type ServiceAccountCredentials: dict\n :param ServiceAccountCredentials: The credentials for the service account used by the streaming instance to connect to the directory.\n AccountName (string) -- [REQUIRED]The user name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified.\n AccountPassword (string) -- [REQUIRED]The password for the account.\n \n\n :rtype: dict\n :return: {\n 'DirectoryConfig': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedNames': [\n 'string',\n ],\n 'ServiceAccountCredentials': {\n 'AccountName': 'string',\n 'AccountPassword': 'string'\n },\n 'CreatedTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_fleet(ImageName=None, ImageArn=None, Name=None, InstanceType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, DeleteVpcConfig=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None, DomainJoinInfo=None, AttributesToDelete=None):\n \"\"\"\n Updates the specified fleet.\n If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName and ComputeCapacity attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.\n See also: AWS API Documentation\n \n \n :example: response = client.update_fleet(\n ImageName='string',\n ImageArn='string',\n Name='string',\n InstanceType='string',\n ComputeCapacity={\n 'DesiredInstances': 123\n },\n VpcConfig={\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n MaxUserDurationInSeconds=123,\n DisconnectTimeoutInSeconds=123,\n DeleteVpcConfig=True|False,\n Description='string',\n DisplayName='string',\n EnableDefaultInternetAccess=True|False,\n DomainJoinInfo={\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n },\n AttributesToDelete=[\n 'VPC_CONFIGURATION'|'VPC_CONFIGURATION_SECURITY_GROUP_IDS'|'DOMAIN_JOIN_INFO',\n ]\n )\n \n \n :type ImageName: string\n :param ImageName: The name of the image used to create the fleet.\n\n :type ImageArn: string\n :param ImageArn: The ARN of the public, private, or shared image to use.\n\n :type Name: string\n :param Name: A unique name for the fleet.\n\n :type InstanceType: string\n :param InstanceType: The instance type to use when launching fleet instances. 
The following instance types are available:\n stream.standard.medium\n stream.standard.large\n stream.compute.large\n stream.compute.xlarge\n stream.compute.2xlarge\n stream.compute.4xlarge\n stream.compute.8xlarge\n stream.memory.large\n stream.memory.xlarge\n stream.memory.2xlarge\n stream.memory.4xlarge\n stream.memory.8xlarge\n stream.graphics-design.large\n stream.graphics-design.xlarge\n stream.graphics-design.2xlarge\n stream.graphics-design.4xlarge\n stream.graphics-desktop.2xlarge\n stream.graphics-pro.4xlarge\n stream.graphics-pro.8xlarge\n stream.graphics-pro.16xlarge\n \n\n :type ComputeCapacity: dict\n :param ComputeCapacity: The desired capacity for the fleet.\n DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances.\n \n\n :type VpcConfig: dict\n :param VpcConfig: The VPC configuration for the fleet.\n SubnetIds (list) --The subnets to which a network interface is established from the fleet instance.\n (string) --\n SecurityGroupIds (list) --The security groups for the fleet.\n (string) --\n \n\n :type MaxUserDurationInSeconds: integer\n :param MaxUserDurationInSeconds: The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.\n\n :type DisconnectTimeoutInSeconds: integer\n :param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.\n\n :type DeleteVpcConfig: boolean\n :param DeleteVpcConfig: Deletes the VPC association for the specified fleet.\n\n :type Description: string\n :param Description: The description for display.\n\n :type DisplayName: string\n :param DisplayName: The fleet name for display.\n\n :type EnableDefaultInternetAccess: boolean\n :param EnableDefaultInternetAccess: Enables or disables default internet access for the fleet.\n\n :type DomainJoinInfo: dict\n :param DomainJoinInfo: The information needed to join a Microsoft Active Directory domain.\n DirectoryName (string) --The fully qualified name of the directory (for example, corp.example.com).\n OrganizationalUnitDistinguishedName (string) --The distinguished name of the organizational unit for computer accounts.\n \n\n :type AttributesToDelete: list\n :param AttributesToDelete: The fleet attributes to delete.\n (string) --The fleet attribute.\n \n\n :rtype: dict\n :return: {\n 'Fleet': {\n 'Arn': 'string',\n 'Name': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'ImageName': 'string',\n 'ImageArn': 'string',\n 'InstanceType': 'string',\n 'FleetType': 'ALWAYS_ON'|'ON_DEMAND',\n 'ComputeCapacityStatus': {\n 'Desired': 123,\n 'Running': 123,\n 'InUse': 123,\n 'Available': 123\n },\n 'MaxUserDurationInSeconds': 123,\n 'DisconnectTimeoutInSeconds': 123,\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n 'CreatedTime': datetime(2015, 1, 1),\n 'FleetErrors': [\n {\n 'ErrorCode': 
'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string'\n },\n ],\n 'EnableDefaultInternetAccess': True|False,\n 'DomainJoinInfo': {\n 'DirectoryName': 'string',\n 'OrganizationalUnitDistinguishedName': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_image_permissions(Name=None, SharedAccountId=None, ImagePermissions=None):\n \"\"\"\n Adds or updates permissions for the specified private image.\n See also: AWS API Documentation\n \n \n :example: response = client.update_image_permissions(\n Name='string',\n SharedAccountId='string',\n ImagePermissions={\n 'allowFleet': True|False,\n 'allowImageBuilder': True|False\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the private image.\n \n\n :type SharedAccountId: string\n :param SharedAccountId: [REQUIRED]\n The 12-digit ID of the AWS account for which you want add or update image permissions.\n \n\n :type ImagePermissions: dict\n :param ImagePermissions: [REQUIRED]\n The permissions for the image.\n allowFleet (boolean) --Indicates whether the image can be used for a fleet.\n allowImageBuilder (boolean) --Indicates whether the image can be used for an image builder.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_stack(DisplayName=None, Description=None, Name=None, StorageConnectors=None, DeleteStorageConnectors=None, RedirectURL=None, FeedbackURL=None, AttributesToDelete=None, UserSettings=None, ApplicationSettings=None):\n \"\"\"\n Updates the specified fields for the specified stack.\n See also: AWS API Documentation\n \n \n :example: response = client.update_stack(\n DisplayName='string',\n Description='string',\n Name='string',\n StorageConnectors=[\n {\n 'ConnectorType': 'HOMEFOLDERS'|'GOOGLE_DRIVE'|'ONE_DRIVE',\n 'ResourceIdentifier': 'string',\n 'Domains': [\n 'string',\n ]\n },\n ],\n DeleteStorageConnectors=True|False,\n RedirectURL='string',\n FeedbackURL='string',\n AttributesToDelete=[\n 'STORAGE_CONNECTORS'|'STORAGE_CONNECTOR_HOMEFOLDERS'|'STORAGE_CONNECTOR_GOOGLE_DRIVE'|'STORAGE_CONNECTOR_ONE_DRIVE'|'REDIRECT_URL'|'FEEDBACK_URL'|'THEME_NAME'|'USER_SETTINGS',\n ],\n UserSettings=[\n {\n 'Action': 'CLIPBOARD_COPY_FROM_LOCAL_DEVICE'|'CLIPBOARD_COPY_TO_LOCAL_DEVICE'|'FILE_UPLOAD'|'FILE_DOWNLOAD'|'PRINTING_TO_LOCAL_DEVICE',\n 'Permission': 'ENABLED'|'DISABLED'\n },\n ],\n ApplicationSettings={\n 'Enabled': True|False,\n 'SettingsGroup': 'string'\n }\n )\n \n \n :type DisplayName: string\n :param DisplayName: The stack name for display.\n\n :type Description: string\n :param Description: The description for display.\n\n 
:type Name: string\n :param Name: [REQUIRED]\n The name of the stack.\n \n\n :type StorageConnectors: list\n :param StorageConnectors: The storage connectors to enable.\n (dict) --Describes a connector to enable persistent storage for users.\n ConnectorType (string) -- [REQUIRED]The type of storage connector.\n ResourceIdentifier (string) --The ARN of the storage connector.\n Domains (list) --The names of the domains for the G Suite account.\n (string) -- GSuite domain for GDrive integration.\n \n \n\n :type DeleteStorageConnectors: boolean\n :param DeleteStorageConnectors: Deletes the storage connectors currently enabled for the stack.\n\n :type RedirectURL: string\n :param RedirectURL: The URL that users are redirected to after their streaming session ends.\n\n :type FeedbackURL: string\n :param FeedbackURL: The URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed.\n\n :type AttributesToDelete: list\n :param AttributesToDelete: The stack attributes to delete.\n (string) --\n \n\n :type UserSettings: list\n :param UserSettings: The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.\n (dict) --Describes an action and whether the action is enabled or disabled for users during their streaming sessions.\n Action (string) -- [REQUIRED]The action that is enabled or disabled.\n Permission (string) -- [REQUIRED]Indicates whether the action is enabled or disabled.\n \n \n\n :type ApplicationSettings: dict\n :param ApplicationSettings: The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.\n Enabled (boolean) -- [REQUIRED]Enables or disables persistent application settings for users during their streaming sessions.\n SettingsGroup (string) --The path prefix for the S3 bucket where users' persistent application settings are stored. 
You can allow the same persistent application settings to be used across multiple stacks by specifying the same settings group for each stack.\n \n\n :rtype: dict\n :return: {\n 'Stack': {\n 'Arn': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DisplayName': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'StorageConnectors': [\n {\n 'ConnectorType': 'HOMEFOLDERS'|'GOOGLE_DRIVE'|'ONE_DRIVE',\n 'ResourceIdentifier': 'string',\n 'Domains': [\n 'string',\n ]\n },\n ],\n 'RedirectURL': 'string',\n 'FeedbackURL': 'string',\n 'StackErrors': [\n {\n 'ErrorCode': 'STORAGE_CONNECTOR_ERROR'|'INTERNAL_SERVICE_ERROR',\n 'ErrorMessage': 'string'\n },\n ],\n 'UserSettings': [\n {\n 'Action': 'CLIPBOARD_COPY_FROM_LOCAL_DEVICE'|'CLIPBOARD_COPY_TO_LOCAL_DEVICE'|'FILE_UPLOAD'|'FILE_DOWNLOAD'|'PRINTING_TO_LOCAL_DEVICE',\n 'Permission': 'ENABLED'|'DISABLED'\n },\n ],\n 'ApplicationSettings': {\n 'Enabled': True|False,\n 'SettingsGroup': 'string',\n 'S3BucketName': 'string'\n }\n }\n }\n \n \n :returns: \n (string) -- GSuite domain for GDrive integration.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6102286577224731, "alphanum_fraction": 0.618567168712616, "avg_line_length": 40.208805084228516, "blob_id": "38d85ec0fbb7eb598be300327f098d0276702265", "content_id": "a28f6c37cbc5b9eee0188506424b3b93cbbf1cf4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90784, "license_type": "permissive", "max_line_length": 389, "num_lines": 2203, "path": "/pyboto3/comprehend.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_detect_dominant_language(TextList=None):\n \"\"\"\n Determines the dominant language of the input text for a batch of documents. For a list of languages that Amazon Comprehend can detect, see Amazon Comprehend Supported Languages .\n See also: AWS API Documentation\n \n \n :example: response = client.batch_detect_dominant_language(\n TextList=[\n 'string',\n ]\n )\n \n \n :type TextList: list\n :param TextList: [REQUIRED]\n A list containing the text of the input documents. The list can contain a maximum of 25 documents. 
Each document should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'ResultList': [\n {\n 'Index': 123,\n 'Languages': [\n {\n 'LanguageCode': 'string',\n 'Score': ...\n },\n ]\n },\n ],\n 'ErrorList': [\n {\n 'Index': 123,\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_detect_entities(TextList=None, LanguageCode=None):\n \"\"\"\n Inspects the text of a batch of documents for named entities and returns information about them. For more information about named entities, see how-entities\n See also: AWS API Documentation\n \n \n :example: response = client.batch_detect_entities(\n TextList=[\n 'string',\n ],\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type TextList: list\n :param TextList: [REQUIRED]\n A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n (string) --\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'ResultList': [\n {\n 'Index': 123,\n 'Entities': [\n {\n 'Score': ...,\n 'Type': 'PERSON'|'LOCATION'|'ORGANIZATION'|'COMMERCIAL_ITEM'|'EVENT'|'DATE'|'QUANTITY'|'TITLE'|'OTHER',\n 'Text': 'string',\n 'BeginOffset': 123,\n 'EndOffset': 123\n },\n ]\n },\n ],\n 'ErrorList': [\n {\n 'Index': 123,\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_detect_key_phrases(TextList=None, LanguageCode=None):\n \"\"\"\n Detects the key noun phrases found in a batch of documents.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_detect_key_phrases(\n TextList=[\n 'string',\n ],\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type TextList: list\n :param TextList: [REQUIRED]\n A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n (string) --\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'ResultList': [\n {\n 'Index': 123,\n 'KeyPhrases': [\n {\n 'Score': ...,\n 'Text': 'string',\n 'BeginOffset': 123,\n 'EndOffset': 123\n },\n ]\n },\n ],\n 'ErrorList': [\n {\n 'Index': 123,\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_detect_sentiment(TextList=None, LanguageCode=None):\n \"\"\"\n Inspects a batch of documents and returns an inference of the prevailing sentiment, POSITIVE , NEUTRAL , MIXED , or NEGATIVE , in each one.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_detect_sentiment(\n TextList=[\n 'string',\n ],\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type TextList: list\n :param TextList: [REQUIRED]\n A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n (string) --\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. 
You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'ResultList': [\n {\n 'Index': 123,\n 'Sentiment': 'POSITIVE'|'NEGATIVE'|'NEUTRAL'|'MIXED',\n 'SentimentScore': {\n 'Positive': ...,\n 'Negative': ...,\n 'Neutral': ...,\n 'Mixed': ...\n }\n },\n ],\n 'ErrorList': [\n {\n 'Index': 123,\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_detect_syntax(TextList=None, LanguageCode=None):\n \"\"\"\n Inspects the text of a batch of documents for the syntax and part of speech of the words in the document and returns information about them. For more information, see how-syntax .\n See also: AWS API Documentation\n \n \n :example: response = client.batch_detect_syntax(\n TextList=[\n 'string',\n ],\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type TextList: list\n :param TextList: [REQUIRED]\n A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n (string) --\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'ResultList': [\n {\n 'Index': 123,\n 'SyntaxTokens': [\n {\n 'TokenId': 123,\n 'Text': 'string',\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'PartOfSpeech': {\n 'Tag': 'ADJ'|'ADP'|'ADV'|'AUX'|'CONJ'|'CCONJ'|'DET'|'INTJ'|'NOUN'|'NUM'|'O'|'PART'|'PRON'|'PROPN'|'PUNCT'|'SCONJ'|'SYM'|'VERB',\n 'Score': ...\n }\n },\n ]\n },\n ],\n 'ErrorList': [\n {\n 'Index': 123,\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_document_classifier(DocumentClassifierName=None, DataAccessRoleArn=None, InputDataConfig=None, ClientRequestToken=None, LanguageCode=None):\n \"\"\"\n Creates a new document classifier that you can use to categorize documents. To create a classifier, you provide a set of training documents that are labeled with the categories that you want to use. After the classifier is trained, you can use it to categorize a set of labeled documents into the categories. 
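A training sketch (assuming a boto3 'comprehend' client, an existing IAM role, and labeled training data already in S3; the ARN and URI below are placeholders) that creates a classifier and polls the documented describe_document_classifier operation until training reaches a terminal state:\n import time\n import boto3\n client = boto3.client('comprehend')\n resp = client.create_document_classifier(\n DocumentClassifierName='example-classifier',\n DataAccessRoleArn='arn:aws:iam::123456789012:role/example-comprehend-role', # placeholder role ARN\n InputDataConfig={'S3Uri': 's3://example-bucket/train/'},\n LanguageCode='en'\n )\n arn = resp['DocumentClassifierArn']\n while True:\n props = client.describe_document_classifier(DocumentClassifierArn=arn)['DocumentClassifierProperties']\n if props['Status'] in ('TRAINED', 'IN_ERROR'):\n break\n time.sleep(60) # training can take a while; poll sparingly\n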
For more information, see how-document-classification .\n See also: AWS API Documentation\n \n \n :example: response = client.create_document_classifier(\n DocumentClassifierName='string',\n DataAccessRoleArn='string',\n InputDataConfig={\n 'S3Uri': 'string'\n },\n ClientRequestToken='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type DocumentClassifierName: string\n :param DocumentClassifierName: [REQUIRED]\n The name of the document classifier.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.\n \n\n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'DocumentClassifierArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_entity_recognizer(RecognizerName=None, DataAccessRoleArn=None, InputDataConfig=None, ClientRequestToken=None, LanguageCode=None):\n \"\"\"\n Creates an entity recognizer using submitted files. After your CreateEntityRecognizer request is submitted, you can check job status using the API.\n See also: AWS API Documentation\n \n \n :example: response = client.create_entity_recognizer(\n RecognizerName='string',\n DataAccessRoleArn='string',\n InputDataConfig={\n 'EntityTypes': [\n {\n 'Type': 'string'\n },\n ],\n 'Documents': {\n 'S3Uri': 'string'\n },\n 'Annotations': {\n 'S3Uri': 'string'\n },\n 'EntityList': {\n 'S3Uri': 'string'\n }\n },\n ClientRequestToken='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type RecognizerName: string\n :param RecognizerName: [REQUIRED]\n The name given to the newly created recognizer. Recognizer names can be a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The name must be unique in the account/region.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.\n \n\n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data. 
The S3 bucket containing the input data must be located in the same region as the entity recognizer being created.\n EntityTypes (list) -- [REQUIRED]The entity types in the input data for an entity recognizer.\n (dict) --Information about an individual item on a list of entity types.\n Type (string) -- [REQUIRED]Entity type of an item on an entity type list.\n \n Documents (dict) -- [REQUIRED]S3 location of the documents folder for an entity recognizer\n S3Uri (string) -- [REQUIRED]Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.\n Annotations (dict) --S3 location of the annotations file for an entity recognizer.\n S3Uri (string) -- [REQUIRED]Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.\n EntityList (dict) --S3 location of the entity list for an entity recognizer.\n S3Uri (string) -- [REQUIRED]Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.\n \n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. All documents must be in the same language. Only English ('en') is currently supported.\n \n\n :rtype: dict\n :return: {\n 'EntityRecognizerArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_document_classifier(DocumentClassifierArn=None):\n \"\"\"\n Deletes a previously created document classifier\n Only those classifiers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ResourceInUseException will be returned.\n This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_document_classifier(\n DocumentClassifierArn='string'\n )\n \n \n :type DocumentClassifierArn: string\n :param DocumentClassifierArn: [REQUIRED]\n The Amazon Resource Name (ARN) that identifies the document classifier.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_entity_recognizer(EntityRecognizerArn=None):\n \"\"\"\n Deletes an entity recognizer.\n Only those recognizers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ResourceInUseException will be returned.\n This is an asynchronous action that puts the recognizer into a DELETING state, and it is then removed by a background job. 
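A guard sketch (assuming a boto3 'comprehend' client; the ARN is a placeholder) that deletes only when the recognizer is already in one of the terminated states named above:\n import boto3\n client = boto3.client('comprehend')\n arn = 'arn:aws:comprehend:us-east-1:123456789012:entity-recognizer/example'\n props = client.describe_entity_recognizer(EntityRecognizerArn=arn)['EntityRecognizerProperties']\n if props['Status'] in ('TRAINED', 'IN_ERROR'):\n client.delete_entity_recognizer(EntityRecognizerArn=arn)\n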
Once removed, the recognizer disappears from your account and is no longer available for use.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_entity_recognizer(\n EntityRecognizerArn='string'\n )\n \n \n :type EntityRecognizerArn: string\n :param EntityRecognizerArn: [REQUIRED]\n The Amazon Resource Name (ARN) that identifies the entity recognizer.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_document_classification_job(JobId=None):\n \"\"\"\n Gets the properties associated with a document classification job. Use this operation to get the status of a classification job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_document_classification_job(\n JobId='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.\n \n\n :rtype: dict\n :return: {\n 'DocumentClassificationJobProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'DocumentClassifierArn': 'string',\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_document_classifier(DocumentClassifierArn=None):\n \"\"\"\n Gets the properties associated with a document classifier.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_document_classifier(\n DocumentClassifierArn='string'\n )\n \n \n :type DocumentClassifierArn: string\n :param DocumentClassifierArn: [REQUIRED]\n The Amazon Resource Name (ARN) that identifies the document classifier. The operation returns this identifier in its response.\n \n\n :rtype: dict\n :return: {\n 'DocumentClassifierProperties': {\n 'DocumentClassifierArn': 'string',\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string'\n },\n 'ClassifierMetadata': {\n 'NumberOfLabels': 123,\n 'NumberOfTrainedDocuments': 123,\n 'NumberOfTestDocuments': 123,\n 'EvaluationMetrics': {\n 'Accuracy': 123.0,\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1Score': 123.0\n }\n },\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_dominant_language_detection_job(JobId=None):\n \"\"\"\n Gets the properties associated with a dominant language detection job. Use this operation to get the status of a detection job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_dominant_language_detection_job(\n JobId='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier that Amazon Comprehend generated for the job. 
The operation returns this identifier in its response.\n \n\n :rtype: dict\n :return: {\n 'DominantLanguageDetectionJobProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_entities_detection_job(JobId=None):\n \"\"\"\n Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_entities_detection_job(\n JobId='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.\n \n\n :rtype: dict\n :return: {\n 'EntitiesDetectionJobProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'EntityRecognizerArn': 'string',\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_entity_recognizer(EntityRecognizerArn=None):\n \"\"\"\n Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_entity_recognizer(\n EntityRecognizerArn='string'\n )\n \n \n :type EntityRecognizerArn: string\n :param EntityRecognizerArn: [REQUIRED]\n The Amazon Resource Name (ARN) that identifies the entity recognizer.\n \n\n :rtype: dict\n :return: {\n 'EntityRecognizerProperties': {\n 'EntityRecognizerArn': 'string',\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'EntityTypes': [\n {\n 'Type': 'string'\n },\n ],\n 'Documents': {\n 'S3Uri': 'string'\n },\n 'Annotations': {\n 'S3Uri': 'string'\n },\n 'EntityList': {\n 'S3Uri': 'string'\n }\n },\n 'RecognizerMetadata': {\n 'NumberOfTrainedDocuments': 123,\n 'NumberOfTestDocuments': 123,\n 'EvaluationMetrics': {\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1Score': 123.0\n },\n 'EntityTypes': [\n {\n 'Type': 'string'\n },\n ]\n },\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_key_phrases_detection_job(JobId=None):\n \"\"\"\n Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_key_phrases_detection_job(\n JobId='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier that Amazon Comprehend generated for the job. 
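    Illustrative polling sketch for the describe_* job operations above (the job identifier is a hypothetical placeholder):
    
        import time
        import boto3
        
        client = boto3.client('comprehend')
        job_id = '0123456789abcdef'  # hypothetical; returned by a start_* call
        while True:
            props = client.describe_entities_detection_job(
                JobId=job_id
            )['EntitiesDetectionJobProperties']
            if props['JobStatus'] in ('COMPLETED', 'FAILED', 'STOPPED'):
                break
            time.sleep(30)
        print(props['JobStatus'], props.get('Message', ''))
    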
The operation returns this identifier in its response.\n \n\n :rtype: dict\n :return: {\n 'KeyPhrasesDetectionJobProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_sentiment_detection_job(JobId=None):\n \"\"\"\n Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_sentiment_detection_job(\n JobId='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.\n \n\n :rtype: dict\n :return: {\n 'SentimentDetectionJobProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'DataAccessRoleArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_topics_detection_job(JobId=None):\n \"\"\"\n Gets the properties associated with a topic detection job. Use this operation to get the status of a detection job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_topics_detection_job(\n JobId='string'\n )\n \n \n :type JobId: string\n :param JobId: [REQUIRED]\n The identifier assigned by the user to the detection job.\n \n\n :rtype: dict\n :return: {\n 'TopicsDetectionJobProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'NumberOfTopics': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef detect_dominant_language(Text=None):\n \"\"\"\n Determines the dominant language of the input text. For a list of languages that Amazon Comprehend can detect, see Amazon Comprehend Supported Languages .\n See also: AWS API Documentation\n \n \n :example: response = client.detect_dominant_language(\n Text='string'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 text string. Each string should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n \n\n :rtype: dict\n :return: {\n 'Languages': [\n {\n 'LanguageCode': 'string',\n 'Score': ...\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef detect_entities(Text=None, LanguageCode=None):\n \"\"\"\n Inspects text for named entities, and returns information about them. 
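    Illustrative synchronous sketch for detect_dominant_language (the sample sentence is arbitrary):
    
        import boto3
        
        client = boto3.client('comprehend')
        text = "Il pleut beaucoup aujourd'hui et la rue est toute mouillee."
        resp = client.detect_dominant_language(Text=text)
        # Each candidate language comes back with a confidence score;
        # take the highest-scoring one.
        top = max(resp['Languages'], key=lambda lang: lang['Score'])
        print(top['LanguageCode'], round(top['Score'], 3))
    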
For more information about named entities, see Entities in the Amazon Comprehend Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.detect_entities(\n Text='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'Entities': [\n {\n 'Score': ...,\n 'Type': 'PERSON'|'LOCATION'|'ORGANIZATION'|'COMMERCIAL_ITEM'|'EVENT'|'DATE'|'QUANTITY'|'TITLE'|'OTHER',\n 'Text': 'string',\n 'BeginOffset': 123,\n 'EndOffset': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef detect_key_phrases(Text=None, LanguageCode=None):\n \"\"\"\n Detects the key noun phrases found in the text.\n See also: AWS API Documentation\n \n \n :example: response = client.detect_key_phrases(\n Text='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'KeyPhrases': [\n {\n 'Score': ...,\n 'Text': 'string',\n 'BeginOffset': 123,\n 'EndOffset': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef detect_sentiment(Text=None, LanguageCode=None):\n \"\"\"\n Inspects text and returns an inference of the prevailing sentiment (POSITIVE , NEUTRAL , MIXED , or NEGATIVE ).\n See also: AWS API Documentation\n \n \n :example: response = client.detect_sentiment(\n Text='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :rtype: dict\n :return: {\n 'Sentiment': 'POSITIVE'|'NEGATIVE'|'NEUTRAL'|'MIXED',\n 'SentimentScore': {\n 'Positive': ...,\n 'Negative': ...,\n 'Neutral': ...,\n 'Mixed': ...\n }\n }\n \n \n \"\"\"\n pass\n\ndef detect_syntax(Text=None, LanguageCode=None):\n \"\"\"\n Inspects text for syntax and the part of speech of words in the document. For more information, see Syntax in the Amazon Comprehend Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.detect_syntax(\n Text='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'\n )\n \n \n :type Text: string\n :param Text: [REQUIRED]\n A UTF-8 string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.\n \n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language code of the input documents. 
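    Illustrative sketch combining the synchronous detect_* operations above (the sample sentence is arbitrary):
    
        import boto3
        
        client = boto3.client('comprehend')
        text = 'Amazon Web Services launched Comprehend at re:Invent in Las Vegas.'
        entities = client.detect_entities(Text=text, LanguageCode='en')['Entities']
        sentiment = client.detect_sentiment(Text=text, LanguageCode='en')
        for ent in entities:
            # Each entity carries a type, the matched text, and a confidence score.
            print(ent['Type'], ent['Text'], round(ent['Score'], 2))
        print(sentiment['Sentiment'])
    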
You can specify English ('en') or Spanish ('es').\n \n\n :rtype: dict\n :return: {\n 'SyntaxTokens': [\n {\n 'TokenId': 123,\n 'Text': 'string',\n 'BeginOffset': 123,\n 'EndOffset': 123,\n 'PartOfSpeech': {\n 'Tag': 'ADJ'|'ADP'|'ADV'|'AUX'|'CONJ'|'CCONJ'|'DET'|'INTJ'|'NOUN'|'NUM'|'O'|'PART'|'PRON'|'PROPN'|'PUNCT'|'SCONJ'|'SYM'|'VERB',\n 'Score': ...\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_document_classification_jobs(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the document classification jobs that you have submitted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_document_classification_jobs(\n Filter={\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. You can filter jobs on their names, status, or the date and time that they were submitted. You can only set one filter at a time.\n JobName (string) --Filters on the name of the job.\n JobStatus (string) --Filters the list based on job status. Returns only jobs with the specified status.\n SubmitTimeBefore (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.\n SubmitTimeAfter (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. 
The default is 100.\n\n :rtype: dict\n :return: {\n 'DocumentClassificationJobPropertiesList': [\n {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'DocumentClassifierArn': 'string',\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n \"\"\"\n pass\n\ndef list_document_classifiers(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the document classifiers that you have created.\n See also: AWS API Documentation\n \n \n :example: response = client.list_document_classifiers(\n Filter={\n 'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.\n Status (string) --Filters the list of classifiers based on status.\n SubmitTimeBefore (datetime) --Filters the list of classifiers based on the time that the classifier was submitted for processing. Returns only classifiers submitted before the specified time. Classifiers are returned in ascending order, oldest to newest.\n SubmitTimeAfter (datetime) --Filters the list of classifiers based on the time that the classifier was submitted for processing. Returns only classifiers submitted after the specified time. Classifiers are returned in descending order, newest to oldest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. 
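    Illustrative pagination sketch for the list_* operations above, driving NextToken by hand (note that NextToken is only passed once the service has returned one, since it must not be supplied as None on the first call):
    
        import boto3
        
        client = boto3.client('comprehend')
        kwargs = {'MaxResults': 100}
        classifiers = []
        while True:
            page = client.list_document_classifiers(**kwargs)
            classifiers.extend(page['DocumentClassifierPropertiesList'])
            token = page.get('NextToken')
            if not token:
                break
            kwargs['NextToken'] = token
        print(len(classifiers))
    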
The default is 100.\n\n :rtype: dict\n :return: {\n 'DocumentClassifierPropertiesList': [\n {\n 'DocumentClassifierArn': 'string',\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string'\n },\n 'ClassifierMetadata': {\n 'NumberOfLabels': 123,\n 'NumberOfTrainedDocuments': 123,\n 'NumberOfTestDocuments': 123,\n 'EvaluationMetrics': {\n 'Accuracy': 123.0,\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1Score': 123.0\n }\n },\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_dominant_language_detection_jobs(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the dominant language detection jobs that you have submitted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_dominant_language_detection_jobs(\n Filter={\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.\n JobName (string) --Filters on the name of the job.\n JobStatus (string) --Filters the list of jobs based on job status. Returns only jobs with the specified status.\n SubmitTimeBefore (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.\n SubmitTimeAfter (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. The default is 100.\n\n :rtype: dict\n :return: {\n 'DominantLanguageDetectionJobPropertiesList': [\n {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. 
Use this option when you are processing many short documents, such as text messages.\n \n \"\"\"\n pass\n\ndef list_entities_detection_jobs(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the entity detection jobs that you have submitted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_entities_detection_jobs(\n Filter={\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.\n JobName (string) --Filters on the name of the job.\n JobStatus (string) --Filters the list of jobs based on job status. Returns only jobs with the specified status.\n SubmitTimeBefore (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.\n SubmitTimeAfter (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. The default is 100.\n\n :rtype: dict\n :return: {\n 'EntitiesDetectionJobPropertiesList': [\n {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'EntityRecognizerArn': 'string',\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n \"\"\"\n pass\n\ndef list_entity_recognizers(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list.\n The results of this list are not in any particular order. 
Please get the list and sort locally if needed.\n See also: AWS API Documentation\n \n \n :example: response = client.list_entity_recognizers(\n Filter={\n 'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the list of entities returned. You can filter on Status , SubmitTimeBefore , or SubmitTimeAfter . You can only set one filter at a time.\n Status (string) --The status of an entity recognizer.\n SubmitTimeBefore (datetime) --Filters the list of entities based on the time that the list was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in descending order, newest to oldest.\n SubmitTimeAfter (datetime) --Filters the list of entities based on the time that the list was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in ascending order, oldest to newest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return on each page. The default is 100.\n\n :rtype: dict\n :return: {\n 'EntityRecognizerPropertiesList': [\n {\n 'EntityRecognizerArn': 'string',\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'EntityTypes': [\n {\n 'Type': 'string'\n },\n ],\n 'Documents': {\n 'S3Uri': 'string'\n },\n 'Annotations': {\n 'S3Uri': 'string'\n },\n 'EntityList': {\n 'S3Uri': 'string'\n }\n },\n 'RecognizerMetadata': {\n 'NumberOfTrainedDocuments': 123,\n 'NumberOfTestDocuments': 123,\n 'EvaluationMetrics': {\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1Score': 123.0\n },\n 'EntityTypes': [\n {\n 'Type': 'string'\n },\n ]\n },\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_key_phrases_detection_jobs(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Get a list of key phrase detection jobs that you have submitted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_key_phrases_detection_jobs(\n Filter={\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.\n JobName (string) --Filters on the name of the job.\n JobStatus (string) --Filters the list of jobs based on job status. Returns only jobs with the specified status.\n SubmitTimeBefore (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.\n SubmitTimeAfter (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. 
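    Illustrative sketch of the local sort suggested above for list_entity_recognizers (ordering by submission time, newest first):
    
        import boto3
        
        client = boto3.client('comprehend')
        resp = client.list_entity_recognizers(MaxResults=100)
        recognizers = resp['EntityRecognizerPropertiesList']
        # The service does not guarantee ordering, so sort locally.
        recognizers.sort(key=lambda rec: rec['SubmitTime'], reverse=True)
        for rec in recognizers:
            print(rec['SubmitTime'], rec['Status'], rec['EntityRecognizerArn'])
    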
Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. The default is 100.\n\n :rtype: dict\n :return: {\n 'KeyPhrasesDetectionJobPropertiesList': [\n {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n \"\"\"\n pass\n\ndef list_sentiment_detection_jobs(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of sentiment detection jobs that you have submitted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_sentiment_detection_jobs(\n Filter={\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.\n JobName (string) --Filters on the name of the job.\n JobStatus (string) --Filters the list of jobs based on job status. Returns only jobs with the specified status.\n SubmitTimeBefore (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.\n SubmitTimeAfter (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. The default is 100.\n\n :rtype: dict\n :return: {\n 'SentimentDetectionJobPropertiesList': [\n {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',\n 'DataAccessRoleArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ONE_DOC_PER_FILE - Each file is considered a separate document. 
Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n \"\"\"\n pass\n\ndef list_topics_detection_jobs(Filter=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the topic detection jobs that you have submitted.\n See also: AWS API Documentation\n \n \n :example: response = client.list_topics_detection_jobs(\n Filter={\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'SubmitTimeBefore': datetime(2015, 1, 1),\n 'SubmitTimeAfter': datetime(2015, 1, 1)\n },\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filter: dict\n :param Filter: Filters the jobs that are returned. Jobs can be filtered on their name, status, or the date and time that they were submitted. You can set only one filter at a time.\n JobName (string) --\n JobStatus (string) --Filters the list of topic detection jobs based on job status. Returns only jobs with the specified status.\n SubmitTimeBefore (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Only returns jobs submitted before the specified time. Jobs are returned in descending order, newest to oldest.\n SubmitTimeAfter (datetime) --Filters the list of jobs based on the time that the job was submitted for processing. Only returns jobs submitted after the specified time. Jobs are returned in ascending order, oldest to newest.\n \n\n :type NextToken: string\n :param NextToken: Identifies the next page of results to return.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in each page. The default is 100.\n\n :rtype: dict\n :return: {\n 'TopicsDetectionJobPropertiesList': [\n {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',\n 'Message': 'string',\n 'SubmitTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'InputDataConfig': {\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n 'OutputDataConfig': {\n 'S3Uri': 'string'\n },\n 'NumberOfTopics': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n \"\"\"\n pass\n\ndef start_document_classification_job(JobName=None, DocumentClassifierArn=None, InputDataConfig=None, OutputDataConfig=None, DataAccessRoleArn=None, ClientRequestToken=None):\n \"\"\"\n Starts an asynchronous document classification job. 
Use the operation to track the progress of the job.\n See also: AWS API Documentation\n \n \n :example: response = client.start_document_classification_job(\n JobName='string',\n DocumentClassifierArn='string',\n InputDataConfig={\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': 'string'\n },\n DataAccessRoleArn='string',\n ClientRequestToken='string'\n )\n \n \n :type JobName: string\n :param JobName: The identifier of the job.\n\n :type DocumentClassifierArn: string\n :param DocumentClassifierArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the document classifier to use to process the job.\n \n\n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n InputFormat (string) --Specifies how the text in an input file should be processed:\n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies where to send the output files.\n S3Uri (string) -- [REQUIRED]When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.\n When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz . It is a compressed archive that contains the ouput of the operation.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'JobId': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n }\n \n \n :returns: \n SUBMITTED - The job has been received and queued for processing.\n IN_PROGRESS - Amazon Comprehend is processing the job.\n COMPLETED - The job was successfully completed and the output is available.\n FAILED - The job did not complete. 
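    Illustrative sketch for start_document_classification_job (the ARNs, bucket names, and job name are hypothetical placeholders):
    
        import boto3
        
        client = boto3.client('comprehend')
        resp = client.start_document_classification_job(
            JobName='nightly-classification',  # hypothetical
            DocumentClassifierArn='arn:aws:comprehend:us-east-1:123456789012:document-classifier/my-classifier',
            InputDataConfig={'S3Uri': 's3://my-bucket/docs/',
                             'InputFormat': 'ONE_DOC_PER_LINE'},
            OutputDataConfig={'S3Uri': 's3://my-bucket/results/'},
            DataAccessRoleArn='arn:aws:iam::123456789012:role/MyRole'
        )
        print(resp['JobId'], resp['JobStatus'])
    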
For details, use the operation.\n STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and is processing the request.\n STOPPED - The job was successfully stopped without completing.\n \n \"\"\"\n pass\n\ndef start_dominant_language_detection_job(InputDataConfig=None, OutputDataConfig=None, DataAccessRoleArn=None, JobName=None, ClientRequestToken=None):\n \"\"\"\n Starts an asynchronous dominant language detection job for a collection of documents. Use the operation to track the status of a job.\n See also: AWS API Documentation\n \n \n :example: response = client.start_dominant_language_detection_job(\n InputDataConfig={\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': 'string'\n },\n DataAccessRoleArn='string',\n JobName='string',\n ClientRequestToken='string'\n )\n \n \n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n InputFormat (string) --Specifies how the text in an input file should be processed:\n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies where to send the output files.\n S3Uri (string) -- [REQUIRED]When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.\n When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz . It is a compressed archive that contains the ouput of the operation.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions .\n \n\n :type JobName: string\n :param JobName: An identifier for the job.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. 
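    Illustrative sketch for start_dominant_language_detection_job with an explicit ClientRequestToken (the S3 locations and role ARN are hypothetical; the token is assumed here to let the service recognize a retried request rather than start a duplicate job):
    
        import uuid
        import boto3
        
        client = boto3.client('comprehend')
        token = str(uuid.uuid4())  # supplied explicitly instead of autopopulated
        resp = client.start_dominant_language_detection_job(
            InputDataConfig={'S3Uri': 's3://my-bucket/docs/',
                             'InputFormat': 'ONE_DOC_PER_FILE'},
            OutputDataConfig={'S3Uri': 's3://my-bucket/results/'},
            DataAccessRoleArn='arn:aws:iam::123456789012:role/MyRole',
            JobName='language-scan',  # hypothetical
            ClientRequestToken=token
        )
        print(resp['JobId'])
    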
If you do not set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'JobId': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n }\n \n \n :returns: \n SUBMITTED - The job has been received and is queued for processing.\n IN_PROGRESS - Amazon Comprehend is processing the job.\n COMPLETED - The job was successfully completed and the output is available.\n FAILED - The job did not complete. To get details, use the operation.\n \n \"\"\"\n pass\n\ndef start_entities_detection_job(InputDataConfig=None, OutputDataConfig=None, DataAccessRoleArn=None, JobName=None, EntityRecognizerArn=None, LanguageCode=None, ClientRequestToken=None):\n \"\"\"\n Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job.\n This API can be used for either standard entity detection or custom entity recognition. In order to be used for custom entity recognition, the optional EntityRecognizerArn must be used in order to provide access to the recognizer being used to detect the custom entity.\n See also: AWS API Documentation\n \n \n :example: response = client.start_entities_detection_job(\n InputDataConfig={\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': 'string'\n },\n DataAccessRoleArn='string',\n JobName='string',\n EntityRecognizerArn='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',\n ClientRequestToken='string'\n )\n \n \n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n InputFormat (string) --Specifies how the text in an input file should be processed:\n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies where to send the output files.\n S3Uri (string) -- [REQUIRED]When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.\n When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz . 
It is a compressed archive that contains the ouput of the operation.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions .\n \n\n :type JobName: string\n :param JobName: The identifier of the job.\n\n :type EntityRecognizerArn: string\n :param EntityRecognizerArn: The Amazon Resource Name (ARN) that identifies the specific entity recognizer to be used by the StartEntitiesDetectionJob . This ARN is optional and is only used for a custom entity recognition job.\n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. All documents must be in the same language. You can specify any of the languages supported by Amazon Comprehend: English ('en'), Spanish ('es'), French ('fr'), German ('de'), Italian ('it'), or Portuguese ('pt'). If custom entities recognition is used, this parameter is ignored and the language used for training the model is used instead.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'JobId': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n }\n \n \n :returns: \n SUBMITTED - The job has been received and is queued for processing.\n IN_PROGRESS - Amazon Comprehend is processing the job.\n COMPLETED - The job was successfully completed and the output is available.\n FAILED - The job did not complete. To get details, use the operation.\n STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and is processing the request.\n STOPPED - The job was successfully stopped without completing.\n \n \"\"\"\n pass\n\ndef start_key_phrases_detection_job(InputDataConfig=None, OutputDataConfig=None, DataAccessRoleArn=None, JobName=None, LanguageCode=None, ClientRequestToken=None):\n \"\"\"\n Starts an asynchronous key phrase detection job for a collection of documents. Use the operation to track the status of a job.\n See also: AWS API Documentation\n \n \n :example: response = client.start_key_phrases_detection_job(\n InputDataConfig={\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': 'string'\n },\n DataAccessRoleArn='string',\n JobName='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',\n ClientRequestToken='string'\n )\n \n \n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n InputFormat (string) --Specifies how the text in an input file should be processed:\n ONE_DOC_PER_FILE - Each file is considered a separate document. 
Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies where to send the output files.\n S3Uri (string) -- [REQUIRED]When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.\n When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz . It is a compressed archive that contains the ouput of the operation.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions .\n \n\n :type JobName: string\n :param JobName: The identifier of the job.\n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'JobId': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n }\n \n \n :returns: \n SUBMITTED - The job has been received and is queued for processing.\n IN_PROGRESS - Amazon Comprehend is processing the job.\n COMPLETED - The job was successfully completed and the output is available.\n FAILED - The job did not complete. To get details, use the operation.\n \n \"\"\"\n pass\n\ndef start_sentiment_detection_job(InputDataConfig=None, OutputDataConfig=None, DataAccessRoleArn=None, JobName=None, LanguageCode=None, ClientRequestToken=None):\n \"\"\"\n Starts an asynchronous sentiment detection job for a collection of documents. use the operation to track the status of a job.\n See also: AWS API Documentation\n \n \n :example: response = client.start_sentiment_detection_job(\n InputDataConfig={\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': 'string'\n },\n DataAccessRoleArn='string',\n JobName='string',\n LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',\n ClientRequestToken='string'\n )\n \n \n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. 
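    Illustrative sketch starting a sentiment detection job and polling it to completion (the S3 locations and role ARN are hypothetical placeholders):
    
        import time
        import boto3
        
        client = boto3.client('comprehend')
        job = client.start_sentiment_detection_job(
            InputDataConfig={'S3Uri': 's3://my-bucket/reviews/',
                             'InputFormat': 'ONE_DOC_PER_LINE'},
            OutputDataConfig={'S3Uri': 's3://my-bucket/sentiment-out/'},
            DataAccessRoleArn='arn:aws:iam::123456789012:role/MyRole',
            LanguageCode='en'
        )
        while True:
            props = client.describe_sentiment_detection_job(
                JobId=job['JobId']
            )['SentimentDetectionJobProperties']
            if props['JobStatus'] in ('COMPLETED', 'FAILED', 'STOPPED'):
                break
            time.sleep(60)
        # On COMPLETED, output.tar.gz is written under the OutputDataConfig S3Uri prefix.
    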
If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n InputFormat (string) --Specifies how the text in an input file should be processed:\n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies where to send the output files.\n S3Uri (string) -- [REQUIRED]When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.\n When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz . It is a compressed archive that contains the ouput of the operation.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions .\n \n\n :type JobName: string\n :param JobName: The identifier of the job.\n\n :type LanguageCode: string\n :param LanguageCode: [REQUIRED]\n The language of the input documents. You can specify English ('en') or Spanish ('es'). All documents must be in the same language.\n \n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'JobId': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n }\n \n \n :returns: \n SUBMITTED - The job has been received and is queued for processing.\n IN_PROGRESS - Amazon Comprehend is processing the job.\n COMPLETED - The job was successfully completed and the output is available.\n FAILED - The job did not complete. To get details, use the operation.\n \n \"\"\"\n pass\n\ndef start_topics_detection_job(InputDataConfig=None, OutputDataConfig=None, DataAccessRoleArn=None, JobName=None, NumberOfTopics=None, ClientRequestToken=None):\n \"\"\"\n Starts an asynchronous topic detection job. Use the DescribeTopicDetectionJob operation to track the status of a job.\n See also: AWS API Documentation\n \n \n :example: response = client.start_topics_detection_job(\n InputDataConfig={\n 'S3Uri': 'string',\n 'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': 'string'\n },\n DataAccessRoleArn='string',\n JobName='string',\n NumberOfTopics=123,\n ClientRequestToken='string'\n )\n \n \n :type InputDataConfig: dict\n :param InputDataConfig: [REQUIRED]\n Specifies the format and location of the input data for the job.\n S3Uri (string) -- [REQUIRED]The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. 
The URI can point to a single input file or it can provide the prefix for a collection of data files.\n For example, if you use the URI S3://bucketName/prefix , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.\n InputFormat (string) --Specifies how the text in an input file should be processed:\n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.\n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies where to send the output files. The output is a compressed archive with two files, topic-terms.csv that lists the terms associated with each topic, and doc-topics.csv that lists the documents associated with each topic\n S3Uri (string) -- [REQUIRED]When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.\n When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz . It is a compressed archive that contains the ouput of the operation.\n \n\n :type DataAccessRoleArn: string\n :param DataAccessRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions .\n \n\n :type JobName: string\n :param JobName: The identifier of the job.\n\n :type NumberOfTopics: integer\n :param NumberOfTopics: The number of topics to detect.\n\n :type ClientRequestToken: string\n :param ClientRequestToken: A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'JobId': 'string',\n 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n }\n \n \n :returns: \n SUBMITTED - The job has been received and is queued for processing.\n IN_PROGRESS - Amazon Comprehend is processing the job.\n COMPLETED - The job was successfully completed and the output is available.\n FAILED - The job did not complete. To get details, use the DescribeTopicDetectionJob operation.\n \n \"\"\"\n pass\n\ndef stop_dominant_language_detection_job(JobId=None):\n \"\"\"\n Stops a dominant language detection job in progress.\n If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. 
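    Illustrative sketch stopping an in-progress job and handling the 400-level error returned for jobs that are already in a COMPLETED or FAILED state (the job identifier is a hypothetical placeholder):
    
        import boto3
        from botocore.exceptions import ClientError
        
        client = boto3.client('comprehend')
        try:
            resp = client.stop_dominant_language_detection_job(
                JobId='0123456789abcdef'  # hypothetical
            )
            print(resp['JobStatus'])  # typically STOP_REQUESTED or STOPPED
        except ClientError as err:
            # Jobs already in COMPLETED or FAILED cannot be stopped.
            print(err.response['Error']['Code'])
    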
If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.\n    If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.\n    When a job is stopped, any documents already processed are written to the output location.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_dominant_language_detection_job(\n    JobId='string'\n    )\n    \n    \n    :type JobId: string\n    :param JobId: [REQUIRED]\n    The identifier of the dominant language detection job to stop.\n    \n\n    :rtype: dict\n    :return: {\n    'JobId': 'string',\n    'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef stop_entities_detection_job(JobId=None):\n    \"\"\"\n    Stops an entities detection job in progress.\n    If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.\n    If the job is in the COMPLETED or FAILED state when you call the StopEntitiesDetectionJob operation, the operation returns a 400 Internal Request Exception.\n    When a job is stopped, any documents already processed are written to the output location.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_entities_detection_job(\n    JobId='string'\n    )\n    \n    \n    :type JobId: string\n    :param JobId: [REQUIRED]\n    The identifier of the entities detection job to stop.\n    \n\n    :rtype: dict\n    :return: {\n    'JobId': 'string',\n    'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef stop_key_phrases_detection_job(JobId=None):\n    \"\"\"\n    Stops a key phrases detection job in progress.\n    If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.\n    If the job is in the COMPLETED or FAILED state when you call the StopKeyPhrasesDetectionJob operation, the operation returns a 400 Internal Request Exception.\n    When a job is stopped, any documents already processed are written to the output location.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_key_phrases_detection_job(\n    JobId='string'\n    )\n    \n    \n    :type JobId: string\n    :param JobId: [REQUIRED]\n    The identifier of the key phrases detection job to stop.\n    \n\n    :rtype: dict\n    :return: {\n    'JobId': 'string',\n    'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef stop_sentiment_detection_job(JobId=None):\n    \"\"\"\n    Stops a sentiment detection job in progress.\n    If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. 
If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.\n    If the job is in the COMPLETED or FAILED state when you call the StopSentimentDetectionJob operation, the operation returns a 400 Internal Request Exception.\n    When a job is stopped, any documents already processed are written to the output location.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_sentiment_detection_job(\n    JobId='string'\n    )\n    \n    \n    :type JobId: string\n    :param JobId: [REQUIRED]\n    The identifier of the sentiment detection job to stop.\n    \n\n    :rtype: dict\n    :return: {\n    'JobId': 'string',\n    'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef stop_training_document_classifier(DocumentClassifierArn=None):\n    \"\"\"\n    Stops a document classifier training job while in progress.\n    If the training job state is TRAINING , the job is marked for termination and put into the STOP_REQUESTED state. If the training job completes before it can be stopped, it is put into the TRAINED state; otherwise the training job is stopped and put into the STOPPED state and the service sends back an HTTP 200 response with an empty HTTP body.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_training_document_classifier(\n    DocumentClassifierArn='string'\n    )\n    \n    \n    :type DocumentClassifierArn: string\n    :param DocumentClassifierArn: [REQUIRED]\n    The Amazon Resource Name (ARN) that identifies the document classifier currently being trained.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef stop_training_entity_recognizer(EntityRecognizerArn=None):\n    \"\"\"\n    Stops an entity recognizer training job while in progress.\n    If the training job state is TRAINING , the job is marked for termination and put into the STOP_REQUESTED state. 
If the training job completes before it can be stopped, it is put into the TRAINED state; otherwise the training job is stopped and put into the STOPPED state and the service sends back an HTTP 200 response with an empty HTTP body.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.stop_training_entity_recognizer(\n    EntityRecognizerArn='string'\n    )\n    \n    \n    :type EntityRecognizerArn: string\n    :param EntityRecognizerArn: [REQUIRED]\n    The Amazon Resource Name (ARN) that identifies the entity recognizer currently being trained.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\n
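# A minimal illustrative sketch of how the asynchronous job operations documented above might be combined. It assumes a configured boto3 client; the bucket names, role ARN, and job ID are placeholder assumptions, not values taken from this module.\ndef _example_comprehend_job_control():\n    import boto3\n\n    comprehend = boto3.client('comprehend')\n    # Start an asynchronous topic detection job over documents stored in S3.\n    started = comprehend.start_topics_detection_job(\n        InputDataConfig={'S3Uri': 's3://example-bucket/input/',\n                         'InputFormat': 'ONE_DOC_PER_LINE'},\n        OutputDataConfig={'S3Uri': 's3://example-bucket/output/'},\n        DataAccessRoleArn='arn:aws:iam::123456789012:role/example-role',\n        NumberOfTopics=10,\n    )\n    print(started['JobId'], started['JobStatus'])\n    # The stop_* operations interrupt IN_PROGRESS jobs; any documents already\n    # processed are still written to the output location. For example:\n    # comprehend.stop_dominant_language_detection_job(JobId='example-job-id')\n\n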
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_environment_ec2(name=None, description=None, clientRequestToken=None, instanceType=None, subnetId=None, automaticStopTimeMinutes=None, ownerArn=None):\n \"\"\"\n Creates an AWS Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment.\n See also: AWS API Documentation\n \n \n :example: response = client.create_environment_ec2(\n name='string',\n description='string',\n clientRequestToken='string',\n instanceType='string',\n subnetId='string',\n automaticStopTimeMinutes=123,\n ownerArn='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the environment to create.\n This name is visible to other AWS IAM users in the same AWS account.\n \n\n :type description: string\n :param description: The description of the environment to create.\n\n :type clientRequestToken: string\n :param clientRequestToken: A unique, case-sensitive string that helps AWS Cloud9 to ensure this operation completes no more than one time.\n For more information, see Client Tokens in the Amazon EC2 API Reference .\n \n\n :type instanceType: string\n :param instanceType: [REQUIRED]\n The type of instance to connect to the environment (for example, t2.micro ).\n \n\n :type subnetId: string\n :param subnetId: The ID of the subnet in Amazon VPC that AWS Cloud9 will use to communicate with the Amazon EC2 instance.\n\n :type automaticStopTimeMinutes: integer\n :param automaticStopTimeMinutes: The number of minutes until the running instance is shut down after the environment has last been used.\n\n :type ownerArn: string\n :param ownerArn: The Amazon Resource Name (ARN) of the environment owner. This ARN can be the ARN of any AWS IAM principal. If this value is not specified, the ARN defaults to this environment's creator.\n\n :rtype: dict\n :return: {\n 'environmentId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_environment_membership(environmentId=None, userArn=None, permissions=None):\n \"\"\"\n Adds an environment member to an AWS Cloud9 development environment.\n See also: AWS API Documentation\n \n \n :example: response = client.create_environment_membership(\n environmentId='string',\n userArn='string',\n permissions='read-write'|'read-only'\n )\n \n \n :type environmentId: string\n :param environmentId: [REQUIRED]\n The ID of the environment that contains the environment member you want to add.\n \n\n :type userArn: string\n :param userArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the environment member you want to add.\n \n\n :type permissions: string\n :param permissions: [REQUIRED]\n The type of environment member permissions you want to associate with this environment member. 
Available values include:\n    read-only : Has read-only access to the environment.\n    read-write : Has read-write access to the environment.\n    \n\n    :rtype: dict\n    :return: {\n    'membership': {\n    'permissions': 'owner'|'read-write'|'read-only',\n    'userId': 'string',\n    'userArn': 'string',\n    'environmentId': 'string',\n    'lastAccess': datetime(2015, 1, 1)\n    }\n    }\n    \n    \n    :returns: \n    owner : Owns the environment.\n    read-only : Has read-only access to the environment.\n    read-write : Has read-write access to the environment.\n    \n    \"\"\"\n    pass\n\ndef delete_environment(environmentId=None):\n    \"\"\"\n    Deletes an AWS Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_environment(\n    environmentId='string'\n    )\n    \n    \n    :type environmentId: string\n    :param environmentId: [REQUIRED]\n    The ID of the environment to delete.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    \"\"\"\n    pass\n\ndef delete_environment_membership(environmentId=None, userArn=None):\n    \"\"\"\n    Deletes an environment member from an AWS Cloud9 development environment.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_environment_membership(\n    environmentId='string',\n    userArn='string'\n    )\n    \n    \n    :type environmentId: string\n    :param environmentId: [REQUIRED]\n    The ID of the environment to delete the environment member from.\n    \n\n    :type userArn: string\n    :param userArn: [REQUIRED]\n    The Amazon Resource Name (ARN) of the environment member to delete from the environment.\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef describe_environment_memberships(userArn=None, environmentId=None, permissions=None, nextToken=None, maxResults=None):\n    \"\"\"\n    Gets information about environment members for an AWS Cloud9 development environment.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.describe_environment_memberships(\n    userArn='string',\n    environmentId='string',\n    permissions=[\n    'owner'|'read-write'|'read-only',\n    ],\n    nextToken='string',\n    maxResults=123\n    )\n    \n    \n    :type userArn: string\n    :param userArn: The Amazon Resource Name (ARN) of an individual environment member to get information about. If no value is specified, information about all environment members is returned.\n\n    :type environmentId: string\n    :param environmentId: The ID of the environment to get environment member information about.\n\n    :type permissions: list\n    :param permissions: The type of environment member permissions to get information about. Available values include:\n    owner : Owns the environment.\n    read-only : Has read-only access to the environment.\n    read-write : Has read-write access to the environment.\n    If no value is specified, information about all environment members is returned.\n    (string) --\n    \n\n    :type nextToken: string\n    :param nextToken: During a previous call, if there are more than 25 items in the list, only the first 25 items are returned, along with a unique string called a next token . To get the next batch of items in the list, call this operation again, adding the next token to the call. 
To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.\n\n :type maxResults: integer\n :param maxResults: The maximum number of environment members to get information about.\n\n :rtype: dict\n :return: {\n 'memberships': [\n {\n 'permissions': 'owner'|'read-write'|'read-only',\n 'userId': 'string',\n 'userArn': 'string',\n 'environmentId': 'string',\n 'lastAccess': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n owner : Owns the environment.\n read-only : Has read-only access to the environment.\n read-write : Has read-write access to the environment.\n \n \"\"\"\n pass\n\ndef describe_environment_status(environmentId=None):\n \"\"\"\n Gets status information for an AWS Cloud9 development environment.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_environment_status(\n environmentId='string'\n )\n \n \n :type environmentId: string\n :param environmentId: [REQUIRED]\n The ID of the environment to get status information about.\n \n\n :rtype: dict\n :return: {\n 'status': 'error'|'creating'|'connecting'|'ready'|'stopping'|'stopped'|'deleting',\n 'message': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_environments(environmentIds=None):\n \"\"\"\n Gets information about AWS Cloud9 development environments.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_environments(\n environmentIds=[\n 'string',\n ]\n )\n \n \n :type environmentIds: list\n :param environmentIds: [REQUIRED]\n The IDs of individual environments to get information about.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'environments': [\n {\n 'id': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'ssh'|'ec2',\n 'arn': 'string',\n 'ownerArn': 'string'\n },\n ]\n }\n \n \n :returns: \n ec2 : An Amazon Elastic Compute Cloud (Amazon EC2) instance connects to the environment.\n ssh : Your own server connects to the environment.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n    section of the service docs for a list of available waiters.\n\n    :rtype: botocore.waiter.Waiter\n    \"\"\"\n    pass\n\ndef list_environments(nextToken=None, maxResults=None):\n    \"\"\"\n    Gets a list of AWS Cloud9 development environment identifiers.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_environments(\n    nextToken='string',\n    maxResults=123\n    )\n    \n    \n    :type nextToken: string\n    :param nextToken: During a previous call, if there are more than 25 items in the list, only the first 25 items are returned, along with a unique string called a next token . To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.\n\n    :type maxResults: integer\n    :param maxResults: The maximum number of environments to get identifiers for.\n\n    :rtype: dict\n    :return: {\n    'nextToken': 'string',\n    'environmentIds': [\n    'string',\n    ]\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef update_environment(environmentId=None, name=None, description=None):\n    \"\"\"\n    Changes the settings of an existing AWS Cloud9 development environment.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_environment(\n    environmentId='string',\n    name='string',\n    description='string'\n    )\n    \n    \n    :type environmentId: string\n    :param environmentId: [REQUIRED]\n    The ID of the environment whose settings you want to change.\n    \n\n    :type name: string\n    :param name: A replacement name for the environment.\n\n    :type description: string\n    :param description: Any new or replacement description for the environment.\n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef update_environment_membership(environmentId=None, userArn=None, permissions=None):\n    \"\"\"\n    Changes the settings of an existing environment member for an AWS Cloud9 development environment.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.update_environment_membership(\n    environmentId='string',\n    userArn='string',\n    permissions='read-write'|'read-only'\n    )\n    \n    \n    :type environmentId: string\n    :param environmentId: [REQUIRED]\n    The ID of the environment for the environment member whose settings you want to change.\n    \n\n    :type userArn: string\n    :param userArn: [REQUIRED]\n    The Amazon Resource Name (ARN) of the environment member whose settings you want to change.\n    \n\n    :type permissions: string\n    :param permissions: [REQUIRED]\n    The replacement type of environment member permissions you want to associate with this environment member. 
Available values include:\n read-only : Has read-only access to the environment.\n read-write : Has read-write access to the environment.\n \n\n :rtype: dict\n :return: {\n 'membership': {\n 'permissions': 'owner'|'read-write'|'read-only',\n 'userId': 'string',\n 'userArn': 'string',\n 'environmentId': 'string',\n 'lastAccess': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n owner : Owns the environment.\n read-only : Has read-only access to the environment.\n read-write : Has read-write access to the environment.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.617292046546936, "alphanum_fraction": 0.6226522922515869, "avg_line_length": 37.729000091552734, "blob_id": "f36bfe833aeeb5590f8e691e6872fa5874b0fcef", "content_id": "160854823ebce24130c2d1dfbd6f10ca2691e7fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24439, "license_type": "permissive", "max_line_length": 494, "num_lines": 631, "path": "/pyboto3/resourcegroups.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_group(Name=None, Description=None, ResourceQuery=None, Tags=None):\n \"\"\"\n Creates a group with a specified name, description, and resource query.\n See also: AWS API Documentation\n \n \n :example: response = client.create_group(\n Name='string',\n Description='string',\n ResourceQuery={\n 'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',\n 'Query': 'string'\n },\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the group, which is the identifier of the group in other operations. A resource group name cannot be updated after it is created. A resource group name can have a maximum of 128 characters, including letters, numbers, hyphens, dots, and underscores. The name cannot start with AWS or aws ; these are reserved. 
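# A minimal illustrative sketch combining the environment and membership operations documented above. It assumes a configured boto3 client; the environment name, instance type, and user ARN are placeholder assumptions, not values taken from this module.\ndef _example_cloud9_environment_setup():\n    import boto3\n\n    cloud9 = boto3.client('cloud9')\n    # Create an EC2-backed development environment.\n    env = cloud9.create_environment_ec2(\n        name='example-env',\n        instanceType='t2.micro',\n        automaticStopTimeMinutes=30,\n    )\n    # Grant another IAM user read-only access to the new environment.\n    cloud9.create_environment_membership(\n        environmentId=env['environmentId'],\n        userArn='arn:aws:iam::123456789012:user/example-user',\n        permissions='read-only',\n    )\n    # Poll the environment's status until it is ready to connect.\n    status = cloud9.describe_environment_status(environmentId=env['environmentId'])\n    return status['status']\n\n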
A resource group name must be unique within your account.\n \n\n :type Description: string\n :param Description: The description of the resource group. Descriptions can have a maximum of 511 characters, including letters, numbers, hyphens, underscores, punctuation, and spaces.\n\n :type ResourceQuery: dict\n :param ResourceQuery: [REQUIRED]\n The resource query that determines which AWS resources are members of this group.\n Type (string) -- [REQUIRED]The type of the query. The valid values in this release are TAG_FILTERS_1_0 and CLOUDFORMATION_STACK_1_0 .\n TAG_FILTERS_1_0: * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API GetResources operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.\n For example, consider the following sample query for resources that have two tags, Stage and Version , with two values each. ([{'Key':'Stage','Values':['Test','Deploy']},{'Key':'Version','Values':['1','2']}] ) The results of this query might include the following.\n An EC2 instance that has the following two tags: {'Key':'Stage','Values':['Deploy']} , and {'Key':'Version','Values':['2']}\n An S3 bucket that has the following two tags: {'Key':'Stage','Values':['Test','Deploy']}, and {'Key':'Version','Values':['1']}\n The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.\n An EC2 instance that has only the following tag: {'Key':'Stage','Values':['Deploy']} .\n An RDS database that has the following two tags: {'Key':'Stage','Values':['Archived']} , and {'Key':'Version','Values':['4']}\n CLOUDFORMATION_STACK_1_0: * A JSON syntax that lets you specify a CloudFormation stack ARN.\n Query (string) -- [REQUIRED]The query that defines a group or a search.\n \n\n :type Tags: dict\n :param Tags: The tags to add to the group. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n },\n 'ResourceQuery': {\n 'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',\n 'Query': 'string'\n },\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n TAG_FILTERS_1_0: * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API GetResources operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.\n \n \"\"\"\n pass\n\ndef delete_group(GroupName=None):\n \"\"\"\n Deletes a specified resource group. 
Deleting a resource group does not delete resources that are members of the group; it only deletes the group structure.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_group(\n GroupName='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the resource group to delete.\n \n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_group(GroupName=None):\n \"\"\"\n Returns information about a specified resource group.\n See also: AWS API Documentation\n \n \n :example: response = client.get_group(\n GroupName='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the resource group.\n \n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_group_query(GroupName=None):\n \"\"\"\n Returns the resource query associated with the specified resource group.\n See also: AWS API Documentation\n \n \n :example: response = client.get_group_query(\n GroupName='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the resource group.\n \n\n :rtype: dict\n :return: {\n 'GroupQuery': {\n 'GroupName': 'string',\n 'ResourceQuery': {\n 'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',\n 'Query': 'string'\n }\n }\n }\n \n \n :returns: \n An EC2 instance that has the following two tags: {\"Key\":\"Stage\",\"Values\":[\"Deploy\"]} , and {\"Key\":\"Version\",\"Values\":[\"2\"]}\n An S3 bucket that has the following two tags: {\"Key\":\"Stage\",\"Values\":[\"Test\",\"Deploy\"]}, and {\"Key\":\"Version\",\"Values\":[\"1\"]}\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_tags(Arn=None):\n \"\"\"\n Returns a list of tags that are associated with a resource, specified by an ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_tags(\n Arn='string'\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n The ARN of the resource for which you want a list of tags. 
The resource must exist within the account you are using.\n \n\n :rtype: dict\n :return: {\n 'Arn': 'string',\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_group_resources(GroupName=None, Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of ARNs of resources that are members of a specified resource group.\n See also: AWS API Documentation\n \n \n :example: response = client.list_group_resources(\n GroupName='string',\n Filters=[\n {\n 'Name': 'resource-type',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the resource group.\n \n\n :type Filters: list\n :param Filters: Filters, formatted as ResourceFilter objects, that you want to apply to a ListGroupResources operation.\n resource-type - Filter resources by their type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket.\n (dict) --A filter name and value pair that is used to obtain more specific results from a list of resources.\n Name (string) -- [REQUIRED]The name of the filter. Filter names are case-sensitive.\n Values (list) -- [REQUIRED]One or more filter values. Allowed filter values vary by resource filter name, and are case-sensitive.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of group member ARNs that are returned in a single call by ListGroupResources, in paginated output. By default, this number is 50.\n\n :type NextToken: string\n :param NextToken: The NextToken value that is returned in a paginated ListGroupResources request. To get the next page of results, run the call again, add the NextToken parameter, and specify the NextToken value.\n\n :rtype: dict\n :return: {\n 'ResourceIdentifiers': [\n {\n 'ResourceArn': 'string',\n 'ResourceType': 'string'\n },\n ],\n 'NextToken': 'string',\n 'QueryErrors': [\n {\n 'ErrorCode': 'CLOUDFORMATION_STACK_INACTIVE'|'CLOUDFORMATION_STACK_NOT_EXISTING',\n 'Message': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_groups(Filters=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of existing resource groups in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_groups(\n Filters=[\n {\n 'Name': 'resource-type',\n 'Values': [\n 'string',\n ]\n },\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Filters: list\n :param Filters: Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation.\n resource-type - Filter groups by resource type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket.\n (dict) --A filter name and value pair that is used to obtain more specific results from a list of groups.\n Name (string) -- [REQUIRED]The name of the filter. Filter names are case-sensitive.\n Values (list) -- [REQUIRED]One or more filter values. 
Allowed filter values vary by group filter name, and are case-sensitive.\n (string) --\n \n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of resource group results that are returned by ListGroups in paginated output. By default, this number is 50.\n\n :type NextToken: string\n :param NextToken: The NextToken value that is returned in a paginated ListGroups request. To get the next page of results, run the call again, add the NextToken parameter, and specify the NextToken value.\n\n :rtype: dict\n :return: {\n 'GroupIdentifiers': [\n {\n 'GroupName': 'string',\n 'GroupArn': 'string'\n },\n ],\n 'Groups': [\n {\n 'GroupArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef search_resources(ResourceQuery=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of AWS resource identifiers that matches a specified query. The query uses the same format as a resource query in a CreateGroup or UpdateGroupQuery operation.\n See also: AWS API Documentation\n \n \n :example: response = client.search_resources(\n ResourceQuery={\n 'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',\n 'Query': 'string'\n },\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ResourceQuery: dict\n :param ResourceQuery: [REQUIRED]\n The search query, using the same formats that are supported for resource group definition.\n Type (string) -- [REQUIRED]The type of the query. The valid values in this release are TAG_FILTERS_1_0 and CLOUDFORMATION_STACK_1_0 .\n TAG_FILTERS_1_0: * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API GetResources operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.\n For example, consider the following sample query for resources that have two tags, Stage and Version , with two values each. ([{'Key':'Stage','Values':['Test','Deploy']},{'Key':'Version','Values':['1','2']}] ) The results of this query might include the following.\n An EC2 instance that has the following two tags: {'Key':'Stage','Values':['Deploy']} , and {'Key':'Version','Values':['2']}\n An S3 bucket that has the following two tags: {'Key':'Stage','Values':['Test','Deploy']}, and {'Key':'Version','Values':['1']}\n The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.\n An EC2 instance that has only the following tag: {'Key':'Stage','Values':['Deploy']} .\n An RDS database that has the following two tags: {'Key':'Stage','Values':['Archived']} , and {'Key':'Version','Values':['4']}\n CLOUDFORMATION_STACK_1_0: * A JSON syntax that lets you specify a CloudFormation stack ARN.\n Query (string) -- [REQUIRED]The query that defines a group or a search.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of group member ARNs returned by SearchResources in paginated output. By default, this number is 50.\n\n :type NextToken: string\n :param NextToken: The NextToken value that is returned in a paginated SearchResources request. 
To get the next page of results, run the call again, add the NextToken parameter, and specify the NextToken value.\n\n :rtype: dict\n :return: {\n 'ResourceIdentifiers': [\n {\n 'ResourceArn': 'string',\n 'ResourceType': 'string'\n },\n ],\n 'NextToken': 'string',\n 'QueryErrors': [\n {\n 'ErrorCode': 'CLOUDFORMATION_STACK_INACTIVE'|'CLOUDFORMATION_STACK_NOT_EXISTING',\n 'Message': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef tag(Arn=None, Tags=None):\n \"\"\"\n Adds specified tags to a resource with the specified ARN. Existing tags on a resource are not changed if they are not specified in the request parameters.\n See also: AWS API Documentation\n \n \n :example: response = client.tag(\n Arn='string',\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n The ARN of the resource to which to add tags.\n \n\n :type Tags: dict\n :param Tags: [REQUIRED]\n The tags to add to the specified resource. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Arn': 'string',\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef untag(Arn=None, Keys=None):\n \"\"\"\n Deletes specified tags from a specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag(\n Arn='string',\n Keys=[\n 'string',\n ]\n )\n \n \n :type Arn: string\n :param Arn: [REQUIRED]\n The ARN of the resource from which to remove tags.\n \n\n :type Keys: list\n :param Keys: [REQUIRED]\n The keys of the tags to be removed.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Arn': 'string',\n 'Keys': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_group(GroupName=None, Description=None):\n \"\"\"\n Updates an existing group with a new or changed description. You cannot update the name of a resource group.\n See also: AWS API Documentation\n \n \n :example: response = client.update_group(\n GroupName='string',\n Description='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the resource group for which you want to update its description.\n \n\n :type Description: string\n :param Description: The description of the resource group. Descriptions can have a maximum of 511 characters, including letters, numbers, hyphens, underscores, punctuation, and spaces.\n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupArn': 'string',\n 'Name': 'string',\n 'Description': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_group_query(GroupName=None, ResourceQuery=None):\n \"\"\"\n Updates the resource query of a group.\n See also: AWS API Documentation\n \n \n :example: response = client.update_group_query(\n GroupName='string',\n ResourceQuery={\n 'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',\n 'Query': 'string'\n }\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The name of the resource group for which you want to edit the query.\n \n\n :type ResourceQuery: dict\n :param ResourceQuery: [REQUIRED]\n The resource query that determines which AWS resources are members of the resource group.\n Type (string) -- [REQUIRED]The type of the query. 
The valid values in this release are TAG_FILTERS_1_0 and CLOUDFORMATION_STACK_1_0 .\n TAG_FILTERS_1_0: * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API GetResources operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.\n For example, consider the following sample query for resources that have two tags, Stage and Version , with two values each. ([{'Key':'Stage','Values':['Test','Deploy']},{'Key':'Version','Values':['1','2']}] ) The results of this query might include the following.\n An EC2 instance that has the following two tags: {'Key':'Stage','Values':['Deploy']} , and {'Key':'Version','Values':['2']}\n An S3 bucket that has the following two tags: {'Key':'Stage','Values':['Test','Deploy']}, and {'Key':'Version','Values':['1']}\n The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.\n An EC2 instance that has only the following tag: {'Key':'Stage','Values':['Deploy']} .\n An RDS database that has the following two tags: {'Key':'Stage','Values':['Archived']} , and {'Key':'Version','Values':['4']}\n CLOUDFORMATION_STACK_1_0: * A JSON syntax that lets you specify a CloudFormation stack ARN.\n Query (string) -- [REQUIRED]The query that defines a group or a search.\n \n\n :rtype: dict\n :return: {\n 'GroupQuery': {\n 'GroupName': 'string',\n 'ResourceQuery': {\n 'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',\n 'Query': 'string'\n }\n }\n }\n \n \n :returns: \n TAG_FILTERS_1_0: * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API GetResources operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. 
If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6266400814056396, "alphanum_fraction": 0.6346729397773743, "avg_line_length": 34.783973693847656, "blob_id": "80bdd58c711fe9321f94222ba5e8a6ef292270ab", "content_id": "cd048dbadf44f3e0d6cf0175d3f5c36e1fc0aadb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41081, "license_type": "permissive", "max_line_length": 357, "num_lines": 1148, "path": "/pyboto3/mq.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_broker(AutoMinorVersionUpgrade=None, BrokerName=None, Configuration=None, CreatorRequestId=None, DeploymentMode=None, EngineType=None, EngineVersion=None, HostInstanceType=None, Logs=None, MaintenanceWindowStartTime=None, PubliclyAccessible=None, SecurityGroups=None, SubnetIds=None, Tags=None, Users=None):\n \"\"\"\n Creates a broker. 
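# A minimal illustrative sketch of a tag-based group round trip using the operations documented above. It assumes a configured boto3 client; the group name and the tag filter (which follows the sample query shown in the create_group docstring) are placeholder assumptions.\ndef _example_resource_group_roundtrip():\n    import json\n\n    import boto3\n\n    rg = boto3.client('resource-groups')\n    # Create a group whose members are resources tagged Stage=Test.\n    query = json.dumps([{'Key': 'Stage', 'Values': ['Test']}])\n    rg.create_group(\n        Name='example-group',\n        ResourceQuery={'Type': 'TAG_FILTERS_1_0', 'Query': query},\n    )\n    # List the ARNs of the group's current member resources.\n    members = rg.list_group_resources(GroupName='example-group')\n    return [r['ResourceArn'] for r in members['ResourceIdentifiers']]\n\n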
Note: This API is asynchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.create_broker(\n AutoMinorVersionUpgrade=True|False,\n BrokerName='string',\n Configuration={\n 'Id': 'string',\n 'Revision': 123\n },\n CreatorRequestId='string',\n DeploymentMode='SINGLE_INSTANCE'|'ACTIVE_STANDBY_MULTI_AZ',\n EngineType='ACTIVEMQ',\n EngineVersion='string',\n HostInstanceType='string',\n Logs={\n 'Audit': True|False,\n 'General': True|False\n },\n MaintenanceWindowStartTime={\n 'DayOfWeek': 'MONDAY'|'TUESDAY'|'WEDNESDAY'|'THURSDAY'|'FRIDAY'|'SATURDAY'|'SUNDAY',\n 'TimeOfDay': 'string',\n 'TimeZone': 'string'\n },\n PubliclyAccessible=True|False,\n SecurityGroups=[\n 'string',\n ],\n SubnetIds=[\n 'string',\n ],\n Tags={\n 'string': 'string'\n },\n Users=[\n {\n 'ConsoleAccess': True|False,\n 'Groups': [\n 'string',\n ],\n 'Password': 'string',\n 'Username': 'string'\n },\n ]\n )\n \n \n :type AutoMinorVersionUpgrade: boolean\n :param AutoMinorVersionUpgrade: Required. Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions. The automatic upgrades occur during the maintenance window of the broker or after a manual broker reboot.\n\n :type BrokerName: string\n :param BrokerName: Required. The name of the broker. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain whitespaces, brackets, wildcard characters, or special characters.\n\n :type Configuration: dict\n :param Configuration: A list of information about the configuration.\n Id (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n Revision (integer) -- The revision number of the configuration.\n \n\n :type CreatorRequestId: string\n :param CreatorRequestId: The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. Note: We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.This field is autopopulated if not provided.\n\n :type DeploymentMode: string\n :param DeploymentMode: Required. The deployment mode of the broker.\n\n :type EngineType: string\n :param EngineType: Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ.\n\n :type EngineVersion: string\n :param EngineVersion: Required. The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html\n\n :type HostInstanceType: string\n :param HostInstanceType: Required. The broker's instance type.\n\n :type Logs: dict\n :param Logs: Enables Amazon CloudWatch logging for brokers.\n Audit (boolean) -- Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.\n General (boolean) -- Enables general logging.\n \n\n :type MaintenanceWindowStartTime: dict\n :param MaintenanceWindowStartTime: The parameters that determine the WeeklyStartTime.\n DayOfWeek (string) -- Required. The day of the week.\n TimeOfDay (string) -- Required. The time, in 24-hour format.\n TimeZone (string) -- The time zone, UTC by default, in either the Country/City format, or the UTC offset format.\n \n\n :type PubliclyAccessible: boolean\n :param PubliclyAccessible: Required. 
Enables connections from applications outside of the VPC that hosts the broker's subnets.\n\n    :type SecurityGroups: list\n    :param SecurityGroups: The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.\n    (string) --\n    \n\n    :type SubnetIds: list\n    :param SubnetIds: The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets.\n    (string) --\n    \n\n    :type Tags: dict\n    :param Tags: Create tags when creating the broker.\n    (string) --\n    (string) --\n    \n\n    :type Users: list\n    :param Users: Required. The list of ActiveMQ users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n    (dict) -- An ActiveMQ user associated with the broker.\n    ConsoleAccess (boolean) -- Enables access to the ActiveMQ Web Console for the ActiveMQ user.\n    Groups (list) -- The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n    (string) --\n    Password (string) -- Required. The password of the ActiveMQ user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas.\n    Username (string) -- Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n    \n\n    :rtype: dict\n    :return: {\n    'BrokerArn': 'string',\n    'BrokerId': 'string'\n    }\n    \n    \n    :returns: \n    (dict) -- HTTP Status Code 200: OK.\n    BrokerArn (string) -- The Amazon Resource Name (ARN) of the broker.\n    BrokerId (string) -- The unique ID that Amazon MQ generates for the broker.\n    \n    \n    \n    \"\"\"\n    pass\n\ndef create_configuration(EngineType=None, EngineVersion=None, Name=None, Tags=None):\n    \"\"\"\n    Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_configuration(\n    EngineType='ACTIVEMQ',\n    EngineVersion='string',\n    Name='string',\n    Tags={\n    'string': 'string'\n    }\n    )\n    \n    \n    :type EngineType: string\n    :param EngineType: Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ.\n\n    :type EngineVersion: string\n    :param EngineVersion: Required. The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html\n\n    :type Name: string\n    :param Name: Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.\n\n    :type Tags: dict\n    :param Tags: Create tags when creating the configuration.\n    (string) --\n    (string) --\n    \n\n    :rtype: dict\n    :return: {\n    'Arn': 'string',\n    'Created': datetime(2015, 1, 1),\n    'Id': 'string',\n    'LatestRevision': {\n    'Created': datetime(2015, 1, 1),\n    'Description': 'string',\n    'Revision': 123\n    },\n    'Name': 'string'\n    }\n    \n    \n    :returns: \n    (dict) -- HTTP Status Code 200: OK.\n    Arn (string) -- Required. 
The Amazon Resource Name (ARN) of the configuration.\n    Created (datetime) -- Required. The date and time of the configuration.\n    Id (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n    LatestRevision (dict) -- The latest revision of the configuration.\n    Created (datetime) -- Required. The date and time of the configuration revision.\n    Description (string) -- The description of the configuration revision.\n    Revision (integer) -- Required. The revision number of the configuration.\n    \n    \n    Name (string) -- Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.\n    \n    \n    \n    \"\"\"\n    pass\n\ndef create_tags(ResourceArn=None, Tags=None):\n    \"\"\"\n    Add a tag to a resource.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_tags(\n    ResourceArn='string',\n    Tags={\n    'string': 'string'\n    }\n    )\n    \n    \n    :type ResourceArn: string\n    :param ResourceArn: [REQUIRED] the Amazon Resource Name (ARN)\n\n    :type Tags: dict\n    :param Tags: The key-value pair for the resource tag.\n    (string) --\n    (string) --\n    \n\n    \"\"\"\n    pass\n\ndef create_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n    \"\"\"\n    Creates an ActiveMQ user.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_user(\n    BrokerId='string',\n    ConsoleAccess=True|False,\n    Groups=[\n    'string',\n    ],\n    Password='string',\n    Username='string'\n    )\n    \n    \n    :type BrokerId: string\n    :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n    :type ConsoleAccess: boolean\n    :param ConsoleAccess: Enables access to the ActiveMQ Web Console for the ActiveMQ user.\n\n    :type Groups: list\n    :param Groups: The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n    (string) --\n    \n\n    :type Password: string\n    :param Password: Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas.\n\n    :type Username: string\n    :param Username: [REQUIRED] The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) -- HTTP Status Code 200: OK.\n    \n    \"\"\"\n    pass\n\ndef delete_broker(BrokerId=None):\n    \"\"\"\n    Deletes a broker. Note: This API is asynchronous.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.delete_broker(\n    BrokerId='string'\n    )\n    \n    \n    :type BrokerId: string\n    :param BrokerId: [REQUIRED] The name of the broker. 
This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain whitespaces, brackets, wildcard characters, or special characters.\n\n :rtype: dict\n :return: {\n 'BrokerId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_tags(ResourceArn=None, TagKeys=None):\n \"\"\"\n Remove a tag from a resource.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_tags(\n ResourceArn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED] the Amazon Resource Name (ARN)\n\n :type TagKeys: list\n :param TagKeys: [REQUIRED] An array of tag keys to delete\n (string) --\n \n\n \"\"\"\n pass\n\ndef delete_user(BrokerId=None, Username=None):\n \"\"\"\n Deletes an ActiveMQ user.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user(\n BrokerId='string',\n Username='string'\n )\n \n \n :type BrokerId: string\n :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n :type Username: string\n :param Username: [REQUIRED] The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n \n \"\"\"\n pass\n\ndef describe_broker(BrokerId=None):\n \"\"\"\n Returns information about the specified broker.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_broker(\n BrokerId='string'\n )\n \n \n :type BrokerId: string\n :param BrokerId: [REQUIRED] The name of the broker. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain whitespaces, brackets, wildcard characters, or special characters.\n\n :rtype: dict\n :return: {\n 'AutoMinorVersionUpgrade': True|False,\n 'BrokerArn': 'string',\n 'BrokerId': 'string',\n 'BrokerInstances': [\n {\n 'ConsoleURL': 'string',\n 'Endpoints': [\n 'string',\n ],\n 'IpAddress': 'string'\n },\n ],\n 'BrokerName': 'string',\n 'BrokerState': 'CREATION_IN_PROGRESS'|'CREATION_FAILED'|'DELETION_IN_PROGRESS'|'RUNNING'|'REBOOT_IN_PROGRESS',\n 'Configurations': {\n 'Current': {\n 'Id': 'string',\n 'Revision': 123\n },\n 'History': [\n {\n 'Id': 'string',\n 'Revision': 123\n },\n ],\n 'Pending': {\n 'Id': 'string',\n 'Revision': 123\n }\n },\n 'Created': datetime(2015, 1, 1),\n 'DeploymentMode': 'SINGLE_INSTANCE'|'ACTIVE_STANDBY_MULTI_AZ',\n 'EngineType': 'ACTIVEMQ',\n 'EngineVersion': 'string',\n 'HostInstanceType': 'string',\n 'Logs': {\n 'Audit': True|False,\n 'AuditLogGroup': 'string',\n 'General': True|False,\n 'GeneralLogGroup': 'string',\n 'Pending': {\n 'Audit': True|False,\n 'General': True|False\n }\n },\n 'MaintenanceWindowStartTime': {\n 'DayOfWeek': 'MONDAY'|'TUESDAY'|'WEDNESDAY'|'THURSDAY'|'FRIDAY'|'SATURDAY'|'SUNDAY',\n 'TimeOfDay': 'string',\n 'TimeZone': 'string'\n },\n 'PendingEngineVersion': 'string',\n 'PubliclyAccessible': True|False,\n 'SecurityGroups': [\n 'string',\n ],\n 'SubnetIds': [\n 'string',\n ],\n 'Tags': {\n 'string': 'string'\n },\n 'Users': [\n {\n 'PendingChange': 'CREATE'|'UPDATE'|'DELETE',\n 'Username': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_configuration(ConfigurationId=None):\n \"\"\"\n Returns information about the specified configuration.\n See also: AWS API Documentation\n \n \n :example: 
def describe_configuration(ConfigurationId=None):\n \"\"\"\n Returns information about the specified configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_configuration(\n ConfigurationId='string'\n )\n \n \n :type ConfigurationId: string\n :param ConfigurationId: [REQUIRED] The unique ID that Amazon MQ generates for the configuration.\n\n :rtype: dict\n :return: {\n 'Arn': 'string',\n 'Created': datetime(2015, 1, 1),\n 'Description': 'string',\n 'EngineType': 'ACTIVEMQ',\n 'EngineVersion': 'string',\n 'Id': 'string',\n 'LatestRevision': {\n 'Created': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Revision': 123\n },\n 'Name': 'string',\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_configuration_revision(ConfigurationId=None, ConfigurationRevision=None):\n \"\"\"\n Returns the specified configuration revision for the specified configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_configuration_revision(\n ConfigurationId='string',\n ConfigurationRevision='string'\n )\n \n \n :type ConfigurationId: string\n :param ConfigurationId: [REQUIRED] The unique ID that Amazon MQ generates for the configuration.\n\n :type ConfigurationRevision: string\n :param ConfigurationRevision: [REQUIRED] The revision of the configuration.\n\n :rtype: dict\n :return: {\n 'ConfigurationId': 'string',\n 'Created': datetime(2015, 1, 1),\n 'Data': 'string',\n 'Description': 'string'\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n ConfigurationId (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n Created (datetime) -- Required. The date and time of the configuration.\n Data (string) -- Required. The base64-encoded XML configuration.\n Description (string) -- The description of the configuration.\n \n \n \n \"\"\"\n pass\n\ndef describe_user(BrokerId=None, Username=None):\n \"\"\"\n Returns information about an ActiveMQ user.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user(\n BrokerId='string',\n Username='string'\n )\n \n \n :type BrokerId: string\n :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n :type Username: string\n :param Username: [REQUIRED] The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n\n :rtype: dict\n :return: {\n 'BrokerId': 'string',\n 'ConsoleAccess': True|False,\n 'Groups': [\n 'string',\n ],\n 'Pending': {\n 'ConsoleAccess': True|False,\n 'Groups': [\n 'string',\n ],\n 'PendingChange': 'CREATE'|'UPDATE'|'DELETE'\n },\n 'Username': 'string'\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n BrokerId (string) -- Required. The unique ID that Amazon MQ generates for the broker.\n ConsoleAccess (boolean) -- Enables access to the ActiveMQ Web Console for the ActiveMQ user.\n Groups (list) -- The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n (string) --\n \n \n Pending (dict) -- The status of the changes pending for the ActiveMQ user.\n ConsoleAccess (boolean) -- Enables access to the ActiveMQ Web Console for the ActiveMQ user.\n Groups (list) -- The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n (string) --\n \n \n PendingChange (string) -- Required. The type of change pending for the ActiveMQ user.\n \n \n Username (string) -- Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n \n \n \n \"\"\"\n pass\n\n
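# An illustrative sketch (not part of the generated stubs): reading a user's pending change\n# from describe_user, using only keys documented in the response shape above.\ndef _example_user_pending_change(client, broker_id, username):\n    response = client.describe_user(BrokerId=broker_id, Username=username)\n    return response.get('Pending', {}).get('PendingChange')\n\n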
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_brokers(MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of all brokers.\n See also: AWS API Documentation\n \n \n :example: response = client.list_brokers(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.\n\n :type NextToken: string\n :param NextToken: The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n\n :rtype: dict\n :return: {\n 'BrokerSummaries': [\n {\n 'BrokerArn': 'string',\n 'BrokerId': 'string',\n 'BrokerName': 'string',\n 'BrokerState': 'CREATION_IN_PROGRESS'|'CREATION_FAILED'|'DELETION_IN_PROGRESS'|'RUNNING'|'REBOOT_IN_PROGRESS',\n 'Created': datetime(2015, 1, 1),\n 'DeploymentMode': 'SINGLE_INSTANCE'|'ACTIVE_STANDBY_MULTI_AZ',\n 'HostInstanceType': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n BrokerSummaries (list) -- A list of information about all brokers.\n (dict) -- Information about a broker.\n BrokerArn (string) -- The Amazon Resource Name (ARN) of the broker.\n BrokerId (string) -- The unique ID that Amazon MQ generates for the broker.\n BrokerName (string) -- The name of the broker. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain whitespaces, brackets, wildcard characters, or special characters.\n BrokerState (string) -- The status of the broker.\n Created (datetime) -- The time when the broker was created.\n DeploymentMode (string) -- Required. The deployment mode of the broker.\n HostInstanceType (string) -- The broker's instance type.\n \n \n \n \n NextToken (string) -- The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n \n \n \n \"\"\"\n pass\n\n
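# An illustrative sketch (not part of the generated stubs): draining every page of\n# list_brokers by hand with NextToken, per the parameter documentation above.\ndef _example_list_all_brokers(client):\n    summaries, token = [], None\n    while True:\n        kwargs = {'MaxResults': 100}\n        if token:\n            kwargs['NextToken'] = token\n        page = client.list_brokers(**kwargs)\n        summaries.extend(page.get('BrokerSummaries', []))\n        token = page.get('NextToken')\n        if not token:\n            return summaries\n\n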
def list_configuration_revisions(ConfigurationId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of all revisions for the specified configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.list_configuration_revisions(\n ConfigurationId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ConfigurationId: string\n :param ConfigurationId: [REQUIRED] The unique ID that Amazon MQ generates for the configuration.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.\n\n :type NextToken: string\n :param NextToken: The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n\n :rtype: dict\n :return: {\n 'ConfigurationId': 'string',\n 'MaxResults': 123,\n 'NextToken': 'string',\n 'Revisions': [\n {\n 'Created': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Revision': 123\n },\n ]\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n ConfigurationId (string) -- The unique ID that Amazon MQ generates for the configuration.\n MaxResults (integer) -- The maximum number of configuration revisions that can be returned per page (20 by default). This value must be an integer from 5 to 100.\n NextToken (string) -- The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n Revisions (list) -- The list of all revisions for the specified configuration.\n (dict) -- Returns information about the specified configuration revision.\n Created (datetime) -- Required. The date and time of the configuration revision.\n Description (string) -- The description of the configuration revision.\n Revision (integer) -- Required. The revision number of the configuration.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef list_configurations(MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of all configurations.\n See also: AWS API Documentation\n \n \n :example: response = client.list_configurations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.\n\n :type NextToken: string\n :param NextToken: The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n\n :rtype: dict\n :return: {\n 'Configurations': [\n {\n 'Arn': 'string',\n 'Created': datetime(2015, 1, 1),\n 'Description': 'string',\n 'EngineType': 'ACTIVEMQ',\n 'EngineVersion': 'string',\n 'Id': 'string',\n 'LatestRevision': {\n 'Created': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Revision': 123\n },\n 'Name': 'string',\n 'Tags': {\n 'string': 'string'\n }\n },\n ],\n 'MaxResults': 123,\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n Configurations (list) -- The list of all configurations.\n (dict) -- Returns information about all configurations.\n Arn (string) -- Required. 
The ARN of the configuration.\n Created (datetime) -- Required. The date and time of the configuration revision.\n Description (string) -- Required. The description of the configuration.\n EngineType (string) -- Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ.\n EngineVersion (string) -- Required. The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html\n Id (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n LatestRevision (dict) -- Required. The latest revision of the configuration.\n Created (datetime) -- Required. The date and time of the configuration revision.\n Description (string) -- The description of the configuration revision.\n Revision (integer) -- Required. The revision number of the configuration.\n \n \n Name (string) -- Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.\n Tags (dict) -- The list of all tags associated with this configuration.\n (string) --\n (string) --\n \n \n \n \n \n \n \n \n MaxResults (integer) -- The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.\n NextToken (string) -- The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n \n \n \n \"\"\"\n pass\n\ndef list_tags(ResourceArn=None):\n \"\"\"\n Lists tags for a resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags(\n ResourceArn='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED] the Amazon Resource Name (ARN)\n\n :rtype: dict\n :return: {\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef list_users(BrokerId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of all ActiveMQ users.\n See also: AWS API Documentation\n \n \n :example: response = client.list_users(\n BrokerId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type BrokerId: string\n :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100.\n\n :type NextToken: string\n :param NextToken: The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n\n :rtype: dict\n :return: {\n 'BrokerId': 'string',\n 'MaxResults': 123,\n 'NextToken': 'string',\n 'Users': [\n {\n 'PendingChange': 'CREATE'|'UPDATE'|'DELETE',\n 'Username': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n BrokerId (string) -- Required. The unique ID that Amazon MQ generates for the broker.\n MaxResults (integer) -- Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100.\n NextToken (string) -- The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.\n Users (list) -- Required. 
The list of all ActiveMQ usernames for the specified broker.\n (dict) -- Information about an ActiveMQ user.\n PendingChange (string) -- The type of change pending for the ActiveMQ user.\n Username (string) -- Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef reboot_broker(BrokerId=None):\n \"\"\"\n Reboots a broker. Note: This API is asynchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.reboot_broker(\n BrokerId='string'\n )\n \n \n :type BrokerId: string\n :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef update_broker(AutoMinorVersionUpgrade=None, BrokerId=None, Configuration=None, EngineVersion=None, Logs=None):\n \"\"\"\n Adds a pending configuration change to a broker.\n See also: AWS API Documentation\n \n \n :example: response = client.update_broker(\n AutoMinorVersionUpgrade=True|False,\n BrokerId='string',\n Configuration={\n 'Id': 'string',\n 'Revision': 123\n },\n EngineVersion='string',\n Logs={\n 'Audit': True|False,\n 'General': True|False\n }\n )\n \n \n :type AutoMinorVersionUpgrade: boolean\n :param AutoMinorVersionUpgrade: Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions. The automatic upgrades occur during the maintenance window of the broker or after a manual broker reboot.\n\n :type BrokerId: string\n :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n :type Configuration: dict\n :param Configuration: A list of information about the configuration.\n Id (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n Revision (integer) -- The revision number of the configuration.\n \n\n :type EngineVersion: string\n :param EngineVersion: The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html\n\n :type Logs: dict\n :param Logs: Enables Amazon CloudWatch logging for brokers.\n Audit (boolean) -- Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.\n General (boolean) -- Enables general logging.\n \n\n :rtype: dict\n :return: {\n 'AutoMinorVersionUpgrade': True|False,\n 'BrokerId': 'string',\n 'Configuration': {\n 'Id': 'string',\n 'Revision': 123\n },\n 'EngineVersion': 'string',\n 'Logs': {\n 'Audit': True|False,\n 'General': True|False\n }\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n AutoMinorVersionUpgrade (boolean) -- The new value of automatic upgrades to new minor version for brokers.\n BrokerId (string) -- Required. The unique ID that Amazon MQ generates for the broker.\n Configuration (dict) -- The ID of the updated configuration.\n Id (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n Revision (integer) -- The revision number of the configuration.\n \n \n EngineVersion (string) -- The version of the broker engine to upgrade to. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html\n Logs (dict) -- The list of information about logs to be enabled for the specified broker.\n Audit (boolean) -- Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.\n General (boolean) -- Enables general logging.\n \n \n \n \n \n \"\"\"\n pass\n\n
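# An illustrative sketch (not part of the generated stubs): update_broker only records a\n# pending configuration change, so this sketch reboots afterwards so the change can take effect.\ndef _example_apply_configuration(client, broker_id, configuration_id, revision):\n    client.update_broker(\n        BrokerId=broker_id,\n        Configuration={'Id': configuration_id, 'Revision': revision}\n    )\n    client.reboot_broker(BrokerId=broker_id)  # asynchronous, per the docs above\n\n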
def update_configuration(ConfigurationId=None, Data=None, Description=None):\n \"\"\"\n Updates the specified configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration(\n ConfigurationId='string',\n Data='string',\n Description='string'\n )\n \n \n :type ConfigurationId: string\n :param ConfigurationId: [REQUIRED] The unique ID that Amazon MQ generates for the configuration.\n\n :type Data: string\n :param Data: Required. The base64-encoded XML configuration.\n\n :type Description: string\n :param Description: The description of the configuration.\n\n :rtype: dict\n :return: {\n 'Arn': 'string',\n 'Created': datetime(2015, 1, 1),\n 'Id': 'string',\n 'LatestRevision': {\n 'Created': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Revision': 123\n },\n 'Name': 'string',\n 'Warnings': [\n {\n 'AttributeName': 'string',\n 'ElementName': 'string',\n 'Reason': 'DISALLOWED_ELEMENT_REMOVED'|'DISALLOWED_ATTRIBUTE_REMOVED'|'INVALID_ATTRIBUTE_VALUE_REMOVED'\n },\n ]\n }\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n Arn (string) -- Required. The Amazon Resource Name (ARN) of the configuration.\n Created (datetime) -- Required. The date and time of the configuration.\n Id (string) -- Required. The unique ID that Amazon MQ generates for the configuration.\n LatestRevision (dict) -- The latest revision of the configuration.\n Created (datetime) -- Required. The date and time of the configuration revision.\n Description (string) -- The description of the configuration revision.\n Revision (integer) -- Required. The revision number of the configuration.\n \n \n Name (string) -- Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.\n Warnings (list) -- The list of the first 20 warnings about the configuration XML elements or attributes that were sanitized.\n (dict) -- Returns information about the XML element or attribute that was sanitized in the configuration.\n AttributeName (string) -- The name of the XML attribute that has been sanitized.\n ElementName (string) -- The name of the XML element that has been sanitized.\n Reason (string) -- Required. The reason for which the XML elements or attributes were sanitized.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\n
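# An illustrative sketch (not part of the generated stubs): Data must be base64-encoded\n# XML, so this sketch encodes a plain XML string before calling update_configuration.\ndef _example_update_configuration(client, configuration_id, xml_string, note='edited'):\n    import base64\n    encoded = base64.b64encode(xml_string.encode('utf-8')).decode('ascii')\n    return client.update_configuration(ConfigurationId=configuration_id, Data=encoded, Description=note)\n\n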
def update_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n \"\"\"\n Updates the information for an ActiveMQ user.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user(\n BrokerId='string',\n ConsoleAccess=True|False,\n Groups=[\n 'string',\n ],\n Password='string',\n Username='string'\n )\n \n \n :type BrokerId: string\n :param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.\n\n :type ConsoleAccess: boolean\n :param ConsoleAccess: Enables access to the ActiveMQ Web Console for the ActiveMQ user.\n\n :type Groups: list\n :param Groups: The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n (string) --\n \n\n :type Password: string\n :param Password: The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas.\n\n :type Username: string\n :param Username: [REQUIRED] The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- HTTP Status Code 200: OK.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6652432680130005, "alphanum_fraction": 0.6974797248840332, "avg_line_length": 34.439170837402344, "blob_id": "787400058a242e8d42b0e2a84b6127ca69fc5322", "content_id": "a14796535bacbb0499035e5f7d6ebf31afd454be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11943, "license_type": "permissive", "max_line_length": 75, "num_lines": 337, "path": "/pyboto3/clients.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "import boto3\nacm = boto3.client(\"acm\")\n\"\"\":type : pyboto3.acm\"\"\"\nacm_pca = boto3.client(\"acm-pca\")\n\"\"\":type : pyboto3.acm_pca\"\"\"\nalexaforbusiness = boto3.client(\"alexaforbusiness\")\n\"\"\":type : pyboto3.alexaforbusiness\"\"\"\namplify = boto3.client(\"amplify\")\n\"\"\":type : pyboto3.amplify\"\"\"\napigateway = boto3.client(\"apigateway\")\n\"\"\":type : pyboto3.apigateway\"\"\"\napigatewaymanagementapi = boto3.client(\"apigatewaymanagementapi\")\n\"\"\":type : pyboto3.apigatewaymanagementapi\"\"\"\napigatewayv2 = boto3.client(\"apigatewayv2\")\n\"\"\":type : pyboto3.apigatewayv2\"\"\"\napplication_autoscaling = boto3.client(\"application-autoscaling\")\n\"\"\":type : pyboto3.application_autoscaling\"\"\"\nappmesh = boto3.client(\"appmesh\")\n\"\"\":type : pyboto3.appmesh\"\"\"\nappstream = boto3.client(\"appstream\")\n\"\"\":type : pyboto3.appstream\"\"\"\nappsync = boto3.client(\"appsync\")\n\"\"\":type : pyboto3.appsync\"\"\"\nathena = boto3.client(\"athena\")\n\"\"\":type : pyboto3.athena\"\"\"\nautoscaling = boto3.client(\"autoscaling\")\n\"\"\":type : pyboto3.autoscaling\"\"\"\nautoscaling_plans = boto3.client(\"autoscaling-plans\")\n\"\"\":type : pyboto3.autoscaling_plans\"\"\"\nbatch = boto3.client(\"batch\")\n\"\"\":type : pyboto3.batch\"\"\"\nbudgets = boto3.client(\"budgets\")\n\"\"\":type : pyboto3.budgets\"\"\"\nce = boto3.client(\"ce\")\n\"\"\":type : pyboto3.ce\"\"\"\nchime = boto3.client(\"chime\")\n\"\"\":type : pyboto3.chime\"\"\"\ncloud9 = boto3.client(\"cloud9\")\n\"\"\":type : pyboto3.cloud9\"\"\"\nclouddirectory = boto3.client(\"clouddirectory\")\n\"\"\":type : pyboto3.clouddirectory\"\"\"\ncloudformation = boto3.client(\"cloudformation\")\n\"\"\":type : pyboto3.cloudformation\"\"\"\ncloudfront = boto3.client(\"cloudfront\")\n\"\"\":type : pyboto3.cloudfront\"\"\"\ncloudhsm = boto3.client(\"cloudhsm\")\n\"\"\":type : pyboto3.cloudhsm\"\"\"\ncloudhsmv2 = boto3.client(\"cloudhsmv2\")\n\"\"\":type : pyboto3.cloudhsmv2\"\"\"\ncloudsearch = boto3.client(\"cloudsearch\")\n\"\"\":type : pyboto3.cloudsearch\"\"\"\ncloudsearchdomain = boto3.client(\"cloudsearchdomain\")\n\"\"\":type : pyboto3.cloudsearchdomain\"\"\"\ncloudtrail = boto3.client(\"cloudtrail\")\n\"\"\":type : pyboto3.cloudtrail\"\"\"\ncloudwatch = 
boto3.client(\"cloudwatch\")\n\"\"\":type : pyboto3.cloudwatch\"\"\"\ncodebuild = boto3.client(\"codebuild\")\n\"\"\":type : pyboto3.codebuild\"\"\"\ncodecommit = boto3.client(\"codecommit\")\n\"\"\":type : pyboto3.codecommit\"\"\"\ncodedeploy = boto3.client(\"codedeploy\")\n\"\"\":type : pyboto3.codedeploy\"\"\"\ncodepipeline = boto3.client(\"codepipeline\")\n\"\"\":type : pyboto3.codepipeline\"\"\"\ncodestar = boto3.client(\"codestar\")\n\"\"\":type : pyboto3.codestar\"\"\"\ncognito_identity = boto3.client(\"cognito-identity\")\n\"\"\":type : pyboto3.cognito_identity\"\"\"\ncognito_idp = boto3.client(\"cognito-idp\")\n\"\"\":type : pyboto3.cognito_idp\"\"\"\ncognito_sync = boto3.client(\"cognito-sync\")\n\"\"\":type : pyboto3.cognito_sync\"\"\"\ncomprehend = boto3.client(\"comprehend\")\n\"\"\":type : pyboto3.comprehend\"\"\"\ncomprehendmedical = boto3.client(\"comprehendmedical\")\n\"\"\":type : pyboto3.comprehendmedical\"\"\"\nconfig = boto3.client(\"config\")\n\"\"\":type : pyboto3.config\"\"\"\nconnect = boto3.client(\"connect\")\n\"\"\":type : pyboto3.connect\"\"\"\ncur = boto3.client(\"cur\")\n\"\"\":type : pyboto3.cur\"\"\"\ndatapipeline = boto3.client(\"datapipeline\")\n\"\"\":type : pyboto3.datapipeline\"\"\"\ndatasync = boto3.client(\"datasync\")\n\"\"\":type : pyboto3.datasync\"\"\"\ndax = boto3.client(\"dax\")\n\"\"\":type : pyboto3.dax\"\"\"\ndevicefarm = boto3.client(\"devicefarm\")\n\"\"\":type : pyboto3.devicefarm\"\"\"\ndirectconnect = boto3.client(\"directconnect\")\n\"\"\":type : pyboto3.directconnect\"\"\"\ndiscovery = boto3.client(\"discovery\")\n\"\"\":type : pyboto3.discovery\"\"\"\ndlm = boto3.client(\"dlm\")\n\"\"\":type : pyboto3.dlm\"\"\"\ndms = boto3.client(\"dms\")\n\"\"\":type : pyboto3.dms\"\"\"\nds = boto3.client(\"ds\")\n\"\"\":type : pyboto3.ds\"\"\"\ndynamodb = boto3.client(\"dynamodb\")\n\"\"\":type : pyboto3.dynamodb\"\"\"\ndynamodbstreams = boto3.client(\"dynamodbstreams\")\n\"\"\":type : pyboto3.dynamodbstreams\"\"\"\nec2 = boto3.client(\"ec2\")\n\"\"\":type : pyboto3.ec2\"\"\"\necr = boto3.client(\"ecr\")\n\"\"\":type : pyboto3.ecr\"\"\"\necs = boto3.client(\"ecs\")\n\"\"\":type : pyboto3.ecs\"\"\"\nefs = boto3.client(\"efs\")\n\"\"\":type : pyboto3.efs\"\"\"\neks = boto3.client(\"eks\")\n\"\"\":type : pyboto3.eks\"\"\"\nelasticache = boto3.client(\"elasticache\")\n\"\"\":type : pyboto3.elasticache\"\"\"\nelasticbeanstalk = boto3.client(\"elasticbeanstalk\")\n\"\"\":type : pyboto3.elasticbeanstalk\"\"\"\nelastictranscoder = boto3.client(\"elastictranscoder\")\n\"\"\":type : pyboto3.elastictranscoder\"\"\"\nelb = boto3.client(\"elb\")\n\"\"\":type : pyboto3.elb\"\"\"\nelbv2 = boto3.client(\"elbv2\")\n\"\"\":type : pyboto3.elbv2\"\"\"\nemr = boto3.client(\"emr\")\n\"\"\":type : pyboto3.emr\"\"\"\nes = boto3.client(\"es\")\n\"\"\":type : pyboto3.es\"\"\"\nevents = boto3.client(\"events\")\n\"\"\":type : pyboto3.events\"\"\"\nfirehose = boto3.client(\"firehose\")\n\"\"\":type : pyboto3.firehose\"\"\"\nfms = boto3.client(\"fms\")\n\"\"\":type : pyboto3.fms\"\"\"\nfsx = boto3.client(\"fsx\")\n\"\"\":type : pyboto3.fsx\"\"\"\ngamelift = boto3.client(\"gamelift\")\n\"\"\":type : pyboto3.gamelift\"\"\"\nglacier = boto3.client(\"glacier\")\n\"\"\":type : pyboto3.glacier\"\"\"\nglobalaccelerator = boto3.client(\"globalaccelerator\")\n\"\"\":type : pyboto3.globalaccelerator\"\"\"\nglue = boto3.client(\"glue\")\n\"\"\":type : pyboto3.glue\"\"\"\ngreengrass = boto3.client(\"greengrass\")\n\"\"\":type : pyboto3.greengrass\"\"\"\nguardduty = 
boto3.client(\"guardduty\")\n\"\"\":type : pyboto3.guardduty\"\"\"\nhealth = boto3.client(\"health\")\n\"\"\":type : pyboto3.health\"\"\"\niam = boto3.client(\"iam\")\n\"\"\":type : pyboto3.iam\"\"\"\nimportexport = boto3.client(\"importexport\")\n\"\"\":type : pyboto3.importexport\"\"\"\ninspector = boto3.client(\"inspector\")\n\"\"\":type : pyboto3.inspector\"\"\"\niot = boto3.client(\"iot\")\n\"\"\":type : pyboto3.iot\"\"\"\niot_data = boto3.client(\"iot-data\")\n\"\"\":type : pyboto3.iot_data\"\"\"\niot_jobs_data = boto3.client(\"iot-jobs-data\")\n\"\"\":type : pyboto3.iot_jobs_data\"\"\"\niot1click_devices = boto3.client(\"iot1click-devices\")\n\"\"\":type : pyboto3.iot1click_devices\"\"\"\niot1click_projects = boto3.client(\"iot1click-projects\")\n\"\"\":type : pyboto3.iot1click_projects\"\"\"\niotanalytics = boto3.client(\"iotanalytics\")\n\"\"\":type : pyboto3.iotanalytics\"\"\"\nkafka = boto3.client(\"kafka\")\n\"\"\":type : pyboto3.kafka\"\"\"\nkinesis = boto3.client(\"kinesis\")\n\"\"\":type : pyboto3.kinesis\"\"\"\nkinesis_video_archived_media = boto3.client(\"kinesis-video-archived-media\")\n\"\"\":type : pyboto3.kinesis_video_archived_media\"\"\"\nkinesis_video_media = boto3.client(\"kinesis-video-media\")\n\"\"\":type : pyboto3.kinesis_video_media\"\"\"\nkinesisanalytics = boto3.client(\"kinesisanalytics\")\n\"\"\":type : pyboto3.kinesisanalytics\"\"\"\nkinesisanalyticsv2 = boto3.client(\"kinesisanalyticsv2\")\n\"\"\":type : pyboto3.kinesisanalyticsv2\"\"\"\nkinesisvideo = boto3.client(\"kinesisvideo\")\n\"\"\":type : pyboto3.kinesisvideo\"\"\"\nkms = boto3.client(\"kms\")\n\"\"\":type : pyboto3.kms\"\"\"\nlambda_ = boto3.client(\"lambda\")\n\"\"\":type : pyboto3.lambda_\"\"\"\nlex_models = boto3.client(\"lex-models\")\n\"\"\":type : pyboto3.lex_models\"\"\"\nlex_runtime = boto3.client(\"lex-runtime\")\n\"\"\":type : pyboto3.lex_runtime\"\"\"\nlicense_manager = boto3.client(\"license-manager\")\n\"\"\":type : pyboto3.license_manager\"\"\"\nlightsail = boto3.client(\"lightsail\")\n\"\"\":type : pyboto3.lightsail\"\"\"\nlogs = boto3.client(\"logs\")\n\"\"\":type : pyboto3.logs\"\"\"\nmachinelearning = boto3.client(\"machinelearning\")\n\"\"\":type : pyboto3.machinelearning\"\"\"\nmacie = boto3.client(\"macie\")\n\"\"\":type : pyboto3.macie\"\"\"\nmarketplace_entitlement = boto3.client(\"marketplace-entitlement\")\n\"\"\":type : pyboto3.marketplace_entitlement\"\"\"\nmarketplacecommerceanalytics = boto3.client(\"marketplacecommerceanalytics\")\n\"\"\":type : pyboto3.marketplacecommerceanalytics\"\"\"\nmediaconnect = boto3.client(\"mediaconnect\")\n\"\"\":type : pyboto3.mediaconnect\"\"\"\nmediaconvert = boto3.client(\"mediaconvert\")\n\"\"\":type : pyboto3.mediaconvert\"\"\"\nmedialive = boto3.client(\"medialive\")\n\"\"\":type : pyboto3.medialive\"\"\"\nmediapackage = boto3.client(\"mediapackage\")\n\"\"\":type : pyboto3.mediapackage\"\"\"\nmediastore = boto3.client(\"mediastore\")\n\"\"\":type : pyboto3.mediastore\"\"\"\nmediastore_data = boto3.client(\"mediastore-data\")\n\"\"\":type : pyboto3.mediastore_data\"\"\"\nmediatailor = boto3.client(\"mediatailor\")\n\"\"\":type : pyboto3.mediatailor\"\"\"\nmeteringmarketplace = boto3.client(\"meteringmarketplace\")\n\"\"\":type : pyboto3.meteringmarketplace\"\"\"\nmgh = boto3.client(\"mgh\")\n\"\"\":type : pyboto3.mgh\"\"\"\nmobile = boto3.client(\"mobile\")\n\"\"\":type : pyboto3.mobile\"\"\"\nmq = boto3.client(\"mq\")\n\"\"\":type : pyboto3.mq\"\"\"\nmturk = boto3.client(\"mturk\")\n\"\"\":type : pyboto3.mturk\"\"\"\nneptune = 
boto3.client(\"neptune\")\n\"\"\":type : pyboto3.neptune\"\"\"\nopsworks = boto3.client(\"opsworks\")\n\"\"\":type : pyboto3.opsworks\"\"\"\nopsworkscm = boto3.client(\"opsworkscm\")\n\"\"\":type : pyboto3.opsworkscm\"\"\"\norganizations = boto3.client(\"organizations\")\n\"\"\":type : pyboto3.organizations\"\"\"\npi = boto3.client(\"pi\")\n\"\"\":type : pyboto3.pi\"\"\"\npinpoint = boto3.client(\"pinpoint\")\n\"\"\":type : pyboto3.pinpoint\"\"\"\npinpoint_email = boto3.client(\"pinpoint-email\")\n\"\"\":type : pyboto3.pinpoint_email\"\"\"\npolly = boto3.client(\"polly\")\n\"\"\":type : pyboto3.polly\"\"\"\npricing = boto3.client(\"pricing\")\n\"\"\":type : pyboto3.pricing\"\"\"\nquicksight = boto3.client(\"quicksight\")\n\"\"\":type : pyboto3.quicksight\"\"\"\nram = boto3.client(\"ram\")\n\"\"\":type : pyboto3.ram\"\"\"\nrds = boto3.client(\"rds\")\n\"\"\":type : pyboto3.rds\"\"\"\nrds_data = boto3.client(\"rds-data\")\n\"\"\":type : pyboto3.rds_data\"\"\"\nredshift = boto3.client(\"redshift\")\n\"\"\":type : pyboto3.redshift\"\"\"\nrekognition = boto3.client(\"rekognition\")\n\"\"\":type : pyboto3.rekognition\"\"\"\nresource_groups = boto3.client(\"resource-groups\")\n\"\"\":type : pyboto3.resource_groups\"\"\"\nresourcegroupstaggingapi = boto3.client(\"resourcegroupstaggingapi\")\n\"\"\":type : pyboto3.resourcegroupstaggingapi\"\"\"\nrobomaker = boto3.client(\"robomaker\")\n\"\"\":type : pyboto3.robomaker\"\"\"\nroute53 = boto3.client(\"route53\")\n\"\"\":type : pyboto3.route53\"\"\"\nroute53domains = boto3.client(\"route53domains\")\n\"\"\":type : pyboto3.route53domains\"\"\"\nroute53resolver = boto3.client(\"route53resolver\")\n\"\"\":type : pyboto3.route53resolver\"\"\"\ns3 = boto3.client(\"s3\")\n\"\"\":type : pyboto3.s3\"\"\"\ns3control = boto3.client(\"s3control\")\n\"\"\":type : pyboto3.s3control\"\"\"\nsagemaker = boto3.client(\"sagemaker\")\n\"\"\":type : pyboto3.sagemaker\"\"\"\nsagemaker_runtime = boto3.client(\"sagemaker-runtime\")\n\"\"\":type : pyboto3.sagemaker_runtime\"\"\"\nsdb = boto3.client(\"sdb\")\n\"\"\":type : pyboto3.sdb\"\"\"\nsecretsmanager = boto3.client(\"secretsmanager\")\n\"\"\":type : pyboto3.secretsmanager\"\"\"\nsecurityhub = boto3.client(\"securityhub\")\n\"\"\":type : pyboto3.securityhub\"\"\"\nserverlessrepo = boto3.client(\"serverlessrepo\")\n\"\"\":type : pyboto3.serverlessrepo\"\"\"\nservicecatalog = boto3.client(\"servicecatalog\")\n\"\"\":type : pyboto3.servicecatalog\"\"\"\nservicediscovery = boto3.client(\"servicediscovery\")\n\"\"\":type : pyboto3.servicediscovery\"\"\"\nses = boto3.client(\"ses\")\n\"\"\":type : pyboto3.ses\"\"\"\nshield = boto3.client(\"shield\")\n\"\"\":type : pyboto3.shield\"\"\"\nsigner = boto3.client(\"signer\")\n\"\"\":type : pyboto3.signer\"\"\"\nsms = boto3.client(\"sms\")\n\"\"\":type : pyboto3.sms\"\"\"\nsms_voice = boto3.client(\"sms-voice\")\n\"\"\":type : pyboto3.sms_voice\"\"\"\nsnowball = boto3.client(\"snowball\")\n\"\"\":type : pyboto3.snowball\"\"\"\nsns = boto3.client(\"sns\")\n\"\"\":type : pyboto3.sns\"\"\"\nsqs = boto3.client(\"sqs\")\n\"\"\":type : pyboto3.sqs\"\"\"\nssm = boto3.client(\"ssm\")\n\"\"\":type : pyboto3.ssm\"\"\"\nstepfunctions = boto3.client(\"stepfunctions\")\n\"\"\":type : pyboto3.stepfunctions\"\"\"\nstoragegateway = boto3.client(\"storagegateway\")\n\"\"\":type : pyboto3.storagegateway\"\"\"\nsts = boto3.client(\"sts\")\n\"\"\":type : pyboto3.sts\"\"\"\nsupport = boto3.client(\"support\")\n\"\"\":type : pyboto3.support\"\"\"\nswf = boto3.client(\"swf\")\n\"\"\":type : 
pyboto3.swf\"\"\"\ntranscribe = boto3.client(\"transcribe\")\n\"\"\":type : pyboto3.transcribe\"\"\"\ntransfer = boto3.client(\"transfer\")\n\"\"\":type : pyboto3.transfer\"\"\"\ntranslate = boto3.client(\"translate\")\n\"\"\":type : pyboto3.translate\"\"\"\nwaf = boto3.client(\"waf\")\n\"\"\":type : pyboto3.waf\"\"\"\nwaf_regional = boto3.client(\"waf-regional\")\n\"\"\":type : pyboto3.waf_regional\"\"\"\nworkdocs = boto3.client(\"workdocs\")\n\"\"\":type : pyboto3.workdocs\"\"\"\nworkmail = boto3.client(\"workmail\")\n\"\"\":type : pyboto3.workmail\"\"\"\nworkspaces = boto3.client(\"workspaces\")\n\"\"\":type : pyboto3.workspaces\"\"\"\nxray = boto3.client(\"xray\")\n\"\"\":type : pyboto3.xray\"\"\"\n" }, { "alpha_fraction": 0.6458454132080078, "alphanum_fraction": 0.651728630065918, "avg_line_length": 37.671348571777344, "blob_id": "63a454471032a61980d7e0a4dd7c5f7d9c865d3c", "content_id": "2dd25fe39dfc1d0ee0550833e704b420ef65edd5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13768, "license_type": "permissive", "max_line_length": 389, "num_lines": 356, "path": "/pyboto3/macie.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_member_account(memberAccountId=None):\n \"\"\"\n Associates a specified AWS account with Amazon Macie as a member account.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_member_account(\n memberAccountId='string'\n )\n \n \n :type memberAccountId: string\n :param memberAccountId: [REQUIRED]\n The ID of the AWS account that you want to associate with Amazon Macie as a member account.\n \n\n \"\"\"\n pass\n\ndef associate_s3_resources(memberAccountId=None, s3Resources=None):\n \"\"\"\n Associates specified S3 resources with Amazon Macie for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie for the current master account. 
If memberAccountId is specified, the action associates specified S3 resources with Macie for the specified member account.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_s3_resources(\n memberAccountId='string',\n s3Resources=[\n {\n 'bucketName': 'string',\n 'prefix': 'string',\n 'classificationType': {\n 'oneTime': 'FULL'|'NONE',\n 'continuous': 'FULL'\n }\n },\n ]\n )\n \n \n :type memberAccountId: string\n :param memberAccountId: The ID of the Amazon Macie member account whose resources you want to associate with Macie.\n\n :type s3Resources: list\n :param s3Resources: [REQUIRED]\n The S3 resources that you want to associate with Amazon Macie for monitoring and data classification.\n (dict) --The S3 resources that you want to associate with Amazon Macie for monitoring and data classification. This data type is used as a request parameter in the AssociateS3Resources action and a response parameter in the ListS3Resources action.\n bucketName (string) -- [REQUIRED]The name of the S3 bucket that you want to associate with Amazon Macie.\n prefix (string) --The prefix of the S3 bucket that you want to associate with Amazon Macie.\n classificationType (dict) -- [REQUIRED]The classification type that you want to specify for the resource associated with Amazon Macie.\n oneTime (string) -- [REQUIRED]A one-time classification of all of the existing objects in a specified S3 bucket.\n continuous (string) -- [REQUIRED]A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.\n \n \n\n :rtype: dict\n :return: {\n 'failedS3Resources': [\n {\n 'failedItem': {\n 'bucketName': 'string',\n 'prefix': 'string'\n },\n 'errorCode': 'string',\n 'errorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef disassociate_member_account(memberAccountId=None):\n \"\"\"\n Removes the specified member account from Amazon Macie.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_member_account(\n memberAccountId='string'\n )\n \n \n :type memberAccountId: string\n :param memberAccountId: [REQUIRED]\n The ID of the member account that you want to remove from Amazon Macie.\n \n\n \"\"\"\n pass\n\ndef disassociate_s3_resources(memberAccountId=None, associatedS3Resources=None):\n \"\"\"\n Removes specified S3 resources from being monitored by Amazon Macie. If memberAccountId isn't specified, the action removes specified S3 resources from Macie for the current master account. 
If memberAccountId is specified, the action removes specified S3 resources from Macie for the specified member account.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_s3_resources(\n memberAccountId='string',\n associatedS3Resources=[\n {\n 'bucketName': 'string',\n 'prefix': 'string'\n },\n ]\n )\n \n \n :type memberAccountId: string\n :param memberAccountId: The ID of the Amazon Macie member account whose resources you want to remove from being monitored by Amazon Macie.\n\n :type associatedS3Resources: list\n :param associatedS3Resources: [REQUIRED]\n The S3 resources (buckets or prefixes) that you want to remove from being monitored and classified by Amazon Macie.\n (dict) --Contains information about the S3 resource. This data type is used as a request parameter in the DisassociateS3Resources action and can be used as a response parameter in the AssociateS3Resources and UpdateS3Resources actions.\n bucketName (string) -- [REQUIRED]The name of the S3 bucket.\n prefix (string) --The prefix of the S3 bucket.\n \n \n\n :rtype: dict\n :return: {\n 'failedS3Resources': [\n {\n 'failedItem': {\n 'bucketName': 'string',\n 'prefix': 'string'\n },\n 'errorCode': 'string',\n 'errorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_member_accounts(nextToken=None, maxResults=None):\n \"\"\"\n Lists all Amazon Macie member accounts for the current Amazon Macie master account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_member_accounts(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: Use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListMemberAccounts action. 
Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :type maxResults: integer\n :param maxResults: Use this parameter to indicate the maximum number of items that you want in the response. The default value is 250.\n\n :rtype: dict\n :return: {\n 'memberAccounts': [\n {\n 'accountId': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_s3_resources(memberAccountId=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists all the S3 resources associated with Amazon Macie. If memberAccountId isn't specified, the action lists the S3 resources associated with Amazon Macie for the current master account. If memberAccountId is specified, the action lists the S3 resources associated with Amazon Macie for the specified member account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_s3_resources(\n memberAccountId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type memberAccountId: string\n :param memberAccountId: The Amazon Macie member account ID whose associated S3 resources you want to list.\n\n :type nextToken: string\n :param nextToken: Use this parameter when paginating results. Set its value to null on your first call to the ListS3Resources action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :type maxResults: integer\n :param maxResults: Use this parameter to indicate the maximum number of items that you want in the response. The default value is 250.\n\n :rtype: dict\n :return: {\n 's3Resources': [\n {\n 'bucketName': 'string',\n 'prefix': 'string',\n 'classificationType': {\n 'oneTime': 'FULL'|'NONE',\n 'continuous': 'FULL'\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_s3_resources(memberAccountId=None, s3ResourcesUpdate=None):\n \"\"\"\n Updates the classification types for the specified S3 resources. If memberAccountId isn't specified, the action updates the classification types of the S3 resources associated with Amazon Macie for the current master account. If memberAccountId is specified, the action updates the classification types of the S3 resources associated with Amazon Macie for the specified member account.\n See also: AWS API Documentation\n \n \n :example: response = client.update_s3_resources(\n memberAccountId='string',\n s3ResourcesUpdate=[\n {\n 'bucketName': 'string',\n 'prefix': 'string',\n 'classificationTypeUpdate': {\n 'oneTime': 'FULL'|'NONE',\n 'continuous': 'FULL'\n }\n },\n ]\n )\n \n \n :type memberAccountId: string\n :param memberAccountId: The AWS ID of the Amazon Macie member account whose S3 resources' classification types you want to update.\n\n :type s3ResourcesUpdate: list\n :param s3ResourcesUpdate: [REQUIRED]\n The S3 resources whose classification types you want to update.\n (dict) --The S3 resources whose classification types you want to update. 
This data type is used as a request parameter in the UpdateS3Resources action.\n bucketName (string) -- [REQUIRED]The name of the S3 bucket whose classification types you want to update.\n prefix (string) --The prefix of the S3 bucket whose classification types you want to update.\n classificationTypeUpdate (dict) -- [REQUIRED]The classification type that you want to update for the resource associated with Amazon Macie.\n oneTime (string) --A one-time classification of all of the existing objects in a specified S3 bucket.\n continuous (string) --A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.\n \n \n\n :rtype: dict\n :return: {\n 'failedS3Resources': [\n {\n 'failedItem': {\n 'bucketName': 'string',\n 'prefix': 'string'\n },\n 'errorCode': 'string',\n 'errorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6574208736419678, "alphanum_fraction": 0.666242241859436, "avg_line_length": 37.98936080932617, "blob_id": "9c9b0b2922600796a8276ec8e9cab59ebc07cf79", "content_id": "5f0fb7522ef93f89883201b3e6a894e2718ec925", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10996, "license_type": "permissive", "max_line_length": 410, "num_lines": 282, "path": "/pyboto3/mediastoredata.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_object(Path=None):\n \"\"\"\n Deletes an object at the specified path.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_object(\n Path='string'\n )\n \n \n :type Path: string\n :param Path: [REQUIRED]\n The path (including the file name) where the object is stored in the container. 
Format: <folder name>/<folder name>/<file name>\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_object(Path=None):\n \"\"\"\n Gets the headers for an object at the specified path.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_object(\n Path='string'\n )\n \n \n :type Path: string\n :param Path: [REQUIRED]\n The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>\n \n\n :rtype: dict\n :return: {\n 'ETag': 'string',\n 'ContentType': 'string',\n 'ContentLength': 123,\n 'CacheControl': 'string',\n 'LastModified': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_object(Path=None, Range=None):\n \"\"\"\n Downloads the object at the specified path.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object(\n Path='string',\n Range='string'\n )\n \n \n :type Path: string\n :param Path: [REQUIRED]\n The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>\n For example, to upload the file mlaw.avi to the folder path premium\\canada in the container movies , enter the path premium/canada/mlaw.avi .\n Do not include the container name in this path.\n If the path includes any folders that don't exist yet, the service creates them. For example, suppose you have an existing premium/usa subfolder. If you specify premium/canada , the service creates a canada subfolder in the premium folder. You then have two subfolders, usa and canada , in the premium folder.\n There is no correlation between the path to the source and the path (folders) in the container in AWS Elemental MediaStore.\n For more information about folders and how they exist in a container, see the AWS Elemental MediaStore User Guide .\n The file name is the name that is assigned to the file that you upload. The file can have the same name inside and outside of AWS Elemental MediaStore, or it can have a different name. The file name can include or omit an extension.\n \n\n :type Range: string\n :param Range: The byte range of the object to retrieve. For more information about the Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 .\n\n :rtype: dict\n :return: {\n 'Body': StreamingBody(),\n 'CacheControl': 'string',\n 'ContentRange': 'string',\n 'ContentLength': 123,\n 'ContentType': 'string',\n 'ETag': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'StatusCode': 123\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_items(Path=None, MaxResults=None, NextToken=None):\n \"\"\"\n Provides a list of metadata entries about folders and objects in the specified folder.\n See also: AWS API Documentation\n \n \n :example: response = client.list_items(\n Path='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type Path: string\n :param Path: The path in the container from which to retrieve items. Format: <folder name>/<folder name>/<file name>\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return per API request. For example, you submit a ListItems request with MaxResults set at 500. Although 2,000 items match your request, the service returns no more than the first 500 items. (The service also returns a NextToken value that you can use to fetch the next batch of results.) The service might return fewer results than the MaxResults value.\n If MaxResults is not included in the request, the service defaults to pagination with a maximum of 1,000 results per page.\n \n\n :type NextToken: string\n :param NextToken: The token that identifies which batch of results that you want to see. For example, you submit a ListItems request with MaxResults set at 500. The service returns the first batch of results (up to 500) and a NextToken value. To see the next batch of results, you can submit the ListItems request a second time and specify the NextToken value.\n Tokens expire after 15 minutes.\n \n\n :rtype: dict\n :return: {\n 'Items': [\n {\n 'Name': 'string',\n 'Type': 'OBJECT'|'FOLDER',\n 'ETag': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'ContentType': 'string',\n 'ContentLength': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_object(Body=None, Path=None, ContentType=None, CacheControl=None, StorageClass=None):\n \"\"\"\n Uploads an object to the specified path. Object sizes are limited to 25 MB.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object(\n Body=b'bytes'|file,\n Path='string',\n ContentType='string',\n CacheControl='string',\n StorageClass='TEMPORAL'\n )\n \n \n :type Body: bytes or seekable file-like object\n :param Body: [REQUIRED]\n The bytes to be stored.\n \n\n :type Path: string\n :param Path: [REQUIRED]\n The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>\n For example, to upload the file mlaw.avi to the folder path premium\\canada in the container movies , enter the path premium/canada/mlaw.avi .\n Do not include the container name in this path.\n If the path includes any folders that don't exist yet, the service creates them. For example, suppose you have an existing premium/usa subfolder. If you specify premium/canada , the service creates a canada subfolder in the premium folder. 
You then have two subfolders, usa and canada , in the premium folder.\n There is no correlation between the path to the source and the path (folders) in the container in AWS Elemental MediaStore.\n For more information about folders and how they exist in a container, see the AWS Elemental MediaStore User Guide .\n The file name is the name that is assigned to the file that you upload. The file can have the same name inside and outside of AWS Elemental MediaStore, or it can have the same name. The file name can include or omit an extension.\n \n\n :type ContentType: string\n :param ContentType: The content type of the object.\n\n :type CacheControl: string\n :param CacheControl: An optional CacheControl header that allows the caller to control the object's cache behavior. Headers can be passed in as specified in the HTTP at https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 .\n Headers with a custom user-defined value are also accepted.\n \n\n :type StorageClass: string\n :param StorageClass: Indicates the storage class of a Put request. Defaults to high-performance temporal storage class, and objects are persisted into durable storage shortly after being received.\n\n :rtype: dict\n :return: {\n 'ContentSHA256': 'string',\n 'ETag': 'string',\n 'StorageClass': 'TEMPORAL'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5518496036529541, "alphanum_fraction": 0.5568788051605225, "avg_line_length": 37.70602798461914, "blob_id": "3debbbab9f0490c3bd331a0bcb23115ae2b7cf52", "content_id": "6ed3001610ba6bd5186b39725d6fba06e4f3297a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78342, "license_type": "permissive", "max_line_length": 664, "num_lines": 2024, "path": "/pyboto3/iotanalytics.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_put_message(channelName=None, messages=None):\n \"\"\"\n Sends messages to a channel.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_put_message(\n channelName='string',\n messages=[\n {\n 'messageId': 'string',\n 'payload': b'bytes'\n },\n ]\n )\n \n \n :type channelName: string\n :param channelName: [REQUIRED]\n The name of the channel where the messages are sent.\n \n\n :type messages: list\n :param messages: [REQUIRED]\n The list of messages to be sent. 
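# --- Hedged usage sketch ---
# A minimal sketch of the batch_put_message call documented above; the
# channel name is an illustrative assumption, and the payload field names
# follow the naming rules spelled out just below. Each entry needs a
# messageId that is unique within the batch and a bytes payload.
import json
import boto3

iota = boto3.client('iotanalytics')

readings = [{'temp_01': 29, 'humidity_01': 40}, {'temp_01': 31, 'humidity_01': 38}]
messages = [
    {'messageId': str(i), 'payload': json.dumps(r).encode('utf-8')}
    for i, r in enumerate(readings)
]
resp = iota.batch_put_message(channelName='device_channel', messages=messages)

# Entries that failed validation come back here; an empty list means success.
print(resp['batchPutMessageErrorEntries'])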
Each message has format: '{ 'messageId': 'string', 'payload': 'string'}'.\n Note that the field names of message payloads (data) that you send to AWS IoT Analytics:\n Must contain only alphanumeric characters and underscores (_); no other special characters are allowed.\n Must begin with an alphabetic character or single underscore (_).\n Cannot contain hyphens (-).\n In regular expression terms: '^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$'.\n Cannot be greater than 255 characters.\n Are case-insensitive. (Fields named 'foo' and 'FOO' in the same payload are considered duplicates.)\n For example, {'temp_01': 29} or {'_temp_01': 29} are valid, but {'temp-01': 29}, {'01_temp': 29} or {'__temp_01': 29} are invalid in message payloads.\n (dict) --Information about a message.\n messageId (string) -- [REQUIRED]The ID you wish to assign to the message. Each 'messageId' must be unique within each batch sent.\n payload (bytes) -- [REQUIRED]The payload of the message. This may be a JSON string or a Base64-encoded string representing binary data (in which case you must decode it by means of a pipeline activity).\n \n \n\n :rtype: dict\n :return: {\n 'batchPutMessageErrorEntries': [\n {\n 'messageId': 'string',\n 'errorCode': 'string',\n 'errorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_pipeline_reprocessing(pipelineName=None, reprocessingId=None):\n \"\"\"\n Cancels the reprocessing of data through the pipeline.\n See also: AWS API Documentation\n \n \n :example: response = client.cancel_pipeline_reprocessing(\n pipelineName='string',\n reprocessingId='string'\n )\n \n \n :type pipelineName: string\n :param pipelineName: [REQUIRED]\n The name of the pipeline for which data reprocessing is canceled.\n \n\n :type reprocessingId: string\n :param reprocessingId: [REQUIRED]\n The ID of the reprocessing task (returned by 'StartPipelineReprocessing').\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_channel(channelName=None, retentionPeriod=None, tags=None):\n \"\"\"\n Creates a channel. A channel collects data from an MQTT topic and archives the raw, unprocessed messages before publishing the data to a pipeline.\n See also: AWS API Documentation\n \n \n :example: response = client.create_channel(\n channelName='string',\n retentionPeriod={\n 'unlimited': True|False,\n 'numberOfDays': 123\n },\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type channelName: string\n :param channelName: [REQUIRED]\n The name of the channel.\n \n\n :type retentionPeriod: dict\n :param retentionPeriod: How long, in days, message data is kept for the channel.\n unlimited (boolean) --If true, message data is kept indefinitely.\n numberOfDays (integer) --The number of days that message data is kept. 
The 'unlimited' parameter must be false.\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the channel.\n (dict) --A set of key/value pairs which are used to manage the resource.\n key (string) -- [REQUIRED]The tag's key.\n value (string) -- [REQUIRED]The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'channelName': 'string',\n 'channelArn': 'string',\n 'retentionPeriod': {\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_dataset(datasetName=None, actions=None, triggers=None, contentDeliveryRules=None, retentionPeriod=None, tags=None):\n \"\"\"\n Creates a data set. A data set stores data retrieved from a data store by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application). This operation creates the skeleton of a data set. The data set can be populated manually by calling \"CreateDatasetContent\" or automatically according to a \"trigger\" you specify.\n See also: AWS API Documentation\n \n \n :example: response = client.create_dataset(\n datasetName='string',\n actions=[\n {\n 'actionName': 'string',\n 'queryAction': {\n 'sqlQuery': 'string',\n 'filters': [\n {\n 'deltaTime': {\n 'offsetSeconds': 123,\n 'timeExpression': 'string'\n }\n },\n ]\n },\n 'containerAction': {\n 'image': 'string',\n 'executionRoleArn': 'string',\n 'resourceConfiguration': {\n 'computeType': 'ACU_1'|'ACU_2',\n 'volumeSizeInGB': 123\n },\n 'variables': [\n {\n 'name': 'string',\n 'stringValue': 'string',\n 'doubleValue': 123.0,\n 'datasetContentVersionValue': {\n 'datasetName': 'string'\n },\n 'outputFileUriValue': {\n 'fileName': 'string'\n }\n },\n ]\n }\n },\n ],\n triggers=[\n {\n 'schedule': {\n 'expression': 'string'\n },\n 'dataset': {\n 'name': 'string'\n }\n },\n ],\n contentDeliveryRules=[\n {\n 'entryName': 'string',\n 'destination': {\n 'iotEventsDestinationConfiguration': {\n 'inputName': 'string',\n 'roleArn': 'string'\n }\n }\n },\n ],\n retentionPeriod={\n 'unlimited': True|False,\n 'numberOfDays': 123\n },\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set.\n \n\n :type actions: list\n :param actions: [REQUIRED]\n A list of actions that create the data set contents.\n (dict) --A 'DatasetAction' object that specifies how data set contents are automatically created.\n actionName (string) --The name of the data set action by which data set contents are automatically created.\n queryAction (dict) --An 'SqlQueryDatasetAction' object that uses an SQL query to automatically create data set contents.\n sqlQuery (string) -- [REQUIRED]A SQL query string.\n filters (list) --Pre-filters applied to message data.\n (dict) --Information which is used to filter message data, to segregate it according to the time frame in which it arrives.\n deltaTime (dict) --Used to limit data to that which has arrived since the last execution of the action.\n offsetSeconds (integer) -- [REQUIRED]The number of seconds of estimated 'in flight' lag time of message data. When you create data set contents using message data from a specified time frame, some message data may still be 'in flight' when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the 'in flight' time of your message data, so that data not processed from a previous time frame will be included with the next time frame. 
Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.\n timeExpression (string) -- [REQUIRED]An expression by which the time of the message data may be determined. This may be the name of a timestamp field, or a SQL expression which is used to derive the time the message data was generated.\n \n \n containerAction (dict) --Information which allows the system to run a containerized application in order to create the data set contents. The application must be in a Docker container along with any needed support libraries.\n image (string) -- [REQUIRED]The ARN of the Docker container stored in your account. The Docker container contains an application and needed support libraries and is used to generate data set contents.\n executionRoleArn (string) -- [REQUIRED]The ARN of the role which gives permission to the system to access needed resources in order to run the 'containerAction'. This includes, at minimum, permission to retrieve the data set contents which are the input to the containerized application.\n resourceConfiguration (dict) -- [REQUIRED]Configuration of the resource which executes the 'containerAction'.\n computeType (string) -- [REQUIRED]The type of the compute resource used to execute the 'containerAction'. Possible values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB).\n volumeSizeInGB (integer) -- [REQUIRED]The size (in GB) of the persistent storage available to the resource instance used to execute the 'containerAction' (min: 1, max: 50).\n variables (list) --The values of variables used within the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of 'stringValue', 'datasetContentVersionValue', or 'outputFileUriValue'.\n (dict) --An instance of a variable to be passed to the 'containerAction' execution. Each variable must have a name and a value given by one of 'stringValue', 'datasetContentVersionValue', or 'outputFileUriValue'.\n name (string) -- [REQUIRED]The name of the variable.\n stringValue (string) --The value of the variable as a string.\n doubleValue (float) --The value of the variable as a double (numeric).\n datasetContentVersionValue (dict) --The value of the variable as a structure that specifies a data set content version.\n datasetName (string) -- [REQUIRED]The name of the data set whose latest contents are used as input to the notebook or application.\n outputFileUriValue (dict) --The value of the variable as a structure that specifies an output file URI.\n fileName (string) -- [REQUIRED]The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.\n \n \n \n \n\n :type triggers: list\n :param triggers: A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.\n (dict) --The 'DatasetTrigger' that specifies when the data set is automatically updated.\n schedule (dict) --The 'Schedule' when the trigger is initiated.\n expression (string) --The expression that defines when to trigger an update. 
For more information, see Schedule Expressions for Rules in the Amazon CloudWatch documentation.\n dataset (dict) --The data set whose content creation triggers the creation of this data set's contents.\n name (string) -- [REQUIRED]The name of the data set whose content generation triggers the new data set content generation.\n \n \n\n :type contentDeliveryRules: list\n :param contentDeliveryRules: When data set contents are created they are delivered to destinations specified here.\n (dict) --When data set contents are created they are delivered to destination specified here.\n entryName (string) --The name of the data set content delivery rules entry.\n destination (dict) -- [REQUIRED]The destination to which data set contents are delivered.\n iotEventsDestinationConfiguration (dict) --Configuration information for delivery of data set contents to AWS IoT Events.\n inputName (string) -- [REQUIRED]The name of the AWS IoT Events input to which data set contents are delivered.\n roleArn (string) -- [REQUIRED]The ARN of the role which grants AWS IoT Analytics permission to deliver data set contents to an AWS IoT Events input.\n \n \n \n\n :type retentionPeriod: dict\n :param retentionPeriod: [Optional] How long, in days, message data is kept for the data set. If not given or set to null, the latest version of the dataset content plus the latest succeeded version (if they are different) are retained for at most 90 days.\n unlimited (boolean) --If true, message data is kept indefinitely.\n numberOfDays (integer) --The number of days that message data is kept. The 'unlimited' parameter must be false.\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the data set.\n (dict) --A set of key/value pairs which are used to manage the resource.\n key (string) -- [REQUIRED]The tag's key.\n value (string) -- [REQUIRED]The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'datasetName': 'string',\n 'datasetArn': 'string',\n 'retentionPeriod': {\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_dataset_content(datasetName=None):\n \"\"\"\n Creates the content of a data set by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application).\n See also: AWS API Documentation\n \n \n :example: response = client.create_dataset_content(\n datasetName='string'\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set.\n \n\n :rtype: dict\n :return: {\n 'versionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_datastore(datastoreName=None, retentionPeriod=None, tags=None):\n \"\"\"\n Creates a data store, which is a repository for messages.\n See also: AWS API Documentation\n \n \n :example: response = client.create_datastore(\n datastoreName='string',\n retentionPeriod={\n 'unlimited': True|False,\n 'numberOfDays': 123\n },\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type datastoreName: string\n :param datastoreName: [REQUIRED]\n The name of the data store.\n \n\n :type retentionPeriod: dict\n :param retentionPeriod: How long, in days, message data is kept for the data store.\n unlimited (boolean) --If true, message data is kept indefinitely.\n numberOfDays (integer) --The number of days that message data is kept. 
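# --- Hedged usage sketch ---
# One way to combine the create_dataset pieces documented above: a SQL
# action with a deltaTime pre-filter (only data that arrived since the last
# run, with a 5-minute in-flight allowance) and a daily cron trigger. The
# dataset name, datastore name, and timestamp column are illustrative
# assumptions.
import boto3

iota = boto3.client('iotanalytics')
iota.create_dataset(
    datasetName='daily_temps',
    actions=[{
        'actionName': 'select_recent',
        'queryAction': {
            'sqlQuery': 'SELECT * FROM my_datastore',
            'filters': [{
                'deltaTime': {
                    'offsetSeconds': 300,
                    'timeExpression': 'from_unixtime(time)',
                }
            }],
        },
    }],
    triggers=[{'schedule': {'expression': 'cron(0 12 * * ? *)'}}],
    retentionPeriod={'unlimited': False, 'numberOfDays': 90},
)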
The 'unlimited' parameter must be false.\n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the data store.\n (dict) --A set of key/value pairs which are used to manage the resource.\n key (string) -- [REQUIRED]The tag's key.\n value (string) -- [REQUIRED]The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'datastoreName': 'string',\n 'datastoreArn': 'string',\n 'retentionPeriod': {\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_pipeline(pipelineName=None, pipelineActivities=None, tags=None):\n \"\"\"\n Creates a pipeline. A pipeline consumes messages from one or more channels and allows you to process the messages before storing them in a data store.\n See also: AWS API Documentation\n \n \n :example: response = client.create_pipeline(\n pipelineName='string',\n pipelineActivities=[\n {\n 'channel': {\n 'name': 'string',\n 'channelName': 'string',\n 'next': 'string'\n },\n 'lambda': {\n 'name': 'string',\n 'lambdaName': 'string',\n 'batchSize': 123,\n 'next': 'string'\n },\n 'datastore': {\n 'name': 'string',\n 'datastoreName': 'string'\n },\n 'addAttributes': {\n 'name': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'next': 'string'\n },\n 'removeAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'selectAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'filter': {\n 'name': 'string',\n 'filter': 'string',\n 'next': 'string'\n },\n 'math': {\n 'name': 'string',\n 'attribute': 'string',\n 'math': 'string',\n 'next': 'string'\n },\n 'deviceRegistryEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n },\n 'deviceShadowEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n }\n },\n ],\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type pipelineName: string\n :param pipelineName: [REQUIRED]\n The name of the pipeline.\n \n\n :type pipelineActivities: list\n :param pipelineActivities: [REQUIRED]\n A list of pipeline activities.\n The list can be 1-25 PipelineActivity objects. 
Activities perform transformations on your messages, such as removing, renaming, or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.\n (dict) --An activity that performs a transformation on a message.\n channel (dict) --Determines the source of the messages to be processed.\n name (string) -- [REQUIRED]The name of the 'channel' activity.\n channelName (string) -- [REQUIRED]The name of the channel from which the messages are processed.\n next (string) --The next activity in the pipeline.\n lambda (dict) --Runs a Lambda function to modify the message.\n name (string) -- [REQUIRED]The name of the 'lambda' activity.\n lambdaName (string) -- [REQUIRED]The name of the Lambda function that is run on the message.\n batchSize (integer) -- [REQUIRED]The number of messages passed to the Lambda function for processing.\n The AWS Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.\n next (string) --The next activity in the pipeline.\n datastore (dict) --Specifies where to store the processed message data.\n name (string) -- [REQUIRED]The name of the 'datastore' activity.\n datastoreName (string) -- [REQUIRED]The name of the data store where processed messages are stored.\n addAttributes (dict) --Adds other attributes based on existing attributes in the message.\n name (string) -- [REQUIRED]The name of the 'addAttributes' activity.\n attributes (dict) -- [REQUIRED]A list of 1-50 'AttributeNameMapping' objects that map an existing attribute to a new attribute.\n Note\n The existing attributes remain in the message, so if you want to remove the originals, use 'RemoveAttributeActivity'.\n (string) --\n (string) --\n \n next (string) --The next activity in the pipeline.\n removeAttributes (dict) --Removes attributes from a message.\n name (string) -- [REQUIRED]The name of the 'removeAttributes' activity.\n attributes (list) -- [REQUIRED]A list of 1-50 attributes to remove from the message.\n (string) --\n next (string) --The next activity in the pipeline.\n selectAttributes (dict) --Creates a new message using only the specified attributes from the original message.\n name (string) -- [REQUIRED]The name of the 'selectAttributes' activity.\n attributes (list) -- [REQUIRED]A list of the attributes to select from the message.\n (string) --\n next (string) --The next activity in the pipeline.\n filter (dict) --Filters a message based on its attributes.\n name (string) -- [REQUIRED]The name of the 'filter' activity.\n filter (string) -- [REQUIRED]An expression that looks like a SQL WHERE clause that must return a Boolean value.\n next (string) --The next activity in the pipeline.\n math (dict) --Computes an arithmetic expression using the message's attributes and adds it to the message.\n name (string) -- [REQUIRED]The name of the 'math' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that contains the result of the math operation.\n math (string) -- [REQUIRED]An expression that uses one or more existing attributes and must return an integer value.\n next (string) --The next activity in the pipeline.\n deviceRegistryEnrich (dict) --Adds data from the AWS IoT device registry to your message.\n name (string) -- [REQUIRED]The name of the 'deviceRegistryEnrich' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that is added to 
the message.\n thingName (string) -- [REQUIRED]The name of the IoT device whose registry information is added to the message.\n roleArn (string) -- [REQUIRED]The ARN of the role that allows access to the device's registry information.\n next (string) --The next activity in the pipeline.\n deviceShadowEnrich (dict) --Adds information from the AWS IoT Device Shadows service to a message.\n name (string) -- [REQUIRED]The name of the 'deviceShadowEnrich' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that is added to the message.\n thingName (string) -- [REQUIRED]The name of the IoT device whose shadow information is added to the message.\n roleArn (string) -- [REQUIRED]The ARN of the role that allows access to the device's shadow.\n next (string) --The next activity in the pipeline.\n \n \n\n :type tags: list\n :param tags: Metadata which can be used to manage the pipeline.\n (dict) --A set of key/value pairs which are used to manage the resource.\n key (string) -- [REQUIRED]The tag's key.\n value (string) -- [REQUIRED]The tag's value.\n \n \n\n :rtype: dict\n :return: {\n 'pipelineName': 'string',\n 'pipelineArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_channel(channelName=None):\n \"\"\"\n Deletes the specified channel.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_channel(\n channelName='string'\n )\n \n \n :type channelName: string\n :param channelName: [REQUIRED]\n The name of the channel to delete.\n \n\n \"\"\"\n pass\n\ndef delete_dataset(datasetName=None):\n \"\"\"\n Deletes the specified data set.\n You do not have to delete the content of the data set before you perform this operation.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_dataset(\n datasetName='string'\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set to delete.\n \n\n \"\"\"\n pass\n\ndef delete_dataset_content(datasetName=None, versionId=None):\n \"\"\"\n Deletes the content of the specified data set.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_dataset_content(\n datasetName='string',\n versionId='string'\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set whose content is deleted.\n \n\n :type versionId: string\n :param versionId: The version of the data set whose content is deleted. You can also use the strings '$LATEST' or '$LATEST_SUCCEEDED' to delete the latest or latest successfully completed data set. 
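# --- Hedged usage sketch ---
# A minimal three-activity pipeline of the shape documented for
# create_pipeline above: channel -> filter -> datastore, linked through each
# activity's 'next' field. The channel, datastore, and attribute names are
# illustrative assumptions.
import boto3

iota = boto3.client('iotanalytics')
iota.create_pipeline(
    pipelineName='temps_pipeline',
    pipelineActivities=[
        {'channel': {'name': 'source', 'channelName': 'device_channel',
                     'next': 'drop_cold'}},
        {'filter': {'name': 'drop_cold', 'filter': 'temp_01 > 0',
                    'next': 'store'}},
        {'datastore': {'name': 'store', 'datastoreName': 'my_datastore'}},
    ],
)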
If not specified, '$LATEST_SUCCEEDED' is the default.\n\n \"\"\"\n pass\n\ndef delete_datastore(datastoreName=None):\n \"\"\"\n Deletes the specified data store.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_datastore(\n datastoreName='string'\n )\n \n \n :type datastoreName: string\n :param datastoreName: [REQUIRED]\n The name of the data store to delete.\n \n\n \"\"\"\n pass\n\ndef delete_pipeline(pipelineName=None):\n \"\"\"\n Deletes the specified pipeline.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_pipeline(\n pipelineName='string'\n )\n \n \n :type pipelineName: string\n :param pipelineName: [REQUIRED]\n The name of the pipeline to delete.\n \n\n \"\"\"\n pass\n\ndef describe_channel(channelName=None, includeStatistics=None):\n \"\"\"\n Retrieves information about a channel.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_channel(\n channelName='string',\n includeStatistics=True|False\n )\n \n \n :type channelName: string\n :param channelName: [REQUIRED]\n The name of the channel whose information is retrieved.\n \n\n :type includeStatistics: boolean\n :param includeStatistics: If true, additional statistical information about the channel is included in the response.\n\n :rtype: dict\n :return: {\n 'channel': {\n 'name': 'string',\n 'arn': 'string',\n 'status': 'CREATING'|'ACTIVE'|'DELETING',\n 'retentionPeriod': {\n 'unlimited': True|False,\n 'numberOfDays': 123\n },\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1)\n },\n 'statistics': {\n 'size': {\n 'estimatedSizeInBytes': 123.0,\n 'estimatedOn': datetime(2015, 1, 1)\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_dataset(datasetName=None):\n \"\"\"\n Retrieves information about a data set.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_dataset(\n datasetName='string'\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set whose information is retrieved.\n \n\n :rtype: dict\n :return: {\n 'dataset': {\n 'name': 'string',\n 'arn': 'string',\n 'actions': [\n {\n 'actionName': 'string',\n 'queryAction': {\n 'sqlQuery': 'string',\n 'filters': [\n {\n 'deltaTime': {\n 'offsetSeconds': 123,\n 'timeExpression': 'string'\n }\n },\n ]\n },\n 'containerAction': {\n 'image': 'string',\n 'executionRoleArn': 'string',\n 'resourceConfiguration': {\n 'computeType': 'ACU_1'|'ACU_2',\n 'volumeSizeInGB': 123\n },\n 'variables': [\n {\n 'name': 'string',\n 'stringValue': 'string',\n 'doubleValue': 123.0,\n 'datasetContentVersionValue': {\n 'datasetName': 'string'\n },\n 'outputFileUriValue': {\n 'fileName': 'string'\n }\n },\n ]\n }\n },\n ],\n 'triggers': [\n {\n 'schedule': {\n 'expression': 'string'\n },\n 'dataset': {\n 'name': 'string'\n }\n },\n ],\n 'contentDeliveryRules': [\n {\n 'entryName': 'string',\n 'destination': {\n 'iotEventsDestinationConfiguration': {\n 'inputName': 'string',\n 'roleArn': 'string'\n }\n }\n },\n ],\n 'status': 'CREATING'|'ACTIVE'|'DELETING',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1),\n 'retentionPeriod': {\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_datastore(datastoreName=None, includeStatistics=None):\n \"\"\"\n Retrieves information about a data store.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_datastore(\n datastoreName='string',\n includeStatistics=True|False\n )\n \n \n :type 
datastoreName: string\n :param datastoreName: [REQUIRED]\n The name of the data store\n \n\n :type includeStatistics: boolean\n :param includeStatistics: If true, additional statistical information about the datastore is included in the response.\n\n :rtype: dict\n :return: {\n 'datastore': {\n 'name': 'string',\n 'arn': 'string',\n 'status': 'CREATING'|'ACTIVE'|'DELETING',\n 'retentionPeriod': {\n 'unlimited': True|False,\n 'numberOfDays': 123\n },\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1)\n },\n 'statistics': {\n 'size': {\n 'estimatedSizeInBytes': 123.0,\n 'estimatedOn': datetime(2015, 1, 1)\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_logging_options():\n \"\"\"\n Retrieves the current settings of the AWS IoT Analytics logging options.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_logging_options()\n \n \n :rtype: dict\n :return: {\n 'loggingOptions': {\n 'roleArn': 'string',\n 'level': 'ERROR',\n 'enabled': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_pipeline(pipelineName=None):\n \"\"\"\n Retrieves information about a pipeline.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_pipeline(\n pipelineName='string'\n )\n \n \n :type pipelineName: string\n :param pipelineName: [REQUIRED]\n The name of the pipeline whose information is retrieved.\n \n\n :rtype: dict\n :return: {\n 'pipeline': {\n 'name': 'string',\n 'arn': 'string',\n 'activities': [\n {\n 'channel': {\n 'name': 'string',\n 'channelName': 'string',\n 'next': 'string'\n },\n 'lambda': {\n 'name': 'string',\n 'lambdaName': 'string',\n 'batchSize': 123,\n 'next': 'string'\n },\n 'datastore': {\n 'name': 'string',\n 'datastoreName': 'string'\n },\n 'addAttributes': {\n 'name': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'next': 'string'\n },\n 'removeAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'selectAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'filter': {\n 'name': 'string',\n 'filter': 'string',\n 'next': 'string'\n },\n 'math': {\n 'name': 'string',\n 'attribute': 'string',\n 'math': 'string',\n 'next': 'string'\n },\n 'deviceRegistryEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n },\n 'deviceShadowEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n }\n },\n ],\n 'reprocessingSummaries': [\n {\n 'id': 'string',\n 'status': 'RUNNING'|'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'creationTime': datetime(2015, 1, 1)\n },\n ],\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_dataset_content(datasetName=None, versionId=None):\n \"\"\"\n Retrieves the contents of a data set as pre-signed URIs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dataset_content(\n datasetName='string',\n versionId='string'\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set whose contents are retrieved.\n \n\n :type versionId: string\n :param versionId: The version of the data set whose contents are retrieved. You can also use the strings '$LATEST' or '$LATEST_SUCCEEDED' to retrieve the contents of the latest or latest successfully completed data set. If not specified, '$LATEST_SUCCEEDED' is the default.\n\n :rtype: dict\n :return: {\n 'entries': [\n {\n 'entryName': 'string',\n 'dataURI': 'string'\n },\n ],\n 'timestamp': datetime(2015, 1, 1),\n 'status': {\n 'state': 'CREATING'|'SUCCEEDED'|'FAILED',\n 'reason': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_channels(nextToken=None, maxResults=None):\n \"\"\"\n Retrieves a list of channels.\n See also: AWS API Documentation\n \n \n :example: response = client.list_channels(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this request.\n The default value is 100.\n \n\n :rtype: dict\n :return: {\n 'channelSummaries': [\n {\n 'channelName': 'string',\n 'status': 'CREATING'|'ACTIVE'|'DELETING',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_dataset_contents(datasetName=None, nextToken=None, maxResults=None, scheduledOnOrAfter=None, scheduledBefore=None):\n \"\"\"\n Lists information about data set contents that have been created.\n See also: AWS API Documentation\n \n \n :example: response = client.list_dataset_contents(\n datasetName='string',\n nextToken='string',\n maxResults=123,\n scheduledOnOrAfter=datetime(2015, 1, 1),\n scheduledBefore=datetime(2015, 1, 1)\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set whose contents information you want to list.\n \n\n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this request.\n\n :type scheduledOnOrAfter: datetime\n :param scheduledOnOrAfter: A filter to limit results to those data set contents whose creation is scheduled on or after the given 
time. See the field triggers.schedule in the CreateDataset request. (timestamp)\n\n :type scheduledBefore: datetime\n :param scheduledBefore: A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)\n\n :rtype: dict\n :return: {\n 'datasetContentSummaries': [\n {\n 'version': 'string',\n 'status': {\n 'state': 'CREATING'|'SUCCEEDED'|'FAILED',\n 'reason': 'string'\n },\n 'creationTime': datetime(2015, 1, 1),\n 'scheduleTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_datasets(nextToken=None, maxResults=None):\n \"\"\"\n Retrieves information about data sets.\n See also: AWS API Documentation\n \n \n :example: response = client.list_datasets(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this request.\n The default value is 100.\n \n\n :rtype: dict\n :return: {\n 'datasetSummaries': [\n {\n 'datasetName': 'string',\n 'status': 'CREATING'|'ACTIVE'|'DELETING',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1),\n 'triggers': [\n {\n 'schedule': {\n 'expression': 'string'\n },\n 'dataset': {\n 'name': 'string'\n }\n },\n ],\n 'actions': [\n {\n 'actionName': 'string',\n 'actionType': 'QUERY'|'CONTAINER'\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_datastores(nextToken=None, maxResults=None):\n \"\"\"\n Retrieves a list of data stores.\n See also: AWS API Documentation\n \n \n :example: response = client.list_datastores(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this request.\n The default value is 100.\n \n\n :rtype: dict\n :return: {\n 'datastoreSummaries': [\n {\n 'datastoreName': 'string',\n 'status': 'CREATING'|'ACTIVE'|'DELETING',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_pipelines(nextToken=None, maxResults=None):\n \"\"\"\n Retrieves a list of pipelines.\n See also: AWS API Documentation\n \n \n :example: response = client.list_pipelines(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token for the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return in this request.\n The default value is 100.\n \n\n :rtype: dict\n :return: {\n 'pipelineSummaries': [\n {\n 'pipelineName': 'string',\n 'reprocessingSummaries': [\n {\n 'id': 'string',\n 'status': 'RUNNING'|'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'creationTime': datetime(2015, 1, 1)\n },\n ],\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdateTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_resource(resourceArn=None):\n \"\"\"\n Lists the tags (metadata) which you have assigned to the resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n resourceArn='string'\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The ARN of the resource whose tags you want to list.\n \n\n :rtype: dict\n :return: {\n 
'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef put_logging_options(loggingOptions=None):\n \"\"\"\n Sets or updates the AWS IoT Analytics logging options.\n Note that if you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy) it takes up to 5 minutes for that change to take effect.\n See also: AWS API Documentation\n \n \n :example: response = client.put_logging_options(\n loggingOptions={\n 'roleArn': 'string',\n 'level': 'ERROR',\n 'enabled': True|False\n }\n )\n \n \n :type loggingOptions: dict\n :param loggingOptions: [REQUIRED]\n The new values of the AWS IoT Analytics logging options.\n roleArn (string) -- [REQUIRED]The ARN of the role that grants permission to AWS IoT Analytics to perform logging.\n level (string) -- [REQUIRED]The logging level. Currently, only 'ERROR' is supported.\n enabled (boolean) -- [REQUIRED]If true, logging is enabled for AWS IoT Analytics.\n \n\n \"\"\"\n pass\n\ndef run_pipeline_activity(pipelineActivity=None, payloads=None):\n \"\"\"\n Simulates the results of running a pipeline activity on a message payload.\n See also: AWS API Documentation\n \n \n :example: response = client.run_pipeline_activity(\n pipelineActivity={\n 'channel': {\n 'name': 'string',\n 'channelName': 'string',\n 'next': 'string'\n },\n 'lambda': {\n 'name': 'string',\n 'lambdaName': 'string',\n 'batchSize': 123,\n 'next': 'string'\n },\n 'datastore': {\n 'name': 'string',\n 'datastoreName': 'string'\n },\n 'addAttributes': {\n 'name': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'next': 'string'\n },\n 'removeAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'selectAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'filter': {\n 'name': 'string',\n 'filter': 'string',\n 'next': 'string'\n },\n 'math': {\n 'name': 'string',\n 'attribute': 'string',\n 'math': 'string',\n 'next': 'string'\n },\n 'deviceRegistryEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n },\n 'deviceShadowEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n }\n },\n payloads=[\n b'bytes',\n ]\n )\n \n \n :type pipelineActivity: dict\n :param pipelineActivity: [REQUIRED]\n The pipeline activity that is run. This must not be a 'channel' activity or a 'datastore' activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. 
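# --- Hedged usage sketch ---
# Dry-runs a single 'math' activity against one sample payload with
# run_pipeline_activity, per the note above that 'channel' and 'datastore'
# activities cannot be simulated. The attribute names are illustrative
# assumptions.
import json
import boto3

iota = boto3.client('iotanalytics')
resp = iota.run_pipeline_activity(
    pipelineActivity={
        'math': {
            'name': 'to_kelvin',
            'attribute': 'temp_k',
            'math': 'temp_01 + 273',
        }
    },
    payloads=[json.dumps({'temp_01': 29}).encode('utf-8')],
)
# Each returned payload is bytes containing the transformed message.
print([json.loads(p) for p in resp['payloads']])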
If a 'lambda' activity is specified, only short-running Lambda functions (those with a timeout of 30 seconds or less) can be used.\n channel (dict) --Determines the source of the messages to be processed.\n name (string) -- [REQUIRED]The name of the 'channel' activity.\n channelName (string) -- [REQUIRED]The name of the channel from which the messages are processed.\n next (string) --The next activity in the pipeline.\n lambda (dict) --Runs a Lambda function to modify the message.\n name (string) -- [REQUIRED]The name of the 'lambda' activity.\n lambdaName (string) -- [REQUIRED]The name of the Lambda function that is run on the message.\n batchSize (integer) -- [REQUIRED]The number of messages passed to the Lambda function for processing.\n The AWS Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.\n next (string) --The next activity in the pipeline.\n datastore (dict) --Specifies where to store the processed message data.\n name (string) -- [REQUIRED]The name of the 'datastore' activity.\n datastoreName (string) -- [REQUIRED]The name of the data store where processed messages are stored.\n addAttributes (dict) --Adds other attributes based on existing attributes in the message.\n name (string) -- [REQUIRED]The name of the 'addAttributes' activity.\n attributes (dict) -- [REQUIRED]A list of 1-50 'AttributeNameMapping' objects that map an existing attribute to a new attribute.\n Note\n The existing attributes remain in the message, so if you want to remove the originals, use 'RemoveAttributeActivity'.\n (string) --\n (string) --\n \n next (string) --The next activity in the pipeline.\n removeAttributes (dict) --Removes attributes from a message.\n name (string) -- [REQUIRED]The name of the 'removeAttributes' activity.\n attributes (list) -- [REQUIRED]A list of 1-50 attributes to remove from the message.\n (string) --\n next (string) --The next activity in the pipeline.\n selectAttributes (dict) --Creates a new message using only the specified attributes from the original message.\n name (string) -- [REQUIRED]The name of the 'selectAttributes' activity.\n attributes (list) -- [REQUIRED]A list of the attributes to select from the message.\n (string) --\n next (string) --The next activity in the pipeline.\n filter (dict) --Filters a message based on its attributes.\n name (string) -- [REQUIRED]The name of the 'filter' activity.\n filter (string) -- [REQUIRED]An expression that looks like a SQL WHERE clause that must return a Boolean value.\n next (string) --The next activity in the pipeline.\n math (dict) --Computes an arithmetic expression using the message's attributes and adds it to the message.\n name (string) -- [REQUIRED]The name of the 'math' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that contains the result of the math operation.\n math (string) -- [REQUIRED]An expression that uses one or more existing attributes and must return an integer value.\n next (string) --The next activity in the pipeline.\n deviceRegistryEnrich (dict) --Adds data from the AWS IoT device registry to your message.\n name (string) -- [REQUIRED]The name of the 'deviceRegistryEnrich' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that is added to the message.\n thingName (string) -- [REQUIRED]The name of the IoT device whose registry information is added to the message.\n roleArn (string) -- [REQUIRED]The ARN of the role that allows access to the device's registry 
information.\n next (string) --The next activity in the pipeline.\n deviceShadowEnrich (dict) --Adds information from the AWS IoT Device Shadows service to a message.\n name (string) -- [REQUIRED]The name of the 'deviceShadowEnrich' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that is added to the message.\n thingName (string) -- [REQUIRED]The name of the IoT device whose shadow information is added to the message.\n roleArn (string) -- [REQUIRED]The ARN of the role that allows access to the device's shadow.\n next (string) --The next activity in the pipeline.\n \n \n\n :type payloads: list\n :param payloads: [REQUIRED]\n The sample message payloads on which the pipeline activity is run.\n (bytes) --\n \n\n :rtype: dict\n :return: {\n 'payloads': [\n b'bytes',\n ],\n 'logResult': 'string'\n }\n \n \n :returns: \n (bytes) --\n \n \"\"\"\n pass\n\ndef sample_channel_data(channelName=None, maxMessages=None, startTime=None, endTime=None):\n \"\"\"\n Retrieves a sample of messages from the specified channel ingested during the specified timeframe. Up to 10 messages can be retrieved.\n See also: AWS API Documentation\n \n \n :example: response = client.sample_channel_data(\n channelName='string',\n maxMessages=123,\n startTime=datetime(2015, 1, 1),\n endTime=datetime(2015, 1, 1)\n )\n \n \n :type channelName: string\n :param channelName: [REQUIRED]\n The name of the channel whose message samples are retrieved.\n \n\n :type maxMessages: integer\n :param maxMessages: The number of sample messages to be retrieved. The limit is 10, the default is also 10.\n\n :type startTime: datetime\n :param startTime: The start of the time window from which sample messages are retrieved.\n\n :type endTime: datetime\n :param endTime: The end of the time window from which sample messages are retrieved.\n\n :rtype: dict\n :return: {\n 'payloads': [\n b'bytes',\n ]\n }\n \n \n :returns: \n (bytes) --\n \n \"\"\"\n pass\n\ndef start_pipeline_reprocessing(pipelineName=None, startTime=None, endTime=None):\n \"\"\"\n Starts the reprocessing of raw message data through the pipeline.\n See also: AWS API Documentation\n \n \n :example: response = client.start_pipeline_reprocessing(\n pipelineName='string',\n startTime=datetime(2015, 1, 1),\n endTime=datetime(2015, 1, 1)\n )\n \n \n :type pipelineName: string\n :param pipelineName: [REQUIRED]\n The name of the pipeline on which to start reprocessing.\n \n\n :type startTime: datetime\n :param startTime: The start time (inclusive) of raw message data that is reprocessed.\n\n :type endTime: datetime\n :param endTime: The end time (exclusive) of raw message data that is reprocessed.\n\n :rtype: dict\n :return: {\n 'reprocessingId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(resourceArn=None, tags=None):\n \"\"\"\n Adds to or modifies the tags of the given resource. 
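# --- Hedged usage sketch ---
# Pulls up to 10 raw messages ingested into a channel over the last hour,
# as documented for sample_channel_data above. The channel name is an
# illustrative assumption.
from datetime import datetime, timedelta
import boto3

iota = boto3.client('iotanalytics')
now = datetime.utcnow()
resp = iota.sample_channel_data(
    channelName='device_channel',
    maxMessages=10,
    startTime=now - timedelta(hours=1),
    endTime=now,
)
for raw in resp['payloads']:  # each entry is bytes
    print(raw.decode('utf-8', errors='replace'))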
Tags are metadata which can be used to manage a resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n resourceArn='string',\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The ARN of the resource whose tags you want to modify.\n \n\n :type tags: list\n :param tags: [REQUIRED]\n The new or modified tags for the resource.\n (dict) --A set of key/value pairs which are used to manage the resource.\n key (string) -- [REQUIRED]The tag's key.\n value (string) -- [REQUIRED]The tag's value.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(resourceArn=None, tagKeys=None):\n \"\"\"\n Removes the given tags (metadata) from the resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n resourceArn='string',\n tagKeys=[\n 'string',\n ]\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The ARN of the resource whose tags you want to remove.\n \n\n :type tagKeys: list\n :param tagKeys: [REQUIRED]\n The keys of those tags which you want to remove.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_channel(channelName=None, retentionPeriod=None):\n \"\"\"\n Updates the settings of a channel.\n See also: AWS API Documentation\n \n \n :example: response = client.update_channel(\n channelName='string',\n retentionPeriod={\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n )\n \n \n :type channelName: string\n :param channelName: [REQUIRED]\n The name of the channel to be updated.\n \n\n :type retentionPeriod: dict\n :param retentionPeriod: How long, in days, message data is kept for the channel.\n unlimited (boolean) --If true, message data is kept indefinitely.\n numberOfDays (integer) --The number of days that message data is kept. 
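# --- Hedged usage sketch ---
# Replays one day of raw channel data through a pipeline and shows how the
# returned reprocessingId ties start_pipeline_reprocessing,
# describe_pipeline (reprocessingSummaries), and
# cancel_pipeline_reprocessing together. The pipeline name and time window
# are illustrative assumptions.
from datetime import datetime
import boto3

iota = boto3.client('iotanalytics')
start = iota.start_pipeline_reprocessing(
    pipelineName='temps_pipeline',
    startTime=datetime(2019, 1, 1),
    endTime=datetime(2019, 1, 2),
)
rid = start['reprocessingId']

# Progress is reported on the pipeline itself.
pipeline = iota.describe_pipeline(pipelineName='temps_pipeline')['pipeline']
print([s for s in pipeline.get('reprocessingSummaries', []) if s['id'] == rid])

# A running task can be cancelled with the same id.
iota.cancel_pipeline_reprocessing(pipelineName='temps_pipeline', reprocessingId=rid)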
The 'unlimited' parameter must be false.\n \n\n \"\"\"\n pass\n\ndef update_dataset(datasetName=None, actions=None, triggers=None, contentDeliveryRules=None, retentionPeriod=None):\n \"\"\"\n Updates the settings of a data set.\n See also: AWS API Documentation\n \n \n :example: response = client.update_dataset(\n datasetName='string',\n actions=[\n {\n 'actionName': 'string',\n 'queryAction': {\n 'sqlQuery': 'string',\n 'filters': [\n {\n 'deltaTime': {\n 'offsetSeconds': 123,\n 'timeExpression': 'string'\n }\n },\n ]\n },\n 'containerAction': {\n 'image': 'string',\n 'executionRoleArn': 'string',\n 'resourceConfiguration': {\n 'computeType': 'ACU_1'|'ACU_2',\n 'volumeSizeInGB': 123\n },\n 'variables': [\n {\n 'name': 'string',\n 'stringValue': 'string',\n 'doubleValue': 123.0,\n 'datasetContentVersionValue': {\n 'datasetName': 'string'\n },\n 'outputFileUriValue': {\n 'fileName': 'string'\n }\n },\n ]\n }\n },\n ],\n triggers=[\n {\n 'schedule': {\n 'expression': 'string'\n },\n 'dataset': {\n 'name': 'string'\n }\n },\n ],\n contentDeliveryRules=[\n {\n 'entryName': 'string',\n 'destination': {\n 'iotEventsDestinationConfiguration': {\n 'inputName': 'string',\n 'roleArn': 'string'\n }\n }\n },\n ],\n retentionPeriod={\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n )\n \n \n :type datasetName: string\n :param datasetName: [REQUIRED]\n The name of the data set to update.\n \n\n :type actions: list\n :param actions: [REQUIRED]\n A list of 'DatasetAction' objects.\n (dict) --A 'DatasetAction' object that specifies how data set contents are automatically created.\n actionName (string) --The name of the data set action by which data set contents are automatically created.\n queryAction (dict) --An 'SqlQueryDatasetAction' object that uses an SQL query to automatically create data set contents.\n sqlQuery (string) -- [REQUIRED]A SQL query string.\n filters (list) --Pre-filters applied to message data.\n (dict) --Information which is used to filter message data, to segregate it according to the time frame in which it arrives.\n deltaTime (dict) --Used to limit data to that which has arrived since the last execution of the action.\n offsetSeconds (integer) -- [REQUIRED]The number of seconds of estimated 'in flight' lag time of message data. When you create data set contents using message data from a specified time frame, some message data may still be 'in flight' when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the 'in flight' time of your message data, so that data not processed from a previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.\n timeExpression (string) -- [REQUIRED]An expression by which the time of the message data may be determined. This may be the name of a timestamp field, or a SQL expression which is used to derive the time the message data was generated.\n \n \n containerAction (dict) --Information which allows the system to run a containerized application in order to create the data set contents. The application must be in a Docker container along with any needed support libraries.\n image (string) -- [REQUIRED]The ARN of the Docker container stored in your account. 
The Docker container contains an application and needed support libraries and is used to generate data set contents.\n executionRoleArn (string) -- [REQUIRED]The ARN of the role which gives permission to the system to access needed resources in order to run the 'containerAction'. This includes, at minimum, permission to retrieve the data set contents which are the input to the containerized application.\n resourceConfiguration (dict) -- [REQUIRED]Configuration of the resource which executes the 'containerAction'.\n computeType (string) -- [REQUIRED]The type of the compute resource used to execute the 'containerAction'. Possible values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB).\n volumeSizeInGB (integer) -- [REQUIRED]The size (in GB) of the persistent storage available to the resource instance used to execute the 'containerAction' (min: 1, max: 50).\n variables (list) --The values of variables used within the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of 'stringValue', 'datasetContentVersionValue', or 'outputFileUriValue'.\n (dict) --An instance of a variable to be passed to the 'containerAction' execution. Each variable must have a name and a value given by one of 'stringValue', 'datasetContentVersionValue', or 'outputFileUriValue'.\n name (string) -- [REQUIRED]The name of the variable.\n stringValue (string) --The value of the variable as a string.\n doubleValue (float) --The value of the variable as a double (numeric).\n datasetContentVersionValue (dict) --The value of the variable as a structure that specifies a data set content version.\n datasetName (string) -- [REQUIRED]The name of the data set whose latest contents are used as input to the notebook or application.\n outputFileUriValue (dict) --The value of the variable as a structure that specifies an output file URI.\n fileName (string) -- [REQUIRED]The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.\n \n \n \n \n\n :type triggers: list\n :param triggers: A list of 'DatasetTrigger' objects. The list can be empty or can contain up to five DataSetTrigger objects.\n (dict) --The 'DatasetTrigger' that specifies when the data set is automatically updated.\n schedule (dict) --The 'Schedule' when the trigger is initiated.\n expression (string) --The expression that defines when to trigger an update. 
For more information, see Schedule Expressions for Rules in the Amazon CloudWatch documentation.\n dataset (dict) --The data set whose content creation triggers the creation of this data set's contents.\n name (string) -- [REQUIRED]The name of the data set whose content generation triggers the new data set content generation.\n \n \n\n :type contentDeliveryRules: list\n :param contentDeliveryRules: When data set contents are created they are delivered to destinations specified here.\n (dict) --When data set contents are created they are delivered to destination specified here.\n entryName (string) --The name of the data set content delivery rules entry.\n destination (dict) -- [REQUIRED]The destination to which data set contents are delivered.\n iotEventsDestinationConfiguration (dict) --Configuration information for delivery of data set contents to AWS IoT Events.\n inputName (string) -- [REQUIRED]The name of the AWS IoT Events input to which data set contents are delivered.\n roleArn (string) -- [REQUIRED]The ARN of the role which grants AWS IoT Analytics permission to deliver data set contents to an AWS IoT Events input.\n \n \n \n\n :type retentionPeriod: dict\n :param retentionPeriod: How long, in days, message data is kept for the data set.\n unlimited (boolean) --If true, message data is kept indefinitely.\n numberOfDays (integer) --The number of days that message data is kept. The 'unlimited' parameter must be false.\n \n\n \"\"\"\n pass\n\ndef update_datastore(datastoreName=None, retentionPeriod=None):\n \"\"\"\n Updates the settings of a data store.\n See also: AWS API Documentation\n \n \n :example: response = client.update_datastore(\n datastoreName='string',\n retentionPeriod={\n 'unlimited': True|False,\n 'numberOfDays': 123\n }\n )\n \n \n :type datastoreName: string\n :param datastoreName: [REQUIRED]\n The name of the data store to be updated.\n \n\n :type retentionPeriod: dict\n :param retentionPeriod: How long, in days, message data is kept for the data store.\n unlimited (boolean) --If true, message data is kept indefinitely.\n numberOfDays (integer) --The number of days that message data is kept. 
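# --- Hedged usage sketch ---
# Tightens a data store's retention to 30 days with update_datastore; note
# the documented constraint that 'unlimited' must be false whenever
# 'numberOfDays' is given. The datastore name is an illustrative assumption.
import boto3

iota = boto3.client('iotanalytics')
iota.update_datastore(
    datastoreName='my_datastore',
    retentionPeriod={'unlimited': False, 'numberOfDays': 30},
)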
The 'unlimited' parameter must be false.\n \n\n \"\"\"\n pass\n\ndef update_pipeline(pipelineName=None, pipelineActivities=None):\n \"\"\"\n Updates the settings of a pipeline.\n See also: AWS API Documentation\n \n \n :example: response = client.update_pipeline(\n pipelineName='string',\n pipelineActivities=[\n {\n 'channel': {\n 'name': 'string',\n 'channelName': 'string',\n 'next': 'string'\n },\n 'lambda': {\n 'name': 'string',\n 'lambdaName': 'string',\n 'batchSize': 123,\n 'next': 'string'\n },\n 'datastore': {\n 'name': 'string',\n 'datastoreName': 'string'\n },\n 'addAttributes': {\n 'name': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'next': 'string'\n },\n 'removeAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'selectAttributes': {\n 'name': 'string',\n 'attributes': [\n 'string',\n ],\n 'next': 'string'\n },\n 'filter': {\n 'name': 'string',\n 'filter': 'string',\n 'next': 'string'\n },\n 'math': {\n 'name': 'string',\n 'attribute': 'string',\n 'math': 'string',\n 'next': 'string'\n },\n 'deviceRegistryEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n },\n 'deviceShadowEnrich': {\n 'name': 'string',\n 'attribute': 'string',\n 'thingName': 'string',\n 'roleArn': 'string',\n 'next': 'string'\n }\n },\n ]\n )\n \n \n :type pipelineName: string\n :param pipelineName: [REQUIRED]\n The name of the pipeline to update.\n \n\n :type pipelineActivities: list\n :param pipelineActivities: [REQUIRED]\n A list of 'PipelineActivity' objects.\n The list can be 1-25 PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.\n (dict) --An activity that performs a transformation on a message.\n channel (dict) --Determines the source of the messages to be processed.\n name (string) -- [REQUIRED]The name of the 'channel' activity.\n channelName (string) -- [REQUIRED]The name of the channel from which the messages are processed.\n next (string) --The next activity in the pipeline.\n lambda (dict) --Runs a Lambda function to modify the message.\n name (string) -- [REQUIRED]The name of the 'lambda' activity.\n lambdaName (string) -- [REQUIRED]The name of the Lambda function that is run on the message.\n batchSize (integer) -- [REQUIRED]The number of messages passed to the Lambda function for processing.\n The AWS Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.\n next (string) --The next activity in the pipeline.\n datastore (dict) --Specifies where to store the processed message data.\n name (string) -- [REQUIRED]The name of the 'datastore' activity.\n datastoreName (string) -- [REQUIRED]The name of the data store where processed messages are stored.\n addAttributes (dict) --Adds other attributes based on existing attributes in the message.\n name (string) -- [REQUIRED]The name of the 'addAttributes' activity.\n attributes (dict) -- [REQUIRED]A list of 1-50 'AttributeNameMapping' objects that map an existing attribute to a new attribute.\n Note\n The existing attributes remain in the message, so if you want to remove the originals, use 'RemoveAttributeActivity'.\n (string) --\n (string) --\n \n next (string) --The next 
activity in the pipeline.\n removeAttributes (dict) --Removes attributes from a message.\n name (string) -- [REQUIRED]The name of the 'removeAttributes' activity.\n attributes (list) -- [REQUIRED]A list of 1-50 attributes to remove from the message.\n (string) --\n next (string) --The next activity in the pipeline.\n selectAttributes (dict) --Creates a new message using only the specified attributes from the original message.\n name (string) -- [REQUIRED]The name of the 'selectAttributes' activity.\n attributes (list) -- [REQUIRED]A list of the attributes to select from the message.\n (string) --\n next (string) --The next activity in the pipeline.\n filter (dict) --Filters a message based on its attributes.\n name (string) -- [REQUIRED]The name of the 'filter' activity.\n filter (string) -- [REQUIRED]An expression that looks like a SQL WHERE clause that must return a Boolean value.\n next (string) --The next activity in the pipeline.\n math (dict) --Computes an arithmetic expression using the message's attributes and adds it to the message.\n name (string) -- [REQUIRED]The name of the 'math' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that contains the result of the math operation.\n math (string) -- [REQUIRED]An expression that uses one or more existing attributes and must return an integer value.\n next (string) --The next activity in the pipeline.\n deviceRegistryEnrich (dict) --Adds data from the AWS IoT device registry to your message.\n name (string) -- [REQUIRED]The name of the 'deviceRegistryEnrich' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that is added to the message.\n thingName (string) -- [REQUIRED]The name of the IoT device whose registry information is added to the message.\n roleArn (string) -- [REQUIRED]The ARN of the role that allows access to the device's registry information.\n next (string) --The next activity in the pipeline.\n deviceShadowEnrich (dict) --Adds information from the AWS IoT Device Shadows service to a message.\n name (string) -- [REQUIRED]The name of the 'deviceShadowEnrich' activity.\n attribute (string) -- [REQUIRED]The name of the attribute that is added to the message.\n thingName (string) -- [REQUIRED]The name of the IoT device whose shadow information is added to the message.\n roleArn (string) -- [REQUIRED]The ARN of the role that allows access to the device's shadow.\n next (string) --The next activity in the pipeline.\n \n \n\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6802424192428589, "alphanum_fraction": 0.6852121353149414, "avg_line_length": 51.04731750488281, "blob_id": "cc14d2d2648f94f7684c86ca3c1b57ee0f00909c", "content_id": "1e2fb350d6022307baa44deac00461c31b62dc78", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16500, "license_type": "permissive", "max_line_length": 514, "num_lines": 317, "path": "/pyboto3/lexruntimeservice.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice 
and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef post_content(botName=None, botAlias=None, userId=None, sessionAttributes=None, requestAttributes=None, contentType=None, accept=None, inputStream=None):\n \"\"\"\n Sends user input (text or speech) to Amazon Lex. Clients use this API to send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot.\n The PostContent operation supports audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher speech recognition accuracy in telephone audio applications.\n In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages:\n Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. 
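# A minimal usage sketch for post_content with plain-text input, matching the
# parameters documented below; the bot name, alias, user ID, and region are
# placeholder assumptions, not values from the docs.
import boto3

lex = boto3.client('lex-runtime', region_name='us-east-1')  # assumed region
response = lex.post_content(
    botName='ExampleBot',      # hypothetical bot
    botAlias='prod',           # hypothetical alias
    userId='user-1234',        # hypothetical user
    contentType='text/plain; charset=utf-8',
    accept='text/plain; charset=utf-8',
    inputStream=b'I would like to order flowers',
)
print(response['dialogState'], response.get('message'))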
In addition to the message , Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. Consider the following examples:\n In addition, Amazon Lex also returns your application-specific sessionAttributes . For more information, see Managing Conversation Context .\n See also: AWS API Documentation\n \n \n :example: response = client.post_content(\n botName='string',\n botAlias='string',\n userId='string',\n sessionAttributes={...}|[...]|123|123.4|'string'|True|None,\n requestAttributes={...}|[...]|123|123.4|'string'|True|None,\n contentType='string',\n accept='string',\n inputStream=b'bytes'|file\n )\n \n \n :type botName: string\n :param botName: [REQUIRED]\n Name of the Amazon Lex bot.\n \n\n :type botAlias: string\n :param botAlias: [REQUIRED]\n Alias of the Amazon Lex bot.\n \n\n :type userId: string\n :param userId: [REQUIRED]\n The ID of the client application user. Amazon Lex uses this to identify a user's conversation with your bot. At runtime, each request must contain the userID field.\n To decide the user ID to use for your application, consider the following factors.\n The userID field must not contain any personally identifiable information of the user, for example, name, personal identification numbers, or other end user personal information.\n If you want a user to start a conversation on one device and continue on another device, use a user-specific identifier.\n If you want the same user to be able to have two independent conversations on two different devices, choose a device-specific identifier.\n A user can't have two independent conversations with two different versions of the same bot. For example, a user can't have a conversation with the PROD and BETA versions of the same bot. If you anticipate that a user will need to have conversation with two different versions, for example, while testing, include the bot alias in the user ID to separate the two conversations.\n \n\n :type sessionAttributes: JSON serializable\n :param sessionAttributes: You pass this value as the x-amz-lex-session-attributes HTTP header.\n Application-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the sessionAttributes and requestAttributes headers is limited to 12 KB.\n For more information, see Setting Session Attributes .\n \n\n :type requestAttributes: JSON serializable\n :param requestAttributes: You pass this value as the x-amz-lex-request-attributes HTTP header.\n Request-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the requestAttributes and sessionAttributes headers is limited to 12 KB.\n The namespace x-amz-lex: is reserved for special attributes. Don't create any request attributes with the prefix x-amz-lex: .\n For more information, see Setting Request Attributes .\n \n\n :type contentType: string\n :param contentType: [REQUIRED]\n You pass this value as the Content-Type HTTP header.\n Indicates the audio format or text. 
The header value must start with one of the following prefixes:\n PCM format, audio data must be in little-endian byte order.\n audio/l16; rate=16000; channels=1\n audio/x-l16; sample-rate=16000; channel-count=1\n audio/lpcm; sample-rate=8000; sample-size-bits=16; channel-count=1; is-big-endian=false\n Opus format\n audio/x-cbr-opus-with-preamble; preamble-size=0; bit-rate=256000; frame-size-milliseconds=4\n Text format\n text/plain; charset=utf-8\n \n\n :type accept: string\n :param accept: You pass this value as the Accept HTTP header.\n The message Amazon Lex returns in the response can be either text or speech based on the Accept HTTP header value in the request.\n If the value is text/plain; charset=utf-8 , Amazon Lex returns text in the response.\n If the value begins with audio/ , Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech (using the configuration you specified in the Accept header). For example, if you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG format. The following are the accepted values:\n audio/mpeg\n audio/ogg\n audio/pcm\n text/plain; charset=utf-8\n audio/* (defaults to mpeg)\n \n\n :type inputStream: bytes or seekable file-like object\n :param inputStream: [REQUIRED]\n User input in PCM or Opus audio format or text format as described in the Content-Type HTTP header.\n You can stream audio data to Amazon Lex or you can create a local buffer that captures all of the audio data before sending. In general, you get better performance if you stream audio data rather than buffering the data locally.\n \n\n :rtype: dict\n :return: {\n 'contentType': 'string',\n 'intentName': 'string',\n 'slots': {...}|[...]|123|123.4|'string'|True|None,\n 'sessionAttributes': {...}|[...]|123|123.4|'string'|True|None,\n 'message': 'string',\n 'messageFormat': 'PlainText'|'CustomPayload'|'SSML'|'Composite',\n 'dialogState': 'ElicitIntent'|'ConfirmIntent'|'ElicitSlot'|'Fulfilled'|'ReadyForFulfillment'|'Failed',\n 'slotToElicit': 'string',\n 'inputTranscript': 'string',\n 'audioStream': StreamingBody()\n }\n \n \n :returns: \n If the message is to elicit slot data, Amazon Lex returns the following context information:\n x-amz-lex-dialog-state header set to ElicitSlot\n x-amz-lex-intent-name header set to the intent name in the current context\n x-amz-lex-slot-to-elicit header set to the slot name for which the message is eliciting information\n x-amz-lex-slots header set to a map of slots configured for the intent with their current values\n \n \n If the message is a confirmation prompt, the x-amz-lex-dialog-state header is set to Confirmation and the x-amz-lex-slot-to-elicit header is omitted.\n If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the x-amz-dialog-state header is set to ElicitIntent and the x-amz-slot-to-elicit header is omitted.\n \n \"\"\"\n pass\n\ndef post_text(botName=None, botAlias=None, userId=None, sessionAttributes=None, requestAttributes=None, inputText=None):\n \"\"\"\n Sends user input (text-only) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot.\n In response, Amazon Lex returns the next message to convey to the user an optional responseCard to display. Consider the following example messages:\n Not all Amazon Lex messages require a user response. 
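# A minimal usage sketch for a single post_text turn; checking dialogState
# shows whether the bot is eliciting another slot, as described below. The
# bot name, alias, user ID, and region are placeholder assumptions.
import boto3

lex = boto3.client('lex-runtime', region_name='us-east-1')  # assumed region
response = lex.post_text(
    botName='ExampleBot',   # hypothetical bot
    botAlias='prod',        # hypothetical alias
    userId='user-1234',     # hypothetical user
    inputText='I would like to order flowers',
)
if response['dialogState'] == 'ElicitSlot':
    # The bot is asking for the slot named in slotToElicit.
    print(response.get('slotToElicit'), '->', response.get('message'))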
For example, a conclusion statement does not require a response. Some messages require only a \"yes\" or \"no\" user response. In addition to the message , Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the slotToElicit , dialogState , intentName , and slots fields in the response. Consider the following examples:\n In addition, Amazon Lex also returns your application-specific sessionAttributes . For more information, see Managing Conversation Context .\n See also: AWS API Documentation\n \n \n :example: response = client.post_text(\n botName='string',\n botAlias='string',\n userId='string',\n sessionAttributes={\n 'string': 'string'\n },\n requestAttributes={\n 'string': 'string'\n },\n inputText='string'\n )\n \n \n :type botName: string\n :param botName: [REQUIRED]\n The name of the Amazon Lex bot.\n \n\n :type botAlias: string\n :param botAlias: [REQUIRED]\n The alias of the Amazon Lex bot.\n \n\n :type userId: string\n :param userId: [REQUIRED]\n The ID of the client application user. Amazon Lex uses this to identify a user's conversation with your bot. At runtime, each request must contain the userID field.\n To decide the user ID to use for your application, consider the following factors.\n The userID field must not contain any personally identifiable information of the user, for example, name, personal identification numbers, or other end user personal information.\n If you want a user to start a conversation on one device and continue on another device, use a user-specific identifier.\n If you want the same user to be able to have two independent conversations on two different devices, choose a device-specific identifier.\n A user can't have two independent conversations with two different versions of the same bot. For example, a user can't have a conversation with the PROD and BETA versions of the same bot. If you anticipate that a user will need to have conversation with two different versions, for example, while testing, include the bot alias in the user ID to separate the two conversations.\n \n\n :type sessionAttributes: dict\n :param sessionAttributes: Application-specific information passed between Amazon Lex and a client application.\n For more information, see Setting Session Attributes .\n (string) --\n (string) --\n \n\n :type requestAttributes: dict\n :param requestAttributes: Request-specific information passed between Amazon Lex and a client application.\n The namespace x-amz-lex: is reserved for special attributes. 
Don't create any request attributes with the prefix x-amz-lex: .\n For more information, see Setting Request Attributes .\n (string) --\n (string) --\n \n\n :type inputText: string\n :param inputText: [REQUIRED]\n The text that the user entered (Amazon Lex interprets this text).\n \n\n :rtype: dict\n :return: {\n 'intentName': 'string',\n 'slots': {\n 'string': 'string'\n },\n 'sessionAttributes': {\n 'string': 'string'\n },\n 'message': 'string',\n 'messageFormat': 'PlainText'|'CustomPayload'|'SSML'|'Composite',\n 'dialogState': 'ElicitIntent'|'ConfirmIntent'|'ElicitSlot'|'Fulfilled'|'ReadyForFulfillment'|'Failed',\n 'slotToElicit': 'string',\n 'responseCard': {\n 'version': 'string',\n 'contentType': 'application/vnd.amazonaws.card.generic',\n 'genericAttachments': [\n {\n 'title': 'string',\n 'subTitle': 'string',\n 'attachmentLinkUrl': 'string',\n 'imageUrl': 'string',\n 'buttons': [\n {\n 'text': 'string',\n 'value': 'string'\n },\n ]\n },\n ]\n }\n }\n \n \n :returns: \n If the message is to elicit slot data, Amazon Lex returns the following context information:\n dialogState set to ElicitSlot\n intentName set to the intent name in the current context\n slotToElicit set to the slot name for which the message is eliciting information\n slots set to a map of slots, configured for the intent, with currently known values\n \n \n If the message is a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit is set to null.\n If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the dialogState is set to ElicitIntent and slotToElicit is set to null.\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6810463666915894, "alphanum_fraction": 0.6829738616943359, "avg_line_length": 39.649253845214844, "blob_id": "22caa4ec4005c54d9d97dc9bdcbc28ff4691e690", "content_id": "3e086d1bb3399b8d5fad1d9149e171f888576d33", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10895, "license_type": "permissive", "max_line_length": 572, "num_lines": 268, "path": "/pyboto3/mediatailor.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_playback_configuration(Name=None):\n \"\"\"\n Deletes the configuration for the specified name.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_playback_configuration(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The identifier for the configuration.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_playback_configuration(Name=None):\n \"\"\"\n Returns the configuration for the specified name.\n See also: AWS API Documentation\n \n \n :example: response = client.get_playback_configuration(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The identifier for the configuration.\n \n\n :rtype: dict\n :return: {\n 'AdDecisionServerUrl': 'string',\n 'CdnConfiguration': {\n 'AdSegmentUrlPrefix': 'string',\n 'ContentSegmentUrlPrefix': 'string'\n },\n 'DashConfiguration': {\n 'ManifestEndpointPrefix': 'string',\n 'MpdLocation': 'string'\n },\n 'HlsConfiguration': {\n 'ManifestEndpointPrefix': 'string'\n },\n 'Name': 'string',\n 'PlaybackEndpointPrefix': 'string',\n 'SessionInitializationEndpointPrefix': 'string',\n 'SlateAdUrl': 'string',\n 'TranscodeProfileName': 'string',\n 'VideoContentSourceUrl': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_playback_configurations(MaxResults=None, NextToken=None):\n \"\"\"\n Returns a list of the configurations defined in AWS Elemental MediaTailor. You can specify a max number of configurations to return at a time. The default max is 50. Results are returned in pagefuls. 
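# A minimal pagination sketch for list_playback_configurations, feeding each
# NextToken back until the service stops returning one; the region is a
# placeholder assumption.
import boto3

mt = boto3.client('mediatailor', region_name='us-east-1')  # assumed region
configurations = []
kwargs = {'MaxResults': 50}
while True:
    page = mt.list_playback_configurations(**kwargs)
    configurations.extend(page.get('Items', []))
    if not page.get('NextToken'):
        break
    kwargs['NextToken'] = page['NextToken']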
If AWS Elemental MediaTailor has more configurations than the specified max, it provides parameters in the response that you can use to retrieve the next pageful.\n See also: AWS API Documentation\n \n \n :example: response = client.list_playback_configurations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: Maximum number of records to return.\n\n :type NextToken: string\n :param NextToken: Pagination token returned by the GET list request when results overrun the maximum allowed. Use the token to fetch the next page of results.\n\n :rtype: dict\n :return: {\n 'Items': [\n {\n 'AdDecisionServerUrl': 'string',\n 'CdnConfiguration': {\n 'AdSegmentUrlPrefix': 'string',\n 'ContentSegmentUrlPrefix': 'string'\n },\n 'Name': 'string',\n 'SlateAdUrl': 'string',\n 'VideoContentSourceUrl': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_playback_configuration(AdDecisionServerUrl=None, CdnConfiguration=None, DashConfiguration=None, Name=None, SlateAdUrl=None, TranscodeProfileName=None, VideoContentSourceUrl=None):\n \"\"\"\n Adds a new configuration to AWS Elemental MediaTailor.\n See also: AWS API Documentation\n \n \n :example: response = client.put_playback_configuration(\n AdDecisionServerUrl='string',\n CdnConfiguration={\n 'AdSegmentUrlPrefix': 'string',\n 'ContentSegmentUrlPrefix': 'string'\n },\n DashConfiguration={\n 'MpdLocation': 'string'\n },\n Name='string',\n SlateAdUrl='string',\n TranscodeProfileName='string',\n VideoContentSourceUrl='string'\n )\n \n \n :type AdDecisionServerUrl: string\n :param AdDecisionServerUrl: The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25000 characters.\n\n :type CdnConfiguration: dict\n :param CdnConfiguration: The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.\n AdSegmentUrlPrefix (string) --A non-default content delivery network (CDN) to serve ad segments. By default, AWS Elemental MediaTailor uses Amazon CloudFront with default cache settings as its CDN for ad segments. To set up an alternate CDN, create a rule in your CDN for the following origin: ads.mediatailor.<region>.amazonaws.com. Then specify the rule's name in this AdSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for ad segments.\n ContentSegmentUrlPrefix (string) --A content delivery network (CDN) to cache content segments, so that content requests don't always have to go to the origin server. First, create a rule in your CDN for the content segment origin server. Then specify the rule's name in this ContentSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for content segments.\n \n\n :type DashConfiguration: dict\n :param DashConfiguration: The configuration object for DASH content.\n MpdLocation (string) --The setting that controls whether MediaTailor includes the Location tag in DASH Manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects.
Disable this if you have CDN routing rules set up for accessing MediaTailor manifests and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.\n \n\n :type Name: string\n :param Name: The identifier for the configuration.\n\n :type SlateAdUrl: string\n :param SlateAdUrl: The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.\n\n :type TranscodeProfileName: string\n :param TranscodeProfileName: Associate this playbackConfiguration with a custom transcode profile, overriding MediaTailor's dynamic transcoding defaults. Do not include this field if you have not setup custom profiles with the MediaTailor service team.\n\n :type VideoContentSourceUrl: string\n :param VideoContentSourceUrl: The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.\n\n :rtype: dict\n :return: {\n 'AdDecisionServerUrl': 'string',\n 'CdnConfiguration': {\n 'AdSegmentUrlPrefix': 'string',\n 'ContentSegmentUrlPrefix': 'string'\n },\n 'DashConfiguration': {\n 'ManifestEndpointPrefix': 'string',\n 'MpdLocation': 'string'\n },\n 'HlsConfiguration': {\n 'ManifestEndpointPrefix': 'string'\n },\n 'Name': 'string',\n 'PlaybackEndpointPrefix': 'string',\n 'SessionInitializationEndpointPrefix': 'string',\n 'SlateAdUrl': 'string',\n 'TranscodeProfileName': 'string',\n 'VideoContentSourceUrl': 'string'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6271724104881287, "alphanum_fraction": 0.6403638124465942, "avg_line_length": 42.53493881225586, "blob_id": "f3d03d16eff134083e9d1d1ae5e02fe1cda5e202", "content_id": "1ff86c8edf752533a523ac8b0ddf78e6568bf574", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54202, "license_type": "permissive", "max_line_length": 752, "num_lines": 1245, "path": "/pyboto3/connect.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_user(Username=None, Password=None, IdentityInfo=None, PhoneConfig=None, DirectoryUserId=None, SecurityProfileIds=None, RoutingProfileId=None, HierarchyGroupId=None, InstanceId=None):\n \"\"\"\n Creates a new user account in your Amazon Connect instance.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user(\n Username='string',\n Password='string',\n IdentityInfo={\n 'FirstName': 'string',\n 'LastName': 'string',\n 'Email': 'string'\n },\n PhoneConfig={\n 'PhoneType': 'SOFT_PHONE'|'DESK_PHONE',\n 'AutoAccept': True|False,\n 'AfterContactWorkTimeLimit': 123,\n 'DeskPhoneNumber': 'string'\n },\n DirectoryUserId='string',\n SecurityProfileIds=[\n 'string',\n ],\n RoutingProfileId='string',\n HierarchyGroupId='string',\n InstanceId='string'\n )\n \n \n :type Username: string\n :param Username: [REQUIRED]\n The user name in Amazon Connect for the account to create. If you are using SAML for identity management in your Amazon Connect, the value for Username can include up to 64 characters from [a-zA-Z0-9_-.@]+.\n \n\n :type Password: string\n :param Password: The password for the user account to create. This is required if you are using Amazon Connect for identity management. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.\n\n :type IdentityInfo: dict\n :param IdentityInfo: Information about the user, including email address, first name, and last name.\n FirstName (string) --The first name used in the user account. This is required if you are using Amazon Connect or SAML for identity management.\n LastName (string) --The last name used in the user account. This is required if you are using Amazon Connect or SAML for identity management.\n Email (string) --The email address added to the user account. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.\n \n\n :type PhoneConfig: dict\n :param PhoneConfig: [REQUIRED]\n Specifies the phone settings for the user, including AfterContactWorkTimeLimit , AutoAccept , DeskPhoneNumber , and PhoneType .\n PhoneType (string) -- [REQUIRED]The phone type selected for the user, either Soft phone or Desk phone.\n AutoAccept (boolean) --The Auto accept setting for the user, Yes or No.\n AfterContactWorkTimeLimit (integer) --The After Call Work (ACW) timeout setting, in seconds, for the user.\n DeskPhoneNumber (string) --The phone number for the user's desk phone.\n \n\n :type DirectoryUserId: string\n :param DirectoryUserId: The unique identifier for the user account in the directory service directory used for identity management. If Amazon Connect is unable to access the existing directory, you can use the DirectoryUserId to authenticate users. 
If you include the parameter, it is assumed that Amazon Connect cannot access the directory. If the parameter is not included, the UserIdentityInfo is used to authenticate users from your existing directory.\n This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.\n \n\n :type SecurityProfileIds: list\n :param SecurityProfileIds: [REQUIRED]\n The unique identifier of the security profile to assign to the user created.\n (string) --\n \n\n :type RoutingProfileId: string\n :param RoutingProfileId: [REQUIRED]\n The unique identifier for the routing profile to assign to the user created.\n \n\n :type HierarchyGroupId: string\n :param HierarchyGroupId: The unique identifier for the hierarchy group to assign to the user created.\n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :rtype: dict\n :return: {\n 'UserId': 'string',\n 'UserArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_user(InstanceId=None, UserId=None):\n \"\"\"\n Deletes a user account from Amazon Connect.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user(\n InstanceId='string',\n UserId='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The unique identifier of the user to delete.\n \n\n \"\"\"\n pass\n\ndef describe_user(UserId=None, InstanceId=None):\n \"\"\"\n Returns a User object that contains information about the user account specified by the UserId .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user(\n UserId='string',\n InstanceId='string'\n )\n \n \n :type UserId: string\n :param UserId: [REQUIRED]\n Unique identifier for the user account to return.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. 
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :rtype: dict\n :return: {\n 'User': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Username': 'string',\n 'IdentityInfo': {\n 'FirstName': 'string',\n 'LastName': 'string',\n 'Email': 'string'\n },\n 'PhoneConfig': {\n 'PhoneType': 'SOFT_PHONE'|'DESK_PHONE',\n 'AutoAccept': True|False,\n 'AfterContactWorkTimeLimit': 123,\n 'DeskPhoneNumber': 'string'\n },\n 'DirectoryUserId': 'string',\n 'SecurityProfileIds': [\n 'string',\n ],\n 'RoutingProfileId': 'string',\n 'HierarchyGroupId': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_user_hierarchy_group(HierarchyGroupId=None, InstanceId=None):\n \"\"\"\n Returns a HierarchyGroup object that includes information about a hierarchy group in your instance.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user_hierarchy_group(\n HierarchyGroupId='string',\n InstanceId='string'\n )\n \n \n :type HierarchyGroupId: string\n :param HierarchyGroupId: [REQUIRED]\n The identifier for the hierarchy group to return.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :rtype: dict\n :return: {\n 'HierarchyGroup': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string',\n 'LevelId': 'string',\n 'HierarchyPath': {\n 'LevelOne': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelTwo': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelThree': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelFour': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelFive': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_user_hierarchy_structure(InstanceId=None):\n \"\"\"\n Returns a HierarchyStructure object, which contains data about the levels in the agent hierarchy.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_user_hierarchy_structure(\n InstanceId='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings.
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :rtype: dict\n :return: {\n 'HierarchyStructure': {\n 'LevelOne': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelTwo': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelThree': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelFour': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n 'LevelFive': {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_contact_attributes(InstanceId=None, InitialContactId=None):\n \"\"\"\n Retrieves the contact attributes associated with a contact.\n See also: AWS API Documentation\n \n \n :example: response = client.get_contact_attributes(\n InstanceId='string',\n InitialContactId='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The instance ID for the instance from which to retrieve contact attributes.\n \n\n :type InitialContactId: string\n :param InitialContactId: [REQUIRED]\n The ID for the initial contact in Amazon Connect associated with the attributes to update.\n \n\n :rtype: dict\n :return: {\n 'Attributes': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_current_metric_data(InstanceId=None, Filters=None, Groupings=None, CurrentMetrics=None, NextToken=None, MaxResults=None):\n \"\"\"\n The GetCurrentMetricData operation retrieves current metric data from your Amazon Connect instance.\n If you are using an IAM account, it must have permission to the connect:GetCurrentMetricData action.\n See also: AWS API Documentation\n \n \n :example: response = client.get_current_metric_data(\n InstanceId='string',\n Filters={\n 'Queues': [\n 'string',\n ],\n 'Channels': [\n 'VOICE',\n ]\n },\n Groupings=[\n 'QUEUE'|'CHANNEL',\n ],\n CurrentMetrics=[\n {\n 'Name': 'AGENTS_ONLINE'|'AGENTS_AVAILABLE'|'AGENTS_ON_CALL'|'AGENTS_NON_PRODUCTIVE'|'AGENTS_AFTER_CONTACT_WORK'|'AGENTS_ERROR'|'AGENTS_STAFFED'|'CONTACTS_IN_QUEUE'|'OLDEST_CONTACT_AGE'|'CONTACTS_SCHEDULED',\n 'Unit': 'SECONDS'|'COUNT'|'PERCENT'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. 
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type Filters: dict\n :param Filters: [REQUIRED]\n A Filters object that contains a list of queue IDs or queue ARNs, up to 100, or list of Channels to use to filter the metrics returned in the response. Metric data is retrieved only for the resources associated with the queue IDs, ARNs, or Channels included in the filter. You can include both IDs and ARNs in the same request. To retrieve metrics for all queues, add the queue ID or ARN for each queue in your instance. Only VOICE is supported for Channels.\n To find the ARN for a queue, open the queue you want to use in the Amazon Connect Queue editor. The ARN for the queue is displayed in the address bar as part of the URL. For example, the queue ARN is the set of characters at the end of the URL, after 'id=' such as arn:aws:connect:us-east-1:270923740243:instance/78fb859d-1b7d-44b1-8aa3-12f0835c5855/queue/1d1a4575-9618-40ab-bbeb-81e45795fe61 . The queue ID is also included in the URL, and is the string after 'queue/'.\n Queues (list) --A list of up to 100 queue IDs or queue ARNs to use to filter the metrics retrieved. You can include both IDs and ARNs in a request.\n (string) --\n Channels (list) --The Channel to use as a filter for the metrics returned. Only VOICE is supported.\n (string) --\n \n\n :type Groupings: list\n :param Groupings: The grouping applied to the metrics returned. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues. If you group by CHANNEL, you should include a Channels filter. The only supported channel is VOICE.\n If no Grouping is included in the request, a summary of CurrentMetrics is returned.\n (string) --\n \n\n :type CurrentMetrics: list\n :param CurrentMetrics: [REQUIRED]\n A list of CurrentMetric objects for the metrics to retrieve. Each CurrentMetric includes a name of a metric to retrieve and the unit to use for it. You must list each metric to retrieve data for in the request.\n The following metrics are available:\n AGENTS_AVAILABLE\n Unit: COUNT\n AGENTS_ONLINE\n Unit: COUNT\n AGENTS_ON_CALL\n Unit: COUNT\n AGENTS_STAFFED\n Unit: COUNT\n AGENTS_AFTER_CONTACT_WORK\n Unit: COUNT\n AGENTS_NON_PRODUCTIVE\n Unit: COUNT\n AGENTS_ERROR\n Unit: COUNT\n CONTACTS_IN_QUEUE\n Unit: COUNT\n OLDEST_CONTACT_AGE\n Unit: SECONDS\n CONTACTS_SCHEDULED\n Unit: COUNT\n (dict) --A CurrentMetric object that contains the Name and Unit for the metric.\n Name (string) --The name of the metric.\n Unit (string) --The unit for the metric.\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.\n The token expires after 5 minutes from the time it is created. 
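# A minimal usage sketch for get_current_metric_data shaped like the example
# above; the instance and queue IDs are the placeholder identifiers reused
# from this documentation, and the region is an assumption.
import boto3

connect = boto3.client('connect', region_name='us-east-1')  # assumed region
response = connect.get_current_metric_data(
    InstanceId='10a4c4eb-f57e-4d4c-b602-bf39176ced07',         # placeholder
    Filters={
        'Queues': ['1d1a4575-9618-40ab-bbeb-81e45795fe61'],    # placeholder
        'Channels': ['VOICE'],
    },
    Groupings=['QUEUE'],
    CurrentMetrics=[{'Name': 'AGENTS_ONLINE', 'Unit': 'COUNT'}],
)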
Subsequent requests that use the NextToken must use the same request parameters as the request that generated the token.\n \n\n :type MaxResults: integer\n :param MaxResults: MaxResults indicates the maximum number of results to return per page in the response, between 1 and 100.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'MetricResults': [\n {\n 'Dimensions': {\n 'Queue': {\n 'Id': 'string',\n 'Arn': 'string'\n },\n 'Channel': 'VOICE'\n },\n 'Collections': [\n {\n 'Metric': {\n 'Name': 'AGENTS_ONLINE'|'AGENTS_AVAILABLE'|'AGENTS_ON_CALL'|'AGENTS_NON_PRODUCTIVE'|'AGENTS_AFTER_CONTACT_WORK'|'AGENTS_ERROR'|'AGENTS_STAFFED'|'CONTACTS_IN_QUEUE'|'OLDEST_CONTACT_AGE'|'CONTACTS_SCHEDULED',\n 'Unit': 'SECONDS'|'COUNT'|'PERCENT'\n },\n 'Value': 123.0\n },\n ]\n },\n ],\n 'DataSnapshotTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef get_federation_token(InstanceId=None):\n \"\"\"\n Retrieves a token for federation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_federation_token(\n InstanceId='string'\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :rtype: dict\n :return: {\n 'Credentials': {\n 'AccessToken': 'string',\n 'AccessTokenExpiration': datetime(2015, 1, 1),\n 'RefreshToken': 'string',\n 'RefreshTokenExpiration': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_metric_data(InstanceId=None, StartTime=None, EndTime=None, Filters=None, Groupings=None, HistoricalMetrics=None, NextToken=None, MaxResults=None):\n \"\"\"\n The GetMetricData operation retrieves historical metrics data from your Amazon Connect instance.\n If you are using an IAM account, it must have permission to the connect:GetMetricData action.\n See also: AWS API Documentation\n \n \n :example: response = client.get_metric_data(\n InstanceId='string',\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n Filters={\n 'Queues': [\n 'string',\n ],\n 'Channels': [\n 'VOICE',\n ]\n },\n Groupings=[\n 'QUEUE'|'CHANNEL',\n ],\n HistoricalMetrics=[\n {\n 'Name': 'CONTACTS_QUEUED'|'CONTACTS_HANDLED'|'CONTACTS_ABANDONED'|'CONTACTS_CONSULTED'|'CONTACTS_AGENT_HUNG_UP_FIRST'|'CONTACTS_HANDLED_INCOMING'|'CONTACTS_HANDLED_OUTBOUND'|'CONTACTS_HOLD_ABANDONS'|'CONTACTS_TRANSFERRED_IN'|'CONTACTS_TRANSFERRED_OUT'|'CONTACTS_TRANSFERRED_IN_FROM_QUEUE'|'CONTACTS_TRANSFERRED_OUT_FROM_QUEUE'|'CONTACTS_MISSED'|'CALLBACK_CONTACTS_HANDLED'|'API_CONTACTS_HANDLED'|'OCCUPANCY'|'HANDLE_TIME'|'AFTER_CONTACT_WORK_TIME'|'QUEUED_TIME'|'ABANDON_TIME'|'QUEUE_ANSWER_TIME'|'HOLD_TIME'|'INTERACTION_TIME'|'INTERACTION_AND_HOLD_TIME'|'SERVICE_LEVEL',\n 'Threshold': {\n 'Comparison': 'LT',\n 'ThresholdValue': 123.0\n },\n 'Statistic': 'SUM'|'MAX'|'AVG',\n 'Unit': 'SECONDS'|'COUNT'|'PERCENT'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column.
The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be specified using a multiple of 5 minutes, such as 10:05, 10:10, 10:15.\n StartTime cannot be earlier than 24 hours before the time of the request. Historical metrics are available in Amazon Connect only for 24 hours.\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The timestamp, in UNIX Epoch time format, at which to end the reporting interval for the retrieval of historical metrics data. The time must be specified using an interval of 5 minutes, such as 11:00, 11:05, 11:10, and must be later than the StartTime timestamp.\n The time range between StartTime and EndTime must be less than 24 hours.\n \n\n :type Filters: dict\n :param Filters: [REQUIRED]\n A Filters object that contains a list of queue IDs or queue ARNs, up to 100, or a list of Channels to use to filter the metrics returned in the response. Metric data is retrieved only for the resources associated with the IDs, ARNs, or Channels included in the filter. You can use both IDs and ARNs together in a request. Only VOICE is supported for Channel.\n To find the ARN for a queue, open the queue you want to use in the Amazon Connect Queue editor. The ARN for the queue is displayed in the address bar as part of the URL. For example, the queue ARN is the set of characters at the end of the URL, after 'id=' such as arn:aws:connect:us-east-1:270923740243:instance/78fb859d-1b7d-44b1-8aa3-12f0835c5855/queue/1d1a4575-9618-40ab-bbeb-81e45795fe61 . The queue ID is also included in the URL, and is the string after 'queue/'.\n Queues (list) --A list of up to 100 queue IDs or queue ARNs to use to filter the metrics retrieved. You can include both IDs and ARNs in a request.\n (string) --\n Channels (list) --The Channel to use as a filter for the metrics returned. Only VOICE is supported.\n (string) --\n \n\n :type Groupings: list\n :param Groupings: The grouping applied to the metrics returned. For example, when results are grouped by queueId, the metrics returned are grouped by queue. The values returned apply to the metrics for each queue rather than aggregated for all queues.\n The current version supports grouping by Queue\n If no Grouping is included in the request, a summary of HistoricalMetrics for all queues is returned.\n (string) --\n \n\n :type HistoricalMetrics: list\n :param HistoricalMetrics: [REQUIRED]\n A list of HistoricalMetric objects that contain the metrics to retrieve with the request.\n A HistoricalMetric object contains: HistoricalMetricName , Statistic , Threshold , and Unit .\n You must list each metric to retrieve data for in the request. 
For each historical metric you include in the request, you must include a Unit and a Statistic .\n The following historical metrics are available:\n CONTACTS_QUEUED\n Unit: COUNT\n Statistic: SUM\n CONTACTS_HANDLED\n Unit: COUNT\n Statistics: SUM\n CONTACTS_ABANDONED\n Unit: COUNT\n Statistics: SUM\n CONTACTS_CONSULTED\n Unit: COUNT\n Statistics: SUM\n CONTACTS_AGENT_HUNG_UP_FIRST\n Unit: COUNT\n Statistics: SUM\n CONTACTS_HANDLED_INCOMING\n Unit: COUNT\n Statistics: SUM\n CONTACTS_HANDLED_OUTBOUND\n Unit: COUNT\n Statistics: SUM\n CONTACTS_HOLD_ABANDONS\n Unit: COUNT\n Statistics: SUM\n CONTACTS_TRANSFERRED_IN\n Unit: COUNT\n Statistics: SUM\n CONTACTS_TRANSFERRED_OUT\n Unit: COUNT\n Statistics: SUM\n CONTACTS_TRANSFERRED_IN_FROM_QUEUE\n Unit: COUNT\n Statistics: SUM\n CONTACTS_TRANSFERRED_OUT_FROM_QUEUE\n Unit: COUNT\n Statistics: SUM\n CALLBACK_CONTACTS_HANDLED\n Unit: COUNT\n Statistics: SUM\n API_CONTACTS_HANDLED\n Unit: COUNT\n Statistics: SUM\n CONTACTS_MISSED\n Unit: COUNT\n Statistics: SUM\n OCCUPANCY\n Unit: PERCENT\n Statistics: AVG\n HANDLE_TIME\n Unit: SECONDS\n Statistics: AVG\n AFTER_CONTACT_WORK_TIME\n Unit: SECONDS\n Statistics: AVG\n QUEUED_TIME\n Unit: SECONDS\n Statistics: MAX\n ABANDON_TIME\n Unit: COUNT\n Statistics: SUM\n QUEUE_ANSWER_TIME\n Unit: SECONDS\n Statistics: AVG\n HOLD_TIME\n Unit: SECONDS\n Statistics: AVG\n INTERACTION_TIME\n Unit: SECONDS\n Statistics: AVG\n INTERACTION_AND_HOLD_TIME\n Unit: SECONDS\n Statistics: AVG\n SERVICE_LEVEL\n Unit: PERCENT\n Statistics: AVG\n Threshold: Only 'Less than' comparisons are supported, with the following service level thresholds: 15, 20, 25, 30, 45, 60, 90, 120, 180, 240, 300, 600\n (dict) --A HistoricalMetric object that contains the Name, Unit, Statistic, and Threshold for the metric.\n Name (string) --The name of the historical metric.\n Threshold (dict) --The threshold for the metric, used with service level metrics.\n Comparison (string) --The Threshold to use to compare service level metrics to. Only 'Less than' (LT) comparisons are supported.\n ThresholdValue (float) --The value of the threshold to compare the metric to. Only 'Less than' (LT) comparisons are supported.\n Statistic (string) --The statistic for the metric.\n Unit (string) --The unit for the metric.\n \n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results.
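# A minimal usage sketch showing how to build the 5-minute-aligned, sub-24h
# reporting window that get_metric_data requires, with one HistoricalMetric;
# the instance and queue IDs are placeholders reused from this documentation,
# and the region is an assumption.
from datetime import datetime, timedelta
import boto3

connect = boto3.client('connect', region_name='us-east-1')  # assumed region
now = datetime.utcnow()
# Snap the end of the window back to the most recent 5-minute boundary.
end = now - timedelta(minutes=now.minute % 5,
                      seconds=now.second,
                      microseconds=now.microsecond)
start = end - timedelta(hours=8)  # must stay within the last 24 hours
response = connect.get_metric_data(
    InstanceId='10a4c4eb-f57e-4d4c-b602-bf39176ced07',             # placeholder
    StartTime=start,
    EndTime=end,
    Filters={'Queues': ['1d1a4575-9618-40ab-bbeb-81e45795fe61']},  # placeholder
    Groupings=['QUEUE'],
    HistoricalMetrics=[
        {'Name': 'CONTACTS_HANDLED', 'Unit': 'COUNT', 'Statistic': 'SUM'},
    ],
)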
Use the value returned in the previous response in the next request to retrieve the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of results to return per page in the response, between 1-100.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'MetricResults': [\n {\n 'Dimensions': {\n 'Queue': {\n 'Id': 'string',\n 'Arn': 'string'\n },\n 'Channel': 'VOICE'\n },\n 'Collections': [\n {\n 'Metric': {\n 'Name': 'CONTACTS_QUEUED'|'CONTACTS_HANDLED'|'CONTACTS_ABANDONED'|'CONTACTS_CONSULTED'|'CONTACTS_AGENT_HUNG_UP_FIRST'|'CONTACTS_HANDLED_INCOMING'|'CONTACTS_HANDLED_OUTBOUND'|'CONTACTS_HOLD_ABANDONS'|'CONTACTS_TRANSFERRED_IN'|'CONTACTS_TRANSFERRED_OUT'|'CONTACTS_TRANSFERRED_IN_FROM_QUEUE'|'CONTACTS_TRANSFERRED_OUT_FROM_QUEUE'|'CONTACTS_MISSED'|'CALLBACK_CONTACTS_HANDLED'|'API_CONTACTS_HANDLED'|'OCCUPANCY'|'HANDLE_TIME'|'AFTER_CONTACT_WORK_TIME'|'QUEUED_TIME'|'ABANDON_TIME'|'QUEUE_ANSWER_TIME'|'HOLD_TIME'|'INTERACTION_TIME'|'INTERACTION_AND_HOLD_TIME'|'SERVICE_LEVEL',\n 'Threshold': {\n 'Comparison': 'LT',\n 'ThresholdValue': 123.0\n },\n 'Statistic': 'SUM'|'MAX'|'AVG',\n 'Unit': 'SECONDS'|'COUNT'|'PERCENT'\n },\n 'Value': 123.0\n },\n ]\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_routing_profiles(InstanceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns an array of RoutingProfileSummary objects that includes information about the routing profiles in your instance.\n See also: AWS API Documentation\n \n \n :example: response = client.list_routing_profiles(\n InstanceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results. 
Use the value returned in the previous response in the next request to retrieve the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of routing profiles to return in the response.\n\n :rtype: dict\n :return: {\n 'RoutingProfileSummaryList': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_security_profiles(InstanceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns an array of SecurityProfileSummary objects that contain information about the security profiles in your instance, including the ARN, Id, and Name of the security profile.\n See also: AWS API Documentation\n \n \n :example: response = client.list_security_profiles(\n InstanceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of security profiles to return.\n\n :rtype: dict\n :return: {\n 'SecurityProfileSummaryList': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_user_hierarchy_groups(InstanceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns a UserHierarchyGroupSummaryList , which is an array of HierarchyGroupSummary objects that contain information about the hierarchy groups in your instance.\n See also: AWS API Documentation\n \n \n :example: response = client.list_user_hierarchy_groups(\n InstanceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results. 
Use the value returned in the previous response in the next request to retrieve the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of hierarchy groups to return.\n\n :rtype: dict\n :return: {\n 'UserHierarchyGroupSummaryList': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'Name': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_users(InstanceId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns a UserSummaryList , which is an array of UserSummary objects.\n See also: AWS API Documentation\n \n \n :example: response = client.list_users(\n InstanceId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in the response.\n\n :rtype: dict\n :return: {\n 'UserSummaryList': [\n {\n 'Id': 'string',\n 'Arn': 'string',\n 'Username': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_outbound_voice_contact(DestinationPhoneNumber=None, ContactFlowId=None, InstanceId=None, ClientToken=None, SourcePhoneNumber=None, QueueId=None, Attributes=None):\n \"\"\"\n The StartOutboundVoiceContact operation initiates a contact flow to place an outbound call to a customer.\n If you are using an IAM account, it must have permission to the connect:StartOutboundVoiceContact action.\n There is a 60 second dialing timeout for this operation. If the call is not connected after 60 seconds, the call fails.\n See also: AWS API Documentation\n \n \n :example: response = client.start_outbound_voice_contact(\n DestinationPhoneNumber='string',\n ContactFlowId='string',\n InstanceId='string',\n ClientToken='string',\n SourcePhoneNumber='string',\n QueueId='string',\n Attributes={\n 'string': 'string'\n }\n )\n \n \n :type DestinationPhoneNumber: string\n :param DestinationPhoneNumber: [REQUIRED]\n The phone number of the customer in E.164 format.\n \n\n :type ContactFlowId: string\n :param ContactFlowId: [REQUIRED]\n The identifier for the contact flow to connect the outbound call to.\n To find the ContactFlowId , open the contact flow you want to use in the Amazon Connect contact flow editor. The ID for the contact flow is displayed in the address bar as part of the URL. For example, the contact flow ID is the set of characters at the end of the URL, after 'contact-flow/' such as 78ea8fd5-2659-4f2b-b528-699760ccfc1b .\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. 
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type ClientToken: string\n :param ClientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The token is valid for 7 days after creation. If a contact is already started, the contact ID is returned. If the contact is disconnected, a new contact is started.\n This field is autopopulated if not provided.\n \n\n :type SourcePhoneNumber: string\n :param SourcePhoneNumber: The phone number, in E.164 format, associated with your Amazon Connect instance to use for the outbound call.\n\n :type QueueId: string\n :param QueueId: The queue to add the call to. If you specify a queue, the phone displayed for caller ID is the phone number specified in the queue. If you do not specify a queue, the queue used will be the queue defined in the contact flow.\n To find the QueueId , open the queue you want to use in the Amazon Connect Queue editor. The ID for the queue is displayed in the address bar as part of the URL. For example, the queue ID is the set of characters at the end of the URL, after 'queue/' such as queue/aeg40574-2d01-51c3-73d6-bf8624d2168c .\n \n\n :type Attributes: dict\n :param Attributes: Specify a custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in contact flows just like any other contact attributes.\n There can be up to 32,768 UTF-8 bytes across all key-value pairs per contact. Attribute keys can include only alphanumeric, dash, and underscore characters.\n For example, if you want to play a greeting when the customer answers the call, you can pass the customer name in attributes similar to the following:\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {\n 'ContactId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef stop_contact(ContactId=None, InstanceId=None):\n \"\"\"\n Ends the contact initiated by the StartOutboundVoiceContact operation.\n If you are using an IAM account, it must have permission to the connect:StopContact action.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_contact(\n ContactId='string',\n InstanceId='string'\n )\n \n \n :type ContactId: string\n :param ContactId: [REQUIRED]\n The unique identifier of the contact to end.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_contact_attributes(InitialContactId=None, InstanceId=None, Attributes=None):\n \"\"\"\n The UpdateContactAttributes operation lets you programmatically create new, or update existing, contact attributes associated with a contact. You can use the operation to add or update attributes for both ongoing and completed contacts. For example, you can update the customer's name or the reason the customer called while the call is active, or add notes about steps that the agent took during the call that are displayed to the next agent that takes the call. 
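As a hedged sketch (the IDs and attribute names below are placeholders, not values from this documentation):\n import boto3\n connect = boto3.client('connect')\n connect.update_contact_attributes(\n InitialContactId='11111111-1111-1111-1111-111111111111',\n InstanceId='22222222-2222-2222-2222-222222222222',\n Attributes={'customerName': 'Jane Doe', 'callReason': 'billing question'}\n )\n 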
You can also use the UpdateContactAttributes operation to update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or identifying abusive callers.\n Contact attributes are available in Amazon Connect for 24 months, and are then deleted.\n You cannot use the operation to update attributes for contacts that occurred prior to the release of the API, September 12, 2018. You can update attributes only for contacts that started after the release of the API. If you attempt to update attributes for a contact that occurred prior to the release of the API, a 400 error is returned. This applies also to queued callbacks that were initiated prior to the release of the API but are still active in your instance.\n See also: AWS API Documentation\n \n \n :example: response = client.update_contact_attributes(\n InitialContactId='string',\n InstanceId='string',\n Attributes={\n 'string': 'string'\n }\n )\n \n \n :type InitialContactId: string\n :param InitialContactId: [REQUIRED]\n The unique identifier of the contact for which to update attributes. This is the identifier for the contact associated with the first interaction with the contact center.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n :type Attributes: dict\n :param Attributes: [REQUIRED]\n Specify a custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in contact flows just like any other contact attributes.\n There can be up to 32,768 UTF-8 bytes across all key-value pairs per contact. Attribute keys can include only alphanumeric, dash, and underscore characters.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_user_hierarchy(HierarchyGroupId=None, UserId=None, InstanceId=None):\n \"\"\"\n Assigns the specified hierarchy group to the user.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_hierarchy(\n HierarchyGroupId='string',\n UserId='string',\n InstanceId='string'\n )\n \n \n :type HierarchyGroupId: string\n :param HierarchyGroupId: The identifier for the hierarchy group to assign to the user.\n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier of the user account to assign the hierarchy group to.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. 
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n \"\"\"\n pass\n\ndef update_user_identity_info(IdentityInfo=None, UserId=None, InstanceId=None):\n \"\"\"\n Updates the identity information for the specified user in a UserIdentityInfo object, including email, first name, and last name.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_identity_info(\n IdentityInfo={\n 'FirstName': 'string',\n 'LastName': 'string',\n 'Email': 'string'\n },\n UserId='string',\n InstanceId='string'\n )\n \n \n :type IdentityInfo: dict\n :param IdentityInfo: [REQUIRED]\n A UserIdentityInfo object.\n FirstName (string) --The first name used in the user account. This is required if you are using Amazon Connect or SAML for identity management.\n LastName (string) --The last name used in the user account. This is required if you are using Amazon Connect or SAML for identity management.\n Email (string) --The email address added to the user account. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier for the user account to update identity information for.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n \"\"\"\n pass\n\ndef update_user_phone_config(PhoneConfig=None, UserId=None, InstanceId=None):\n \"\"\"\n Updates the phone configuration settings in the UserPhoneConfig object for the specified user.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_phone_config(\n PhoneConfig={\n 'PhoneType': 'SOFT_PHONE'|'DESK_PHONE',\n 'AutoAccept': True|False,\n 'AfterContactWorkTimeLimit': 123,\n 'DeskPhoneNumber': 'string'\n },\n UserId='string',\n InstanceId='string'\n )\n \n \n :type PhoneConfig: dict\n :param PhoneConfig: [REQUIRED]\n A UserPhoneConfig object that contains settings for AfterContactWorkTimeLimit , AutoAccept , DeskPhoneNumber , and PhoneType to assign to the user.\n PhoneType (string) -- [REQUIRED]The phone type selected for the user, either Soft phone or Desk phone.\n AutoAccept (boolean) --The Auto accept setting for the user, Yes or No.\n AfterContactWorkTimeLimit (integer) --The After Call Work (ACW) timeout setting, in seconds, for the user.\n DeskPhoneNumber (string) --The phone number for the user's desk phone.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier for the user account to change phone settings for.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. 
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n \"\"\"\n pass\n\ndef update_user_routing_profile(RoutingProfileId=None, UserId=None, InstanceId=None):\n \"\"\"\n Assigns the specified routing profile to a user.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_routing_profile(\n RoutingProfileId='string',\n UserId='string',\n InstanceId='string'\n )\n \n \n :type RoutingProfileId: string\n :param RoutingProfileId: [REQUIRED]\n The identifier of the routing profile to assign to the user.\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier for the user account to assign the routing profile to.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n \"\"\"\n pass\n\ndef update_user_security_profiles(SecurityProfileIds=None, UserId=None, InstanceId=None):\n \"\"\"\n Updates the security profiles assigned to the user.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_security_profiles(\n SecurityProfileIds=[\n 'string',\n ],\n UserId='string',\n InstanceId='string'\n )\n \n \n :type SecurityProfileIds: list\n :param SecurityProfileIds: [REQUIRED]\n The identifiers for the security profiles to assign to the user.\n (string) --\n \n\n :type UserId: string\n :param UserId: [REQUIRED]\n The identifier of the user account to assign the security profiles.\n \n\n :type InstanceId: string\n :param InstanceId: [REQUIRED]\n The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. 
For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.\n \n\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5644233822822571, "alphanum_fraction": 0.5683090090751648, "avg_line_length": 27.466814041137695, "blob_id": "1b321d42c486e9ae48d23e4c8279cdedce524e93", "content_id": "e6b664aacf2ece4eeb60a27ed7d708b22a31db64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12868, "license_type": "permissive", "max_line_length": 226, "num_lines": 452, "path": "/pyboto3/mobile.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_project(name=None, region=None, contents=None, snapshotId=None):\n \"\"\"\n Creates an AWS Mobile Hub project.\n See also: AWS API Documentation\n \n \n :example: response = client.create_project(\n name='string',\n region='string',\n contents=b'bytes'|file,\n snapshotId='string'\n )\n \n \n :type name: string\n :param name: Name of the project.\n\n :type region: string\n :param region: Default region where project resources should be created.\n\n :type contents: bytes or seekable file-like object\n :param contents: ZIP or YAML file which contains configuration settings to be used when creating the project. This may be the contents of the file downloaded from the URL provided in an export project operation.\n\n :type snapshotId: string\n :param snapshotId: Unique identifier for an exported snapshot of project configuration. 
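# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated stubs).
# It exercises get_metric_data against a real boto3 client, following the two
# timestamp rules documented above: times must fall on 5-minute boundaries,
# and the window must stay inside the trailing 24 hours. The instance and
# queue IDs are the placeholder values used elsewhere in these docstrings,
# and the snippet assumes working AWS credentials.
if __name__ == '__main__':
    from datetime import datetime, timedelta
    import boto3

    connect = boto3.client('connect')

    # Round the current time down to a 5-minute boundary, then report on the
    # previous hour (well inside the 24-hour retention window).
    now = datetime.utcnow()
    end = now.replace(minute=now.minute - now.minute % 5, second=0, microsecond=0)
    start = end - timedelta(hours=1)

    response = connect.get_metric_data(
        InstanceId='10a4c4eb-f57e-4d4c-b602-bf39176ced07',       # placeholder ID
        StartTime=start,
        EndTime=end,
        Filters={
            'Queues': ['1d1a4575-9618-40ab-bbeb-81e45795fe61'],  # placeholder ID
            'Channels': ['VOICE'],                               # only VOICE is supported
        },
        Groupings=['QUEUE'],  # 'QUEUE' is assumed as the grouping value
        HistoricalMetrics=[
            {'Name': 'CONTACTS_HANDLED', 'Unit': 'COUNT', 'Statistic': 'SUM'},
            {'Name': 'HANDLE_TIME', 'Unit': 'SECONDS', 'Statistic': 'AVG'},
        ],
    )
    for result in response.get('MetricResults', []):
        print(result['Dimensions'], result['Collections'])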
This snapshot identifier is included in the share URL when a project is exported.\n\n :rtype: dict\n :return: {\n 'details': {\n 'name': 'string',\n 'projectId': 'string',\n 'region': 'string',\n 'state': 'NORMAL'|'SYNCING'|'IMPORTING',\n 'createdDate': datetime(2015, 1, 1),\n 'lastUpdatedDate': datetime(2015, 1, 1),\n 'consoleUrl': 'string',\n 'resources': [\n {\n 'type': 'string',\n 'name': 'string',\n 'arn': 'string',\n 'feature': 'string',\n 'attributes': {\n 'string': 'string'\n }\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_project(projectId=None):\n \"\"\"\n Deletes a project in AWS Mobile Hub.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_project(\n projectId='string'\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n Unique project identifier.\n \n\n :rtype: dict\n :return: {\n 'deletedResources': [\n {\n 'type': 'string',\n 'name': 'string',\n 'arn': 'string',\n 'feature': 'string',\n 'attributes': {\n 'string': 'string'\n }\n },\n ],\n 'orphanedResources': [\n {\n 'type': 'string',\n 'name': 'string',\n 'arn': 'string',\n 'feature': 'string',\n 'attributes': {\n 'string': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_bundle(bundleId=None):\n \"\"\"\n Get the bundle details for the requested bundle ID.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_bundle(\n bundleId='string'\n )\n \n \n :type bundleId: string\n :param bundleId: [REQUIRED]\n Unique bundle identifier.\n \n\n :rtype: dict\n :return: {\n 'details': {\n 'bundleId': 'string',\n 'title': 'string',\n 'version': 'string',\n 'description': 'string',\n 'iconUrl': 'string',\n 'availablePlatforms': [\n 'OSX'|'WINDOWS'|'LINUX'|'OBJC'|'SWIFT'|'ANDROID'|'JAVASCRIPT',\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_project(projectId=None, syncFromResources=None):\n \"\"\"\n Gets details about a project in AWS Mobile Hub.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_project(\n projectId='string',\n syncFromResources=True|False\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n Unique project identifier.\n \n\n :type syncFromResources: boolean\n :param syncFromResources: If set to true, causes AWS Mobile Hub to synchronize information from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub project.\n\n :rtype: dict\n :return: {\n 'details': {\n 'name': 'string',\n 'projectId': 'string',\n 'region': 'string',\n 'state': 'NORMAL'|'SYNCING'|'IMPORTING',\n 'createdDate': datetime(2015, 1, 1),\n 'lastUpdatedDate': datetime(2015, 1, 1),\n 'consoleUrl': 'string',\n 'resources': [\n {\n 'type': 'string',\n 'name': 'string',\n 'arn': 'string',\n 'feature': 'string',\n 'attributes': {\n 'string': 'string'\n }\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef export_bundle(bundleId=None, projectId=None, platform=None):\n \"\"\"\n Generates customized software development kit (SDK) and/or tool packages used to integrate mobile web or mobile app clients with backend AWS resources.\n See also: AWS API Documentation\n \n \n :example: response = client.export_bundle(\n bundleId='string',\n projectId='string',\n platform='OSX'|'WINDOWS'|'LINUX'|'OBJC'|'SWIFT'|'ANDROID'|'JAVASCRIPT'\n )\n \n \n :type bundleId: string\n :param bundleId: [REQUIRED]\n Unique bundle identifier.\n \n\n :type projectId: string\n :param projectId: Unique project identifier.\n\n :type platform: string\n :param platform: Developer desktop or target application platform.\n\n :rtype: 
dict\n :return: {\n 'downloadUrl': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef export_project(projectId=None):\n \"\"\"\n Exports project configuration to a snapshot which can be downloaded and shared. Note that mobile app push credentials are encrypted in exported projects, so they can only be shared successfully within the same AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.export_project(\n projectId='string'\n )\n \n \n :type projectId: string\n :param projectId: [REQUIRED]\n Unique project identifier.\n \n\n :rtype: dict\n :return: {\n 'downloadUrl': 'string',\n 'shareUrl': 'string',\n 'snapshotId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_bundles(maxResults=None, nextToken=None):\n \"\"\"\n List all available bundles.\n See also: AWS API Documentation\n \n \n :example: response = client.list_bundles(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: Maximum number of records to list in a single response.\n\n :type nextToken: string\n :param nextToken: Pagination token. Set to null to start listing bundles from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more bundles.\n\n :rtype: dict\n :return: {\n 'bundleList': [\n {\n 'bundleId': 'string',\n 'title': 'string',\n 'version': 'string',\n 'description': 'string',\n 'iconUrl': 'string',\n 'availablePlatforms': [\n 'OSX'|'WINDOWS'|'LINUX'|'OBJC'|'SWIFT'|'ANDROID'|'JAVASCRIPT',\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_projects(maxResults=None, nextToken=None):\n \"\"\"\n Lists projects in AWS Mobile Hub.\n See also: AWS API Documentation\n \n \n :example: response = client.list_projects(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: Maximum number of records to list in a single response.\n\n :type nextToken: string\n :param nextToken: Pagination token. Set to null to start listing projects from start. 
If non-null pagination token is returned in a result, then pass its value in here in another request to list more projects.\n\n :rtype: dict\n :return: {\n 'projects': [\n {\n 'name': 'string',\n 'projectId': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_project(contents=None, projectId=None):\n \"\"\"\n Update an existing project.\n See also: AWS API Documentation\n \n \n :example: response = client.update_project(\n contents=b'bytes'|file,\n projectId='string'\n )\n \n \n :type contents: bytes or seekable file-like object\n :param contents: ZIP or YAML file which contains project configuration to be updated. This should be the contents of the file downloaded from the URL provided in an export project operation.\n\n :type projectId: string\n :param projectId: [REQUIRED]\n Unique project identifier.\n \n\n :rtype: dict\n :return: {\n 'details': {\n 'name': 'string',\n 'projectId': 'string',\n 'region': 'string',\n 'state': 'NORMAL'|'SYNCING'|'IMPORTING',\n 'createdDate': datetime(2015, 1, 1),\n 'lastUpdatedDate': datetime(2015, 1, 1),\n 'consoleUrl': 'string',\n 'resources': [\n {\n 'type': 'string',\n 'name': 'string',\n 'arn': 'string',\n 'feature': 'string',\n 'attributes': {\n 'string': 'string'\n }\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6551614999771118, "alphanum_fraction": 0.6606453657150269, "avg_line_length": 43.646018981933594, "blob_id": "c80561d0df55235e836274bfaa5f3ad0ff5112a1", "content_id": "7e188c53ef04971543d5a2b583c79efc2f838dcc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75676, "license_type": "permissive", "max_line_length": 577, "num_lines": 1695, "path": "/pyboto3/pinpointemail.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
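# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated stubs).
# It walks the nextToken pagination protocol that list_projects and
# list_bundles document above: start with no token, and keep passing the
# returned token back until the service stops returning one. Assumes working
# AWS credentials.
if __name__ == '__main__':
    import boto3

    mobile = boto3.client('mobile')

    projects = []
    token = None
    while True:
        kwargs = {'maxResults': 50}
        if token:
            kwargs['nextToken'] = token
        page = mobile.list_projects(**kwargs)
        projects.extend(page.get('projects', []))
        token = page.get('nextToken')
        if not token:
            break  # no more pages
    for project in projects:
        print(project['projectId'], project['name'])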
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_configuration_set(ConfigurationSetName=None, TrackingOptions=None, DeliveryOptions=None, ReputationOptions=None, SendingOptions=None):\n \"\"\"\n Create a configuration set. Configuration sets are groups of rules that you can apply to the emails you send using Amazon Pinpoint. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set(\n ConfigurationSetName='string',\n TrackingOptions={\n 'CustomRedirectDomain': 'string'\n },\n DeliveryOptions={\n 'SendingPoolName': 'string'\n },\n ReputationOptions={\n 'ReputationMetricsEnabled': True|False,\n 'LastFreshStart': datetime(2015, 1, 1)\n },\n SendingOptions={\n 'SendingEnabled': True|False\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set.\n\n :type TrackingOptions: dict\n :param TrackingOptions: An object that defines the open and click tracking options for emails that you send using the configuration set.\n CustomRedirectDomain (string) -- [REQUIRED]The domain that you want to use for tracking open and click events.\n \n\n :type DeliveryOptions: dict\n :param DeliveryOptions: An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.\n SendingPoolName (string) --The name of the dedicated IP pool that you want to associate with the configuration set.\n \n\n :type ReputationOptions: dict\n :param ReputationOptions: An object that defines whether or not Amazon Pinpoint collects reputation metrics for the emails that you send that use the configuration set.\n ReputationMetricsEnabled (boolean) --If true , tracking of reputation metrics is enabled for the configuration set. If false , tracking of reputation metrics is disabled for the configuration set.\n LastFreshStart (datetime) --The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.\n \n\n :type SendingOptions: dict\n :param SendingOptions: An object that defines whether or not Amazon Pinpoint can send email that you send using the configuration set.\n SendingEnabled (boolean) --If true , email sending is enabled for the configuration set. If false , email sending is disabled for the configuration set.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_configuration_set_event_destination(ConfigurationSetName=None, EventDestinationName=None, EventDestination=None):\n \"\"\"\n Create an event destination. In Amazon Pinpoint, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. 
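A hedged sketch (the configuration set name, destination name, and topic ARN below are placeholders):\n client.create_configuration_set_event_destination(\n ConfigurationSetName='my-config-set',\n EventDestinationName='bounce-and-complaint-events',\n EventDestination={\n 'Enabled': True,\n 'MatchingEventTypes': ['BOUNCE', 'COMPLAINT'],\n 'SnsDestination': {'TopicArn': 'arn:aws:sns:us-east-1:111122223333:email-events'}\n }\n )\n 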
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n A single configuration set can include more than one event destination.\n See also: AWS API Documentation\n \n \n :example: response = client.create_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string',\n EventDestination={\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',\n ],\n 'KinesisFirehoseDestination': {\n 'IamRoleArn': 'string',\n 'DeliveryStreamArn': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SnsDestination': {\n 'TopicArn': 'string'\n },\n 'PinpointDestination': {\n 'ApplicationArn': 'string'\n }\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to add an event destination to.\n \n\n :type EventDestinationName: string\n :param EventDestinationName: [REQUIRED]\n A name that identifies the event destination within the configuration set.\n \n\n :type EventDestination: dict\n :param EventDestination: [REQUIRED]\n An object that defines the event destination.\n Enabled (boolean) --If true , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition .\n If false , the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.\n MatchingEventTypes (list) --An array that specifies which events Amazon Pinpoint should send to the destinations in this EventDestinationDefinition .\n (string) --An email sending event type. For example, email sends, opens, and bounces are all email events.\n KinesisFirehoseDestination (dict) --An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.\n IamRoleArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.\n DeliveryStreamArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.\n CloudWatchDestination (dict) --An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.\n DimensionConfigurations (list) -- [REQUIRED]An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.\n (dict) --An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.\n DimensionName (string) -- [REQUIRED]The name of an Amazon CloudWatch dimension associated with an email sending metric. 
The name has to meet the following criteria:\n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n DimensionValueSource (string) -- [REQUIRED]The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose messageTag . If you want Amazon Pinpoint to use your own email headers, choose emailHeader . If you want Amazon Pinpoint to use link tags, choose linkTags .\n DefaultDimensionValue (string) -- [REQUIRED]The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:\n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n \n \n SnsDestination (dict) --An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.\n TopicArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n PinpointDestination (dict) --An object that defines an Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.\n ApplicationArn (string) --The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_dedicated_ip_pool(PoolName=None):\n \"\"\"\n Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your Amazon Pinpoint account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, Amazon Pinpoint sends it using only the IP addresses in the associated pool.\n See also: AWS API Documentation\n \n \n :example: response = client.create_dedicated_ip_pool(\n PoolName='string'\n )\n \n \n :type PoolName: string\n :param PoolName: [REQUIRED]\n The name of the dedicated IP pool.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef create_deliverability_test_report(ReportName=None, FromEmailAddress=None, Content=None):\n \"\"\"\n Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon Pinpoint then sends that message to special email addresses spread across several major email providers. 
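As a hedged sketch (the report name and addresses are placeholders; the From address has to be an identity you have already verified):\n client.create_deliverability_test_report(\n ReportName='placement-test-01',\n FromEmailAddress='sender@example.com',\n Content={'Simple': {\n 'Subject': {'Data': 'Placement test'},\n 'Body': {'Text': {'Data': 'Test message body.'}}\n }}\n )\n 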
After about 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport operation to view the results of the test.\n See also: AWS API Documentation\n \n \n :example: response = client.create_deliverability_test_report(\n ReportName='string',\n FromEmailAddress='string',\n Content={\n 'Simple': {\n 'Subject': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Body': {\n 'Text': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Html': {\n 'Data': 'string',\n 'Charset': 'string'\n }\n }\n },\n 'Raw': {\n 'Data': b'bytes'\n }\n }\n )\n \n \n :type ReportName: string\n :param ReportName: A unique name that helps you to identify the predictive inbox placement test when you retrieve the results.\n\n :type FromEmailAddress: string\n :param FromEmailAddress: [REQUIRED]\n The email address that the predictive inbox placement test email was sent from.\n \n\n :type Content: dict\n :param Content: [REQUIRED]\n The HTML body of the message that you sent when you performed the predictive inbox placement test.\n Simple (dict) --The simple email message. The message consists of a subject and a message body.\n Subject (dict) -- [REQUIRED]The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in RFC 2047 .\n Data (string) -- [REQUIRED]The content of the message itself.\n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n Body (dict) -- [REQUIRED]The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.\n Text (dict) --An object that represents the version of the message that is displayed in email clients that don't support HTML, or clients where the recipient has disabled HTML rendering.\n Data (string) -- [REQUIRED]The content of the message itself.\n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n Html (dict) --An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.\n Data (string) -- [REQUIRED]The content of the message itself.\n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n \n Raw (dict) --The raw email message. 
The message has to meet the following criteria:\n The message has to contain a header and a body, separated by one blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n If you include attachments, they must be in a file format that Amazon Pinpoint supports.\n The entire message must be Base64 encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.\n The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321 .\n Data (bytes) -- [REQUIRED]The raw email message. The message has to meet the following criteria:\n The message has to contain a header and a body, separated by one blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n Attachments must be in a file format that Amazon Pinpoint supports.\n The entire message must be Base64 encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.\n The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321 .\n \n \n\n :rtype: dict\n :return: {\n 'ReportId': 'string',\n 'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'\n }\n \n \n \"\"\"\n pass\n\ndef create_email_identity(EmailIdentity=None):\n \"\"\"\n Verifies an email identity for use with Amazon Pinpoint. In Amazon Pinpoint, an identity is an email address or domain that you use when you send email. Before you can use an identity to send email with Amazon Pinpoint, you first have to verify it. By verifying an address, you demonstrate that you're the owner of the address, and that you've given Amazon Pinpoint permission to send email from the address.\n When you verify an email address, Amazon Pinpoint sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.\n When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon Pinpoint detects these records in the DNS configuration for your domain. It usually takes around 72 hours to complete the domain verification process.\n See also: AWS API Documentation\n \n \n :example: response = client.create_email_identity(\n EmailIdentity='string'\n )\n \n \n :type EmailIdentity: string\n :param EmailIdentity: [REQUIRED]\n The email address or domain that you want to verify.\n \n\n :rtype: dict\n :return: {\n 'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',\n 'VerifiedForSendingStatus': True|False,\n 'DkimAttributes': {\n 'SigningEnabled': True|False,\n 'Status': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE'|'NOT_STARTED',\n 'Tokens': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_configuration_set(ConfigurationSetName=None):\n \"\"\"\n Delete an existing configuration set.\n In Amazon Pinpoint, configuration sets are groups of rules that you can apply to the emails you send. 
You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_configuration_set_event_destination(ConfigurationSetName=None, EventDestinationName=None):\n \"\"\"\n Delete an event destination.\n In Amazon Pinpoint, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that contains the event destination that you want to delete.\n \n\n :type EventDestinationName: string\n :param EventDestinationName: [REQUIRED]\n The name of the event destination that you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_dedicated_ip_pool(PoolName=None):\n \"\"\"\n Delete a dedicated IP pool.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_dedicated_ip_pool(\n PoolName='string'\n )\n \n \n :type PoolName: string\n :param PoolName: [REQUIRED]\n The name of the dedicated IP pool that you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_email_identity(EmailIdentity=None):\n \"\"\"\n Deletes an email identity that you previously verified for use with Amazon Pinpoint. An identity can be either an email address or a domain name.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_email_identity(\n EmailIdentity='string'\n )\n \n \n :type EmailIdentity: string\n :param EmailIdentity: [REQUIRED]\n The identity (that is, the email address or domain) that you want to delete from your Amazon Pinpoint account.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_account():\n \"\"\"\n Obtain information about the email-sending status and capabilities of your Amazon Pinpoint account in the current AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.get_account()\n \n \n :rtype: dict\n :return: {\n 'SendQuota': {\n 'Max24HourSend': 123.0,\n 'MaxSendRate': 123.0,\n 'SentLast24Hours': 123.0\n },\n 'SendingEnabled': True|False,\n 'DedicatedIpAutoWarmupEnabled': True|False,\n 'EnforcementStatus': 'string',\n 'ProductionAccessEnabled': True|False\n }\n \n \n \"\"\"\n pass\n\ndef get_blacklist_reports(BlacklistItemNames=None):\n \"\"\"\n Retrieve a list of the blacklists that your dedicated IP addresses appear on.\n See also: AWS API Documentation\n \n \n :example: response = client.get_blacklist_reports(\n BlacklistItemNames=[\n 'string',\n ]\n )\n \n \n :type BlacklistItemNames: list\n :param BlacklistItemNames: [REQUIRED]\n A list of IP addresses that you want to retrieve blacklist information about. You can only specify the dedicated IP addresses that you use to send email using Amazon Pinpoint or Amazon SES.\n (string) --An IP address that you want to obtain blacklist information for.\n \n\n :rtype: dict\n :return: {\n 'BlacklistReport': {\n 'string': [\n {\n 'RblName': 'string',\n 'ListingTime': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_configuration_set(ConfigurationSetName=None):\n \"\"\"\n Get information about an existing configuration set, including the dedicated IP pool that it's associated with, whether or not it's enabled for sending email, and more.\n In Amazon Pinpoint, configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: AWS API Documentation\n \n \n :example: response = client.get_configuration_set(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to obtain more information about.\n \n\n :rtype: dict\n :return: {\n 'ConfigurationSetName': 'string',\n 'TrackingOptions': {\n 'CustomRedirectDomain': 'string'\n },\n 'DeliveryOptions': {\n 'SendingPoolName': 'string'\n },\n 'ReputationOptions': {\n 'ReputationMetricsEnabled': True|False,\n 'LastFreshStart': datetime(2015, 1, 1)\n },\n 'SendingOptions': {\n 'SendingEnabled': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_configuration_set_event_destinations(ConfigurationSetName=None):\n \"\"\"\n Retrieve a list of event destinations that are associated with a configuration set.\n In Amazon Pinpoint, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n See also: AWS API Documentation\n \n \n :example: response = client.get_configuration_set_event_destinations(\n ConfigurationSetName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that contains the event destination.\n \n\n :rtype: dict\n :return: {\n 'EventDestinations': [\n {\n 'Name': 'string',\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',\n ],\n 'KinesisFirehoseDestination': {\n 'IamRoleArn': 'string',\n 'DeliveryStreamArn': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SnsDestination': {\n 'TopicArn': 'string'\n },\n 'PinpointDestination': {\n 'ApplicationArn': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n \n \"\"\"\n pass\n\ndef get_dedicated_ip(Ip=None):\n \"\"\"\n Get information about a dedicated IP address, including the name of the dedicated IP pool that it's associated with, as well as information about the automatic warm-up process for the address.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dedicated_ip(\n Ip='string'\n )\n \n \n :type Ip: string\n :param Ip: [REQUIRED]\n The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that's associated with your Amazon Pinpoint account.\n \n\n :rtype: dict\n :return: {\n 'DedicatedIp': {\n 'Ip': 'string',\n 'WarmupStatus': 'IN_PROGRESS'|'DONE',\n 'WarmupPercentage': 123,\n 'PoolName': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_dedicated_ips(PoolName=None, NextToken=None, PageSize=None):\n \"\"\"\n List the dedicated IP addresses that are associated with your Amazon Pinpoint account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dedicated_ips(\n PoolName='string',\n NextToken='string',\n PageSize=123\n )\n \n \n :type PoolName: string\n :param PoolName: The name of the IP pool that the dedicated IP address is associated with.\n\n :type NextToken: string\n :param NextToken: A token returned from a previous call to GetDedicatedIps to indicate the position of the dedicated IP pool in the list of IP pools.\n\n :type PageSize: integer\n :param PageSize: The number of results to show in a single call to GetDedicatedIpsRequest . 
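A rough paging sketch that collects every page of results (assumptions: the 'pinpoint-email' client name and the pool name 'my-pool' are illustrative, not values defined by this API):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    ips, token = [], None\n    while True:\n        kwargs = {'PoolName': 'my-pool', 'PageSize': 50}  # 'my-pool' is hypothetical\n        if token:\n            kwargs['NextToken'] = token\n        page = client.get_dedicated_ips(**kwargs)\n        ips.extend(page.get('DedicatedIps', []))\n        token = page.get('NextToken')\n        if not token:\n            break\n 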
If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.\n\n :rtype: dict\n :return: {\n 'DedicatedIps': [\n {\n 'Ip': 'string',\n 'WarmupStatus': 'IN_PROGRESS'|'DONE',\n 'WarmupPercentage': 123,\n 'PoolName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n IN_PROGRESS The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.\n DONE The dedicated IP warm-up process is complete, and the IP address is ready to use.\n \n \"\"\"\n pass\n\ndef get_deliverability_dashboard_options():\n \"\"\"\n Show the status of the Deliverability dashboard. When the Deliverability dashboard is enabled, you gain access to reputation metrics for the domains that you use to send email using Amazon Pinpoint. You also gain the ability to perform predictive inbox placement tests.\n When you use the Deliverability dashboard, you pay a monthly charge of USD$1,250.00, in addition to any other fees that you accrue by using Amazon Pinpoint. If you enable the Deliverability dashboard after the first day of a calendar month, AWS prorates the monthly charge based on how many days have elapsed in the current calendar month.\n See also: AWS API Documentation\n \n \n :example: response = client.get_deliverability_dashboard_options()\n \n \n :rtype: dict\n :return: {\n 'DashboardEnabled': True|False\n }\n \n \n \"\"\"\n pass\n\ndef get_deliverability_test_report(ReportId=None):\n \"\"\"\n Retrieve the results of a predictive inbox placement test.\n See also: AWS API Documentation\n \n \n :example: response = client.get_deliverability_test_report(\n ReportId='string'\n )\n \n \n :type ReportId: string\n :param ReportId: [REQUIRED]\n A unique string that identifies the predictive inbox placement test.\n \n\n :rtype: dict\n :return: {\n 'DeliverabilityTestReport': {\n 'ReportId': 'string',\n 'ReportName': 'string',\n 'Subject': 'string',\n 'FromEmailAddress': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'\n },\n 'OverallPlacement': {\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0,\n 'MissingPercentage': 123.0,\n 'SpfPercentage': 123.0,\n 'DkimPercentage': 123.0\n },\n 'IspPlacements': [\n {\n 'IspName': 'string',\n 'PlacementStatistics': {\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0,\n 'MissingPercentage': 123.0,\n 'SpfPercentage': 123.0,\n 'DkimPercentage': 123.0\n }\n },\n ],\n 'Message': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_domain_statistics_report(Domain=None, StartDate=None, EndDate=None):\n \"\"\"\n Retrieve inbox placement and engagement rates for the domains that you use to send email.\n See also: AWS API Documentation\n \n \n :example: response = client.get_domain_statistics_report(\n Domain='string',\n StartDate=datetime(2015, 1, 1),\n EndDate=datetime(2015, 1, 1)\n )\n \n \n :type Domain: string\n :param Domain: [REQUIRED]\n The domain that you want to obtain deliverability metrics for.\n \n\n :type StartDate: datetime\n :param StartDate: [REQUIRED]\n The first day (in Unix time) that you want to obtain domain deliverability metrics for.\n \n\n :type EndDate: datetime\n :param EndDate: [REQUIRED]\n The last day (in Unix time) that you want to obtain domain deliverability metrics for. 
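For instance, a request covering the previous two weeks might look like the following sketch (assumptions: 'example.com' is a placeholder domain, and the dates must respect the 30-day window described next):\n \n    from datetime import datetime, timedelta\n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    end = datetime.utcnow()\n    start = end - timedelta(days=14)  # stays inside the 30-day limit\n    report = client.get_domain_statistics_report(\n        Domain='example.com', StartDate=start, EndDate=end\n    )\n 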
The EndDate that you specify has to be less than or equal to 30 days after the StartDate .\n \n\n :rtype: dict\n :return: {\n 'OverallVolume': {\n 'VolumeStatistics': {\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'ProjectedInbox': 123,\n 'ProjectedSpam': 123\n },\n 'ReadRatePercent': 123.0,\n 'DomainIspPlacements': [\n {\n 'IspName': 'string',\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0\n },\n ]\n },\n 'DailyVolumes': [\n {\n 'StartDate': datetime(2015, 1, 1),\n 'VolumeStatistics': {\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'ProjectedInbox': 123,\n 'ProjectedSpam': 123\n },\n 'DomainIspPlacements': [\n {\n 'IspName': 'string',\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0\n },\n ]\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_email_identity(EmailIdentity=None):\n \"\"\"\n Provides information about a specific identity associated with your Amazon Pinpoint account, including the identity's verification status, its DKIM authentication status, and its custom Mail-From settings.\n See also: AWS API Documentation\n \n \n :example: response = client.get_email_identity(\n EmailIdentity='string'\n )\n \n \n :type EmailIdentity: string\n :param EmailIdentity: [REQUIRED]\n The email identity that you want to retrieve details for.\n \n\n :rtype: dict\n :return: {\n 'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',\n 'FeedbackForwardingStatus': True|False,\n 'VerifiedForSendingStatus': True|False,\n 'DkimAttributes': {\n 'SigningEnabled': True|False,\n 'Status': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE'|'NOT_STARTED',\n 'Tokens': [\n 'string',\n ]\n },\n 'MailFromAttributes': {\n 'MailFromDomain': 'string',\n 'MailFromDomainStatus': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE',\n 'BehaviorOnMxFailure': 'USE_DEFAULT_VALUE'|'REJECT_MESSAGE'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_configuration_sets(NextToken=None, PageSize=None):\n \"\"\"\n List all of the configuration sets associated with your Amazon Pinpoint account in the current region.\n In Amazon Pinpoint, configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. 
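In practice, that reference is simply the configuration set name passed at send time; a minimal sketch (the addresses and set name are assumed placeholders):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    client.send_email(\n        FromEmailAddress='sender@example.com',  # assumed verified sender\n        Destination={'ToAddresses': ['to@example.com']},\n        Content={'Simple': {'Subject': {'Data': 'Hi'},\n                            'Body': {'Text': {'Data': 'Hello'}}}},\n        ConfigurationSetName='my-configuration-set'  # the reference to the set\n    )\n 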
When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: AWS API Documentation\n \n \n :example: response = client.list_configuration_sets(\n NextToken='string',\n PageSize=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListConfigurationSets to indicate the position in the list of configuration sets.\n\n :type PageSize: integer\n :param PageSize: The number of results to show in a single call to ListConfigurationSets . If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.\n\n :rtype: dict\n :return: {\n 'ConfigurationSets': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_dedicated_ip_pools(NextToken=None, PageSize=None):\n \"\"\"\n List all of the dedicated IP pools that exist in your Amazon Pinpoint account in the current AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_dedicated_ip_pools(\n NextToken='string',\n PageSize=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListDedicatedIpPools to indicate the position in the list of dedicated IP pools.\n\n :type PageSize: integer\n :param PageSize: The number of results to show in a single call to ListDedicatedIpPools . If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.\n\n :rtype: dict\n :return: {\n 'DedicatedIpPools': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_deliverability_test_reports(NextToken=None, PageSize=None):\n \"\"\"\n Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete, you can use the GetDeliverabilityTestReport operation to view the results.\n See also: AWS API Documentation\n \n \n :example: response = client.list_deliverability_test_reports(\n NextToken='string',\n PageSize=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListDeliverabilityTestReports to indicate the position in the list of predictive inbox placement tests.\n\n :type PageSize: integer\n :param PageSize: The number of results to show in a single call to ListDeliverabilityTestReports . If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.\n The value you specify has to be at least 0, and can be no more than 1000.\n \n\n :rtype: dict\n :return: {\n 'DeliverabilityTestReports': [\n {\n 'ReportId': 'string',\n 'ReportName': 'string',\n 'Subject': 'string',\n 'FromEmailAddress': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_email_identities(NextToken=None, PageSize=None):\n \"\"\"\n Returns a list of all of the email identities that are associated with your Amazon Pinpoint account. An identity can be either an email address or a domain. 
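A small enumeration sketch (illustrative only; it prints each identity's type, name, and sending status):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    resp = client.list_email_identities(PageSize=100)\n    for ident in resp.get('EmailIdentities', []):\n        print(ident['IdentityType'], ident['IdentityName'], ident['SendingEnabled'])\n 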
This operation returns identities that are verified as well as those that aren't.\n See also: AWS API Documentation\n \n \n :example: response = client.list_email_identities(\n NextToken='string',\n PageSize=123\n )\n \n \n :type NextToken: string\n :param NextToken: A token returned from a previous call to ListEmailIdentities to indicate the position in the list of identities.\n\n :type PageSize: integer\n :param PageSize: The number of results to show in a single call to ListEmailIdentities . If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.\n The value you specify has to be at least 0, and can be no more than 1000.\n \n\n :rtype: dict\n :return: {\n 'EmailIdentities': [\n {\n 'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',\n 'IdentityName': 'string',\n 'SendingEnabled': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n EMAIL_ADDRESS The identity is an email address.\n DOMAIN The identity is a domain.\n MANAGED_DOMAIN The identity is a domain that is managed by AWS.\n \n \"\"\"\n pass\n\ndef put_account_dedicated_ip_warmup_attributes(AutoWarmupEnabled=None):\n \"\"\"\n Enable or disable the automatic warm-up feature for dedicated IP addresses.\n See also: AWS API Documentation\n \n \n :example: response = client.put_account_dedicated_ip_warmup_attributes(\n AutoWarmupEnabled=True|False\n )\n \n \n :type AutoWarmupEnabled: boolean\n :param AutoWarmupEnabled: Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon Pinpoint account in the current AWS Region. Set to true to enable the automatic warm-up feature, or set to false to disable it.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_account_sending_attributes(SendingEnabled=None):\n \"\"\"\n Enable or disable the ability of your account to send email.\n See also: AWS API Documentation\n \n \n :example: response = client.put_account_sending_attributes(\n SendingEnabled=True|False\n )\n \n \n :type SendingEnabled: boolean\n :param SendingEnabled: Enables or disables your account's ability to send email. Set to true to enable email sending, or set to false to disable email sending.\n Note\n If AWS paused your account's ability to send email, you can't use this operation to resume your account's ability to send email.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_configuration_set_delivery_options(ConfigurationSetName=None, SendingPoolName=None):\n \"\"\"\n Associate a configuration set with a dedicated IP pool. 
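A minimal sketch, assuming the configuration set and the pool already exist (both names below are placeholders):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    client.put_configuration_set_delivery_options(\n        ConfigurationSetName='transactional',  # hypothetical set name\n        SendingPoolName='transactional-ips'    # hypothetical pool name\n    )\n 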
You can use dedicated IP pools to create groups of dedicated IP addresses for sending specific types of email.\n See also: AWS API Documentation\n \n \n :example: response = client.put_configuration_set_delivery_options(\n ConfigurationSetName='string',\n SendingPoolName='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to associate with a dedicated IP pool.\n \n\n :type SendingPoolName: string\n :param SendingPoolName: The name of the dedicated IP pool that you want to associate with the configuration set.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_configuration_set_reputation_options(ConfigurationSetName=None, ReputationMetricsEnabled=None):\n \"\"\"\n Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.put_configuration_set_reputation_options(\n ConfigurationSetName='string',\n ReputationMetricsEnabled=True|False\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to enable or disable reputation metric tracking for.\n \n\n :type ReputationMetricsEnabled: boolean\n :param ReputationMetricsEnabled: If true , tracking of reputation metrics is enabled for the configuration set. If false , tracking of reputation metrics is disabled for the configuration set.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_configuration_set_sending_options(ConfigurationSetName=None, SendingEnabled=None):\n \"\"\"\n Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.put_configuration_set_sending_options(\n ConfigurationSetName='string',\n SendingEnabled=True|False\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to enable or disable email sending for.\n \n\n :type SendingEnabled: boolean\n :param SendingEnabled: If true , email sending is enabled for the configuration set. 
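For example, a sketch that pauses sending for a single set (the set name is an assumed placeholder):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    client.put_configuration_set_sending_options(\n        ConfigurationSetName='my-configuration-set',\n        SendingEnabled=False  # pause sending for this set only\n    )\n 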
If false , email sending is disabled for the configuration set.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_configuration_set_tracking_options(ConfigurationSetName=None, CustomRedirectDomain=None):\n \"\"\"\n Specify a custom domain to use for open and click tracking elements in email that you send using Amazon Pinpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.put_configuration_set_tracking_options(\n ConfigurationSetName='string',\n CustomRedirectDomain='string'\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that you want to add a custom tracking domain to.\n \n\n :type CustomRedirectDomain: string\n :param CustomRedirectDomain: The domain that you want to use to track open and click events.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_dedicated_ip_in_pool(Ip=None, DestinationPoolName=None):\n \"\"\"\n Move a dedicated IP address to an existing dedicated IP pool.\n See also: AWS API Documentation\n \n \n :example: response = client.put_dedicated_ip_in_pool(\n Ip='string',\n DestinationPoolName='string'\n )\n \n \n :type Ip: string\n :param Ip: [REQUIRED]\n The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that's associated with your Amazon Pinpoint account.\n \n\n :type DestinationPoolName: string\n :param DestinationPoolName: [REQUIRED]\n The name of the IP pool that you want to add the dedicated IP address to. You have to specify an IP pool that already exists.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_dedicated_ip_warmup_attributes(Ip=None, WarmupPercentage=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.put_dedicated_ip_warmup_attributes(\n Ip='string',\n WarmupPercentage=123\n )\n \n \n :type Ip: string\n :param Ip: [REQUIRED]\n The dedicated IP address that you want to update the warm-up attributes for.\n \n\n :type WarmupPercentage: integer\n :param WarmupPercentage: [REQUIRED]\n The warm-up percentage that you want to associate with the dedicated IP address.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_deliverability_dashboard_option(DashboardEnabled=None):\n \"\"\"\n Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation metrics for the domains that you use to send email using Amazon Pinpoint. You also gain the ability to perform predictive inbox placement tests.\n When you use the Deliverability dashboard, you pay a monthly charge of USD$1,250.00, in addition to any other fees that you accrue by using Amazon Pinpoint. If you enable the Deliverability dashboard after the first day of a calendar month, we prorate the monthly charge based on how many days have elapsed in the current calendar month.\n See also: AWS API Documentation\n \n \n :example: response = client.put_deliverability_dashboard_option(\n DashboardEnabled=True|False\n )\n \n \n :type DashboardEnabled: boolean\n :param DashboardEnabled: [REQUIRED]\n Indicates whether the Deliverability dashboard is enabled. 
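A one-call sketch that enables the dashboard (note the monthly charge described above):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    client.put_deliverability_dashboard_option(DashboardEnabled=True)\n 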
If the value is true , then the dashboard is enabled.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_email_identity_dkim_attributes(EmailIdentity=None, SigningEnabled=None):\n \"\"\"\n Used to enable or disable DKIM authentication for an email identity.\n See also: AWS API Documentation\n \n \n :example: response = client.put_email_identity_dkim_attributes(\n EmailIdentity='string',\n SigningEnabled=True|False\n )\n \n \n :type EmailIdentity: string\n :param EmailIdentity: [REQUIRED]\n The email identity that you want to change the DKIM settings for.\n \n\n :type SigningEnabled: boolean\n :param SigningEnabled: Sets the DKIM signing configuration for the identity.\n When you set this value to true , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. When you set this value to false , then the messages that Amazon Pinpoint sends from the identity aren't DKIM-signed.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_email_identity_feedback_attributes(EmailIdentity=None, EmailForwardingEnabled=None):\n \"\"\"\n Used to enable or disable feedback forwarding for an identity. This setting determines what happens when an identity is used to send an email that results in a bounce or complaint event.\n When you enable feedback forwarding, Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.\n When you disable feedback forwarding, Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic. You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).\n See also: AWS API Documentation\n \n \n :example: response = client.put_email_identity_feedback_attributes(\n EmailIdentity='string',\n EmailForwardingEnabled=True|False\n )\n \n \n :type EmailIdentity: string\n :param EmailIdentity: [REQUIRED]\n The email identity that you want to configure bounce and complaint feedback forwarding for.\n \n\n :type EmailForwardingEnabled: boolean\n :param EmailForwardingEnabled: Sets the feedback forwarding configuration for the identity.\n If the value is true , Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.\n When you set this value to false , Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic or another event destination. You're required to have a method of tracking bounces and complaints. 
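A sketch that enables forwarding explicitly ('example.com' stands in for an identity you have already verified):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    client.put_email_identity_feedback_attributes(\n        EmailIdentity='example.com',  # assumed verified identity\n        EmailForwardingEnabled=True\n    )\n 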
If you haven't set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_email_identity_mail_from_attributes(EmailIdentity=None, MailFromDomain=None, BehaviorOnMxFailure=None):\n \"\"\"\n Used to enable or disable the custom Mail-From domain configuration for an email identity.\n See also: AWS API Documentation\n \n \n :example: response = client.put_email_identity_mail_from_attributes(\n EmailIdentity='string',\n MailFromDomain='string',\n BehaviorOnMxFailure='USE_DEFAULT_VALUE'|'REJECT_MESSAGE'\n )\n \n \n :type EmailIdentity: string\n :param EmailIdentity: [REQUIRED]\n The verified email identity that you want to set up the custom MAIL FROM domain for.\n \n\n :type MailFromDomain: string\n :param MailFromDomain: The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must meet the following criteria:\n It has to be a subdomain of the verified identity.\n It can't be used to receive email.\n It can't be used in a 'From' address if the MAIL FROM domain is a destination for feedback forwarding emails.\n \n\n :type BehaviorOnMxFailure: string\n :param BehaviorOnMxFailure: The action that you want Amazon Pinpoint to take if it can't read the required MX record when you send an email. When you set this value to UseDefaultValue , Amazon Pinpoint uses amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage , Amazon Pinpoint returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.\n These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending , Failed , and TemporaryFailure states.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef send_email(FromEmailAddress=None, Destination=None, ReplyToAddresses=None, FeedbackForwardingEmailAddress=None, Content=None, EmailTags=None, ConfigurationSetName=None):\n \"\"\"\n Sends an email message. You can use the Amazon Pinpoint Email API to send two types of messages:\n See also: AWS API Documentation\n \n \n :example: response = client.send_email(\n FromEmailAddress='string',\n Destination={\n 'ToAddresses': [\n 'string',\n ],\n 'CcAddresses': [\n 'string',\n ],\n 'BccAddresses': [\n 'string',\n ]\n },\n ReplyToAddresses=[\n 'string',\n ],\n FeedbackForwardingEmailAddress='string',\n Content={\n 'Simple': {\n 'Subject': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Body': {\n 'Text': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Html': {\n 'Data': 'string',\n 'Charset': 'string'\n }\n }\n },\n 'Raw': {\n 'Data': b'bytes'\n }\n },\n EmailTags=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n ConfigurationSetName='string'\n )\n \n \n :type FromEmailAddress: string\n :param FromEmailAddress: The email address that you want to use as the 'From' address for the email. 
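A concrete, minimal sketch of the call (all addresses are assumed placeholders; the sender must be a verified identity, as noted next):\n \n    import boto3\n \n    client = boto3.client('pinpoint-email')\n    resp = client.send_email(\n        FromEmailAddress='sender@example.com',\n        Destination={'ToAddresses': ['recipient@example.com']},\n        Content={'Simple': {\n            'Subject': {'Data': 'Hello', 'Charset': 'UTF-8'},\n            'Body': {'Text': {'Data': 'Hi there', 'Charset': 'UTF-8'}}\n        }}\n    )\n    print(resp['MessageId'])\n 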
The address that you specify has to be verified.\n\n :type Destination: dict\n :param Destination: [REQUIRED]\n An object that contains the recipients of the email message.\n ToAddresses (list) --An array that contains the email addresses of the 'To' recipients for the email.\n (string) --\n CcAddresses (list) --An array that contains the email addresses of the 'CC' (carbon copy) recipients for the email.\n (string) --\n BccAddresses (list) --An array that contains the email addresses of the 'BCC' (blind carbon copy) recipients for the email.\n (string) --\n \n\n :type ReplyToAddresses: list\n :param ReplyToAddresses: The 'Reply-to' email addresses for the message. When the recipient replies to the message, each Reply-to address receives the reply.\n (string) --\n \n\n :type FeedbackForwardingEmailAddress: string\n :param FeedbackForwardingEmailAddress: The address that Amazon Pinpoint should send bounce and complaint notifications to.\n\n :type Content: dict\n :param Content: [REQUIRED]\n An object that contains the body of the message. You can send either a Simple message or a Raw message.\n Simple (dict) --The simple email message. The message consists of a subject and a message body.\n Subject (dict) -- [REQUIRED]The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in RFC 2047 .\n Data (string) -- [REQUIRED]The content of the message itself.\n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n Body (dict) -- [REQUIRED]The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.\n Text (dict) --An object that represents the version of the message that is displayed in email clients that don't support HTML, or clients where the recipient has disabled HTML rendering.\n Data (string) -- [REQUIRED]The content of the message itself.\n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n Html (dict) --An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.\n Data (string) -- [REQUIRED]The content of the message itself.\n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n \n Raw (dict) --The raw email message. 
The message has to meet the following criteria:\n The message has to contain a header and a body, separated by one blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n If you include attachments, they must be in a file format that Amazon Pinpoint supports.\n The entire message must be Base64 encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.\n The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321 .\n Data (bytes) -- [REQUIRED]The raw email message. The message has to meet the following criteria:\n The message has to contain a header and a body, separated by one blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n Attachments must be in a file format that Amazon Pinpoint supports.\n The entire message must be Base64 encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.\n The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321 .\n \n \n\n :type EmailTags: list\n :param EmailTags: A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n (dict) --Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.\n Name (string) -- [REQUIRED]The name of the message tag. The message tag name has to meet the following criteria:\n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n Value (string) -- [REQUIRED]The value of the message tag. The message tag value has to meet the following criteria:\n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n \n \n\n :type ConfigurationSetName: string\n :param ConfigurationSetName: The name of the configuration set that you want to use when sending the email.\n\n :rtype: dict\n :return: {\n 'MessageId': 'string'\n }\n \n \n :returns: \n FromEmailAddress (string) -- The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.\n Destination (dict) -- [REQUIRED]\n An object that contains the recipients of the email message.\n \n ToAddresses (list) --An array that contains the email addresses of the \"To\" recipients for the email.\n \n (string) --\n \n \n CcAddresses (list) --An array that contains the email addresses of the \"CC\" (carbon copy) recipients for the email.\n \n (string) --\n \n \n BccAddresses (list) --An array that contains the email addresses of the \"BCC\" (blind carbon copy) recipients for the email.\n \n (string) --\n \n \n \n \n ReplyToAddresses (list) -- The \"Reply-to\" email addresses for the message. 
When the recipient replies to the message, each Reply-to address receives the reply.\n \n (string) --\n \n \n FeedbackForwardingEmailAddress (string) -- The address that Amazon Pinpoint should send bounce and complaint notifications to.\n Content (dict) -- [REQUIRED]\n An object that contains the body of the message. You can send either a Simple message or a Raw message.\n \n Simple (dict) --The simple email message. The message consists of a subject and a message body.\n \n Subject (dict) -- [REQUIRED]The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in RFC 2047 .\n \n Data (string) -- [REQUIRED]The content of the message itself.\n \n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n \n \n \n Body (dict) -- [REQUIRED]The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.\n \n Text (dict) --An object that represents the version of the message that is displayed in email clients that don't support HTML, or clients where the recipient has disabled HTML rendering.\n \n Data (string) -- [REQUIRED]The content of the message itself.\n \n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n \n \n \n Html (dict) --An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.\n \n Data (string) -- [REQUIRED]The content of the message itself.\n \n Charset (string) --The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8 , ISO-8859-1 , or Shift_JIS .\n \n \n \n \n \n \n \n Raw (dict) --The raw email message. The message has to meet the following criteria:\n \n The message has to contain a header and a body, separated by one blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n If you include attachments, they must be in a file format that Amazon Pinpoint supports.\n The entire message must be Base64 encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.\n The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321 .\n \n \n Data (bytes) -- [REQUIRED]The raw email message. 
The message has to meet the following criteria:\n \n The message has to contain a header and a body, separated by one blank line.\n All of the required header fields must be present in the message.\n Each part of a multipart MIME message must be formatted properly.\n Attachments must be in a file format that Amazon Pinpoint supports.\n The entire message must be Base64 encoded.\n If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.\n The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321 .\n \n \n \n \n \n \n EmailTags (list) -- A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n \n (dict) --Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.\n \n Name (string) -- [REQUIRED]The name of the message tag. The message tag name has to meet the following criteria:\n \n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n \n \n Value (string) -- [REQUIRED]The value of the message tag. The message tag value has to meet the following criteria:\n \n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n \n \n \n \n \n \n ConfigurationSetName (string) -- The name of the configuration set that you want to use when sending the email.\n \n \"\"\"\n pass\n\ndef update_configuration_set_event_destination(ConfigurationSetName=None, EventDestinationName=None, EventDestination=None):\n \"\"\"\n Update the configuration of an event destination for a configuration set.\n In Amazon Pinpoint, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n See also: AWS API Documentation\n \n \n :example: response = client.update_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string',\n EventDestination={\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',\n ],\n 'KinesisFirehoseDestination': {\n 'IamRoleArn': 'string',\n 'DeliveryStreamArn': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SnsDestination': {\n 'TopicArn': 'string'\n },\n 'PinpointDestination': {\n 'ApplicationArn': 'string'\n }\n }\n )\n \n \n :type ConfigurationSetName: string\n :param ConfigurationSetName: [REQUIRED]\n The name of the configuration set that contains the event destination that you want to modify.\n \n\n :type EventDestinationName: string\n :param EventDestinationName: [REQUIRED]\n The name of the event destination that you want to modify.\n \n\n :type EventDestination: dict\n :param EventDestination: [REQUIRED]\n An object that defines the event destination.\n Enabled (boolean) --If true , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition .\n If false , the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.\n MatchingEventTypes (list) --An array that specifies which events Amazon Pinpoint should send to the destinations in this EventDestinationDefinition .\n (string) --An email sending event type. For example, email sends, opens, and bounces are all email events.\n KinesisFirehoseDestination (dict) --An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.\n IamRoleArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.\n DeliveryStreamArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.\n CloudWatchDestination (dict) --An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.\n DimensionConfigurations (list) -- [REQUIRED]An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.\n (dict) --An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.\n DimensionName (string) -- [REQUIRED]The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:\n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n DimensionValueSource (string) -- [REQUIRED]The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. 
If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose messageTag . If you want Amazon Pinpoint to use your own email headers, choose emailHeader . If you want Amazon Pinpoint to use link tags, choose linkTags .\n DefaultDimensionValue (string) -- [REQUIRED]The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:\n It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n It can contain no more than 256 characters.\n \n \n SnsDestination (dict) --An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.\n TopicArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide .\n PinpointDestination (dict) --An object that defines a Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.\n ApplicationArn (string) --The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.\n \n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5835464000701904, "alphanum_fraction": 0.5896105766296387, "avg_line_length": 45.39794921875, "blob_id": "87691ff09343146ba8db6bcf9b24be62ae1aca40", "content_id": "39f9329479db941c0a63e400705d4e311c523f89", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135715, "license_type": "permissive", "max_line_length": 470, "num_lines": 2925, "path": "/pyboto3/elasticloadbalancingv2.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_listener_certificates(ListenerArn=None, Certificates=None):\n \"\"\"\n Adds the specified certificate to the specified secure listener.\n If the certificate was already added, the call is successful but the certificate is not added again.\n To list the certificates for your listener, use DescribeListenerCertificates . 
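    A minimal sketch, assuming an existing HTTPS listener and an ACM certificate (both ARNs below are placeholders, not real values):

        import boto3

        client = boto3.client('elbv2')
        client.add_listener_certificates(
            ListenerArn='arn:aws:elasticloadbalancing:...',       # placeholder ARN
            Certificates=[{'CertificateArn': 'arn:aws:acm:...'}]  # placeholder ARN
        )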
To remove certificates from your listener, use RemoveListenerCertificates . To specify the default SSL server certificate, use ModifyListener .\n See also: AWS API Documentation\n \n \n :example: response = client.add_listener_certificates(\n ListenerArn='string',\n Certificates=[\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ]\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :type Certificates: list\n :param Certificates: [REQUIRED]\n The certificate to add. You can specify one certificate per call. Set CertificateArn to the certificate ARN but do not set IsDefault .\n (dict) --Information about an SSL server certificate.\n CertificateArn (string) --The Amazon Resource Name (ARN) of the certificate.\n IsDefault (boolean) --Indicates whether the certificate is the default certificate. Do not set IsDefault when specifying a certificate as an input parameter.\n \n \n\n :rtype: dict\n :return: {\n 'Certificates': [\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef add_tags(ResourceArns=None, Tags=None):\n \"\"\"\n Adds the specified tags to the specified Elastic Load Balancing resource. You can tag your Application Load Balancers, Network Load Balancers, and your target groups.\n Each tag consists of a key and an optional value. If a resource already has a tag with the same key, AddTags updates its value.\n To list the current tags for your resources, use DescribeTags . To remove tags from your resources, use RemoveTags .\n See also: AWS API Documentation\n \n Examples\n This example adds the specified tags to the specified load balancer.\n Expected Output:\n \n :example: response = client.add_tags(\n ResourceArns=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceArns: list\n :param ResourceArns: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource.\n (string) --\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n The tags. Each resource can have a maximum of 10 tags.\n (dict) --Information about a tag.\n Key (string) -- [REQUIRED]The key of the tag.\n Value (string) --The value of the tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_listener(LoadBalancerArn=None, Protocol=None, Port=None, SslPolicy=None, Certificates=None, DefaultActions=None):\n \"\"\"\n Creates a listener for the specified Application Load Balancer or Network Load Balancer.\n To update a listener, use ModifyListener . When you are finished with a listener, you can delete it using DeleteListener . If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer .\n This operation is idempotent, which means that it completes at most one time. 
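As a sketch, a plain HTTP listener that forwards to a single target group might be created as follows (both ARNs are placeholders):\n \n    import boto3\n \n    client = boto3.client('elbv2')\n    client.create_listener(\n        LoadBalancerArn='arn:aws:elasticloadbalancing:...',  # placeholder ARN\n        Protocol='HTTP',\n        Port=80,\n        DefaultActions=[{'Type': 'forward',\n                         'TargetGroupArn': 'arn:aws:elasticloadbalancing:...'}]  # placeholder ARN\n    )\n 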
If you attempt to create multiple listeners with the same settings, each call succeeds.\n For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide .\n See also: AWS API Documentation\n \n Examples\n This example creates an HTTP listener for the specified load balancer that forwards requests to the specified target group.\n Expected Output:\n This example creates an HTTPS listener for the specified load balancer that forwards requests to the specified target group. Note that you must specify an SSL certificate for an HTTPS listener. You can create and manage certificates using AWS Certificate Manager (ACM). Alternatively, you can create a certificate using SSL/TLS tools, get the certificate signed by a certificate authority (CA), and upload the certificate to AWS Identity and Access Management (IAM).\n Expected Output:\n \n :example: response = client.create_listener(\n LoadBalancerArn='string',\n Protocol='HTTP'|'HTTPS'|'TCP',\n Port=123,\n SslPolicy='string',\n Certificates=[\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ],\n DefaultActions=[\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :type Protocol: string\n :param Protocol: [REQUIRED]\n The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocol is TCP.\n \n\n :type Port: integer\n :param Port: [REQUIRED]\n The port on which the load balancer is listening.\n \n\n :type SslPolicy: string\n :param SslPolicy: [HTTPS listeners] The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.\n\n :type Certificates: list\n :param Certificates: [HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. 
Set CertificateArn to the certificate ARN but do not set IsDefault .\n To create a certificate list, use AddListenerCertificates .\n (dict) --Information about an SSL server certificate.\n CertificateArn (string) --The Amazon Resource Name (ARN) of the certificate.\n IsDefault (boolean) --Indicates whether the certificate is the default certificate. Do not set IsDefault when specifying a certificate as an input parameter.\n \n \n\n :type DefaultActions: list\n :param DefaultActions: [REQUIRED]\n The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.\n If the action type is forward , you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.\n [HTTPS listener] If the action type is authenticate-oidc , you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.\n [HTTPS listener] If the action type is authenticate-cognito , you authenticate users through the user pools supported by Amazon Cognito.\n [Application Load Balancer] If the action type is redirect , you redirect specified client requests from one URL to another.\n [Application Load Balancer] If the action type is fixed-response , you drop specified client requests and return a custom HTTP response.\n (dict) --Information about an action.\n Type (string) -- [REQUIRED]The type of action. Each rule must include exactly one of the following types of actions: forward , fixed-response , or redirect .\n TargetGroupArn (string) --The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward .\n AuthenticateOidcConfig (dict) --[HTTPS listener] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc .\n Issuer (string) -- [REQUIRED]The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n AuthorizationEndpoint (string) -- [REQUIRED]The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n TokenEndpoint (string) -- [REQUIRED]The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n UserInfoEndpoint (string) -- [REQUIRED]The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n ClientId (string) -- [REQUIRED]The OAuth 2.0 client identifier.\n ClientSecret (string) -- [REQUIRED]The OAuth 2.0 client secret.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. 
The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n AuthenticateCognitoConfig (dict) --[HTTPS listener] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito .\n UserPoolArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Cognito user pool.\n UserPoolClientId (string) -- [REQUIRED]The ID of the Amazon Cognito user pool client.\n UserPoolDomain (string) -- [REQUIRED]The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n Order (integer) --The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a forward or a fixed-response action.\n RedirectConfig (dict) --[Application Load Balancer] Information for creating a redirect action. Specify only when Type is redirect .\n Protocol (string) --The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.\n Port (string) --The port. You can specify a value from 1 to 65535 or #{port}.\n Host (string) --The hostname. This component is not percent-encoded. The hostname can contain #{host}.\n Path (string) --The absolute path, starting with the leading '/'. This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.\n Query (string) --The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading '?', as it is automatically added. You can specify any of the reserved keywords.\n StatusCode (string) -- [REQUIRED]The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).\n FixedResponseConfig (dict) --[Application Load Balancer] Information for creating an action that returns a custom HTTP response. 
Specify only when Type is fixed-response .\n MessageBody (string) --The message.\n StatusCode (string) -- [REQUIRED]The HTTP response code (2XX, 4XX, or 5XX).\n ContentType (string) --The content type.\n Valid Values: text/plain | text/css | text/html | application/javascript | application/json\n \n \n\n :rtype: dict\n :return: {\n 'Listeners': [\n {\n 'ListenerArn': 'string',\n 'LoadBalancerArn': 'string',\n 'Port': 123,\n 'Protocol': 'HTTP'|'HTTPS'|'TCP',\n 'Certificates': [\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ],\n 'SslPolicy': 'string',\n 'DefaultActions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_load_balancer(Name=None, Subnets=None, SubnetMappings=None, SecurityGroups=None, Scheme=None, Tags=None, Type=None, IpAddressType=None):\n \"\"\"\n Creates an Application Load Balancer or a Network Load Balancer.\n When you create a load balancer, you can specify security groups, public subnets, IP address type, and tags. Otherwise, you could do so later using SetSecurityGroups , SetSubnets , SetIpAddressType , and AddTags .\n To create listeners for your load balancer, use CreateListener . To describe your current load balancers, see DescribeLoadBalancers . When you are finished with a load balancer, you can delete it using DeleteLoadBalancer .\n For limit information, see Limits for Your Application Load Balancer in the Application Load Balancers Guide and Limits for Your Network Load Balancer in the Network Load Balancers Guide .\n This operation is idempotent, which means that it completes at most one time. 
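For example (a minimal sketch; the name and subnet IDs are placeholders), repeating the call with identical settings is safe:\n import boto3\n client = boto3.client('elbv2')\n args = dict(\n Name='my-load-balancer', # placeholder\n Subnets=['subnet-0123456789abcdef0', 'subnet-0fedcba9876543210'], # placeholders\n Scheme='internet-facing',\n Type='application'\n )\n first = client.create_load_balancer(**args)\n second = client.create_load_balancer(**args) # same settings, so this call also succeeds\n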
If you attempt to create multiple load balancers with the same settings, each call succeeds.\n For more information, see Application Load Balancers in the Application Load Balancers Guide and Network Load Balancers in the Network Load Balancers Guide .\n See also: AWS API Documentation\n \n Examples\n This example creates an Internet-facing load balancer and enables the Availability Zones for the specified subnets.\n Expected Output:\n This example creates an internal load balancer and enables the Availability Zones for the specified subnets.\n Expected Output:\n \n :example: response = client.create_load_balancer(\n Name='string',\n Subnets=[\n 'string',\n ],\n SubnetMappings=[\n {\n 'SubnetId': 'string',\n 'AllocationId': 'string'\n },\n ],\n SecurityGroups=[\n 'string',\n ],\n Scheme='internet-facing'|'internal',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n Type='application'|'network',\n IpAddressType='ipv4'|'dualstack'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the load balancer.\n This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, must not begin or end with a hyphen, and must not begin with 'internal-'.\n \n\n :type Subnets: list\n :param Subnets: The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.\n [Application Load Balancers] You must specify subnets from at least two Availability Zones.\n [Network Load Balancers] You can specify subnets from one or more Availability Zones.\n (string) --\n \n\n :type SubnetMappings: list\n :param SubnetMappings: The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.\n [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet.\n (dict) --Information about a subnet mapping.\n SubnetId (string) --The ID of the subnet.\n AllocationId (string) --[Network Load Balancers] The allocation ID of the Elastic IP address.\n \n \n\n :type SecurityGroups: list\n :param SecurityGroups: [Application Load Balancers] The IDs of the security groups for the load balancer.\n (string) --\n \n\n :type Scheme: string\n :param Scheme: The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can only route requests from clients with access to the VPC for the load balancer.\n The default is an Internet-facing load balancer.\n \n\n :type Tags: list\n :param Tags: One or more tags to assign to the load balancer.\n (dict) --Information about a tag.\n Key (string) -- [REQUIRED]The key of the tag.\n Value (string) --The value of the tag.\n \n \n\n :type Type: string\n :param Type: The type of load balancer. 
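For instance (a hedged sketch; the subnet ID is a placeholder), pass Type='network' to create a Network Load Balancer:\n client.create_load_balancer(\n Name='my-nlb', # placeholder\n Type='network',\n Subnets=['subnet-0123456789abcdef0'] # placeholder\n )\n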
The default is application .\n\n :type IpAddressType: string\n :param IpAddressType: [Application Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4 .\n\n :rtype: dict\n :return: {\n 'LoadBalancers': [\n {\n 'LoadBalancerArn': 'string',\n 'DNSName': 'string',\n 'CanonicalHostedZoneId': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LoadBalancerName': 'string',\n 'Scheme': 'internet-facing'|'internal',\n 'VpcId': 'string',\n 'State': {\n 'Code': 'active'|'provisioning'|'active_impaired'|'failed',\n 'Reason': 'string'\n },\n 'Type': 'application'|'network',\n 'AvailabilityZones': [\n {\n 'ZoneName': 'string',\n 'SubnetId': 'string',\n 'LoadBalancerAddresses': [\n {\n 'IpAddress': 'string',\n 'AllocationId': 'string'\n },\n ]\n },\n ],\n 'SecurityGroups': [\n 'string',\n ],\n 'IpAddressType': 'ipv4'|'dualstack'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_rule(ListenerArn=None, Conditions=None, Priority=None, Actions=None):\n \"\"\"\n Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.\n Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide .\n To view your current rules, use DescribeRules . To update a rule, use ModifyRule . To set the priorities of your rules, use SetRulePriorities . To delete a rule, use DeleteRule .\n See also: AWS API Documentation\n \n Examples\n This example creates a rule that forwards requests to the specified target group if the URL contains the specified pattern (for example, /img/*).\n Expected Output:\n \n :example: response = client.create_rule(\n ListenerArn='string',\n Conditions=[\n {\n 'Field': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n Priority=123,\n Actions=[\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :type Conditions: list\n :param Conditions: [REQUIRED]\n The conditions. 
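As a quick illustration before the details (a sketch; both ARNs are placeholders), a rule that forwards image requests to a dedicated target group might look like:\n client.create_rule(\n ListenerArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:listener/app/my-lb/0123456789abcdef/f00dfeed12345678', # placeholder\n Priority=10,\n Conditions=[{'Field': 'path-pattern', 'Values': ['/img/*']}],\n Actions=[{'Type': 'forward', 'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/img-targets/0123456789abcdef'}] # placeholder\n )\n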
Each condition specifies a field name and a single value.\n If the field name is host-header , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n - .\n * (matches 0 or more characters)\n ? (matches exactly 1 character)\n If the field name is path-pattern , you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n _ - . $ / ~ " ' @ : +\n & (using &amp;)\n * (matches 0 or more characters)\n ? (matches exactly 1 character)\n (dict) --Information about a condition for a rule.\n Field (string) --The name of the field. The possible values are host-header and path-pattern .\n Values (list) --The condition value.\n If the field name is host-header , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n - .\n * (matches 0 or more characters)\n ? (matches exactly 1 character)\n If the field name is path-pattern , you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n _ - . $ / ~ " ' @ : +\n & (using &amp;)\n * (matches 0 or more characters)\n ? (matches exactly 1 character)\n (string) --\n \n \n\n :type Priority: integer\n :param Priority: [REQUIRED]\n The rule priority. A listener can't have multiple rules with the same priority.\n \n\n :type Actions: list\n :param Actions: [REQUIRED]\n The actions. Each rule must include exactly one of the following types of actions: forward , fixed-response , or redirect .\n If the action type is forward , you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.\n [HTTPS listener] If the action type is authenticate-oidc , you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.\n [HTTPS listener] If the action type is authenticate-cognito , you authenticate users through the user pools supported by Amazon Cognito.\n [Application Load Balancer] If the action type is redirect , you redirect specified client requests from one URL to another.\n [Application Load Balancer] If the action type is fixed-response , you drop specified client requests and return a custom HTTP response.\n (dict) --Information about an action.\n Type (string) -- [REQUIRED]The type of action. Each rule must include exactly one of the following types of actions: forward , fixed-response , or redirect .\n TargetGroupArn (string) --The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward .\n AuthenticateOidcConfig (dict) --[HTTPS listener] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc .\n Issuer (string) -- [REQUIRED]The OIDC issuer identifier of the IdP.
This must be a full URL, including the HTTPS protocol, the domain, and the path.\n AuthorizationEndpoint (string) -- [REQUIRED]The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n TokenEndpoint (string) -- [REQUIRED]The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n UserInfoEndpoint (string) -- [REQUIRED]The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n ClientId (string) -- [REQUIRED]The OAuth 2.0 client identifier.\n ClientSecret (string) -- [REQUIRED]The OAuth 2.0 client secret.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n AuthenticateCognitoConfig (dict) --[HTTPS listener] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito .\n UserPoolArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Cognito user pool.\n UserPoolClientId (string) -- [REQUIRED]The ID of the Amazon Cognito user pool client.\n UserPoolDomain (string) -- [REQUIRED]The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n Order (integer) --The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a forward or a fixed-response action.\n RedirectConfig (dict) --[Application Load Balancer] Information for creating a redirect action. Specify only when Type is redirect .\n Protocol (string) --The protocol. 
You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.\n Port (string) --The port. You can specify a value from 1 to 65535 or #{port}.\n Host (string) --The hostname. This component is not percent-encoded. The hostname can contain #{host}.\n Path (string) --The absolute path, starting with the leading '/'. This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.\n Query (string) --The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading '?', as it is automatically added. You can specify any of the reserved keywords.\n StatusCode (string) -- [REQUIRED]The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).\n FixedResponseConfig (dict) --[Application Load Balancer] Information for creating an action that returns a custom HTTP response. Specify only when Type is fixed-response .\n MessageBody (string) --The message.\n StatusCode (string) -- [REQUIRED]The HTTP response code (2XX, 4XX, or 5XX).\n ContentType (string) --The content type.\n Valid Values: text/plain | text/css | text/html | application/javascript | application/json\n \n \n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'RuleArn': 'string',\n 'Priority': 'string',\n 'Conditions': [\n {\n 'Field': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'Actions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ],\n 'IsDefault': True|False\n },\n ]\n }\n \n \n :returns: \n A-Z, a-z, 0-9\n \n .\n \n \n \n (matches 0 or more characters)\n \n \n ? (matches exactly 1 character)\n \n \"\"\"\n pass\n\ndef create_target_group(Name=None, Protocol=None, Port=None, VpcId=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckEnabled=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None, TargetType=None):\n \"\"\"\n Creates a target group.\n To register targets with the target group, use RegisterTargets . To update the health check settings for the target group, use ModifyTargetGroup . 
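A typical flow (a sketch; the VPC and instance IDs are placeholders) creates the group and then registers targets:\n tg = client.create_target_group(\n Name='my-targets',\n Protocol='HTTP',\n Port=80,\n VpcId='vpc-0123456789abcdef0' # placeholder\n )\n tg_arn = tg['TargetGroups'][0]['TargetGroupArn']\n client.register_targets(\n TargetGroupArn=tg_arn,\n Targets=[{'Id': 'i-0123456789abcdef0'}] # placeholder instance ID\n )\n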
To monitor the health of targets in the target group, use DescribeTargetHealth .\n To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule .\n To delete a target group, use DeleteTargetGroup .\n This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple target groups with the same settings, each call succeeds.\n For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide or Target Groups for Your Network Load Balancers in the Network Load Balancers Guide .\n See also: AWS API Documentation\n \n Examples\n This example creates a target group that you can use to route traffic to targets using HTTP on port 80. This target group uses the default health check configuration.\n Expected Output:\n \n :example: response = client.create_target_group(\n Name='string',\n Protocol='HTTP'|'HTTPS'|'TCP',\n Port=123,\n VpcId='string',\n HealthCheckProtocol='HTTP'|'HTTPS'|'TCP',\n HealthCheckPort='string',\n HealthCheckEnabled=True|False,\n HealthCheckPath='string',\n HealthCheckIntervalSeconds=123,\n HealthCheckTimeoutSeconds=123,\n HealthyThresholdCount=123,\n UnhealthyThresholdCount=123,\n Matcher={\n 'HttpCode': 'string'\n },\n TargetType='instance'|'ip'|'lambda'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the target group.\n This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen.\n \n\n :type Protocol: string\n :param Protocol: The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocol is TCP. If the target is a Lambda function, this parameter does not apply.\n\n :type Port: integer\n :param Port: The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. If the target is a Lambda function, this parameter does not apply.\n\n :type VpcId: string\n :param VpcId: The identifier of the virtual private cloud (VPC). If the target is a Lambda function, this parameter does not apply.\n\n :type HealthCheckProtocol: string\n :param HealthCheckProtocol: The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported only if the protocol of the target group is TCP. For Application Load Balancers, the default is HTTP. For Network Load Balancers, the default is TCP.\n\n :type HealthCheckPort: string\n :param HealthCheckPort: The port the load balancer uses when performing health checks on targets. The default is traffic-port , which is the port on which each target receives traffic from the load balancer.\n\n :type HealthCheckEnabled: boolean\n :param HealthCheckEnabled: Indicates whether health checks are enabled. If the target type is instance or ip , the default is true . If the target type is lambda , the default is false .\n\n :type HealthCheckPath: string\n :param HealthCheckPath: [HTTP/HTTPS health checks] The ping path that is the destination on the targets for health checks. The default is /.\n\n :type HealthCheckIntervalSeconds: integer\n :param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5-300 seconds.
For Network Load Balancers, the supported values are 10 or 30 seconds. If the target type is instance or ip , the default is 30 seconds. If the target type is lambda , the default is 35 seconds.\n\n :type HealthCheckTimeoutSeconds: integer\n :param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response from a target means a failed health check. For Application Load Balancers, the range is 2-120 seconds and the default is 5 seconds if the target type is instance or ip and 30 seconds if the target type is lambda . For Network Load Balancers, this is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health checks.\n\n :type HealthyThresholdCount: integer\n :param HealthyThresholdCount: The number of consecutive health check successes required before considering an unhealthy target healthy. For Application Load Balancers, the default is 5. For Network Load Balancers, the default is 3.\n\n :type UnhealthyThresholdCount: integer\n :param UnhealthyThresholdCount: The number of consecutive health check failures required before considering a target unhealthy. For Application Load Balancers, the default is 2. For Network Load Balancers, this value must be the same as the healthy threshold count.\n\n :type Matcher: dict\n :param Matcher: [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target.\n HttpCode (string) -- [REQUIRED]The HTTP codes.\n For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').\n For Network Load Balancers, this is 200-399.\n \n\n :type TargetType: string\n :param TargetType: The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.\n instance - Targets are specified by instance ID. This is the default value.\n ip - Targets are specified by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10).
You can't specify publicly routable IP addresses.\n lambda - The target groups contains a single Lambda function.\n \n\n :rtype: dict\n :return: {\n 'TargetGroups': [\n {\n 'TargetGroupArn': 'string',\n 'TargetGroupName': 'string',\n 'Protocol': 'HTTP'|'HTTPS'|'TCP',\n 'Port': 123,\n 'VpcId': 'string',\n 'HealthCheckProtocol': 'HTTP'|'HTTPS'|'TCP',\n 'HealthCheckPort': 'string',\n 'HealthCheckEnabled': True|False,\n 'HealthCheckIntervalSeconds': 123,\n 'HealthCheckTimeoutSeconds': 123,\n 'HealthyThresholdCount': 123,\n 'UnhealthyThresholdCount': 123,\n 'HealthCheckPath': 'string',\n 'Matcher': {\n 'HttpCode': 'string'\n },\n 'LoadBalancerArns': [\n 'string',\n ],\n 'TargetType': 'instance'|'ip'|'lambda'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_listener(ListenerArn=None):\n \"\"\"\n Deletes the specified listener.\n Alternatively, your listener is deleted when you delete the load balancer to which it is attached, using DeleteLoadBalancer .\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified listener.\n Expected Output:\n \n :example: response = client.delete_listener(\n ListenerArn='string'\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_load_balancer(LoadBalancerArn=None):\n \"\"\"\n Deletes the specified Application Load Balancer or Network Load Balancer and its attached listeners.\n You can't delete a load balancer if deletion protection is enabled. If the load balancer does not exist or has already been deleted, the call succeeds.\n Deleting a load balancer does not affect its registered targets. For example, your EC2 instances continue to run and are still registered to their target groups. If you no longer need these EC2 instances, you can stop or terminate them.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified load balancer.\n Expected Output:\n \n :example: response = client.delete_load_balancer(\n LoadBalancerArn='string'\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_rule(RuleArn=None):\n \"\"\"\n Deletes the specified rule.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified rule.\n Expected Output:\n \n :example: response = client.delete_rule(\n RuleArn='string'\n )\n \n \n :type RuleArn: string\n :param RuleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the rule.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_target_group(TargetGroupArn=None):\n \"\"\"\n Deletes the specified target group.\n You can delete a target group if it is not referenced by any actions. Deleting a target group also deletes any associated health checks.\n See also: AWS API Documentation\n \n Examples\n This example deletes the specified target group.\n Expected Output:\n \n :example: response = client.delete_target_group(\n TargetGroupArn='string'\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef deregister_targets(TargetGroupArn=None, Targets=None):\n \"\"\"\n Deregisters the specified targets from the specified target group. 
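For example (a sketch; the ARN and instance ID are placeholders), deregister an instance before terminating it:\n client.deregister_targets(\n TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/0123456789abcdef', # placeholder\n Targets=[{'Id': 'i-0123456789abcdef0'}] # placeholder\n )\n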
After the targets are deregistered, they no longer receive traffic from the load balancer.\n See also: AWS API Documentation\n \n Examples\n This example deregisters the specified instance from the specified target group.\n Expected Output:\n \n :example: response = client.deregister_targets(\n TargetGroupArn='string',\n Targets=[\n {\n 'Id': 'string',\n 'Port': 123,\n 'AvailabilityZone': 'string'\n },\n ]\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :type Targets: list\n :param Targets: [REQUIRED]\n The targets. If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it.\n (dict) --Information about a target.\n Id (string) -- [REQUIRED]The ID of the target. If the target type of the target group is instance , specify an instance ID. If the target type is ip , specify an IP address. If the target type is lambda , specify the ARN of the Lambda function.\n Port (integer) --The port on which the target is listening.\n AvailabilityZone (string) --An Availability Zone or all . This determines whether the target receives traffic from the load balancer nodes in the specified Availability Zone or from all enabled Availability Zones for the load balancer.\n This parameter is not supported if the target type of the target group is instance .\n If the target type is ip and the IP address is in a subnet of the VPC for the target group, the Availability Zone is automatically detected and this parameter is optional. If the IP address is outside the VPC, this parameter is required.\n With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all .\n If the target type is lambda , this parameter is optional and the only supported value is all .\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_account_limits(Marker=None, PageSize=None):\n \"\"\"\n Describes the current Elastic Load Balancing resource limits for your AWS account.\n For more information, see Limits for Your Application Load Balancers in the Application Load Balancer Guide or Limits for Your Network Load Balancers in the Network Load Balancers Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_limits(\n Marker='string',\n PageSize=123\n )\n \n \n :type Marker: string\n :param Marker: The marker for the next set of results. 
(You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'Limits': [\n {\n 'Name': 'string',\n 'Max': 'string'\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n :returns: \n application-load-balancers\n listeners-per-application-load-balancer\n listeners-per-network-load-balancer\n network-load-balancers\n rules-per-application-load-balancer\n target-groups\n targets-per-application-load-balancer\n targets-per-availability-zone-per-network-load-balancer\n targets-per-network-load-balancer\n \n \"\"\"\n pass\n\ndef describe_listener_certificates(ListenerArn=None, Marker=None, PageSize=None):\n \"\"\"\n Describes the certificates for the specified secure listener.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_listener_certificates(\n ListenerArn='string',\n Marker='string',\n PageSize=123\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Names (ARN) of the listener.\n \n\n :type Marker: string\n :param Marker: The marker for the next set of results. (You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'Certificates': [\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_listeners(LoadBalancerArn=None, ListenerArns=None, Marker=None, PageSize=None):\n \"\"\"\n Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.\n See also: AWS API Documentation\n \n Examples\n This example describes the specified listener.\n Expected Output:\n \n :example: response = client.describe_listeners(\n LoadBalancerArn='string',\n ListenerArns=[\n 'string',\n ],\n Marker='string',\n PageSize=123\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: The Amazon Resource Name (ARN) of the load balancer.\n\n :type ListenerArns: list\n :param ListenerArns: The Amazon Resource Names (ARN) of the listeners.\n (string) --\n \n\n :type Marker: string\n :param Marker: The marker for the next set of results. 
(You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'Listeners': [\n {\n 'ListenerArn': 'string',\n 'LoadBalancerArn': 'string',\n 'Port': 123,\n 'Protocol': 'HTTP'|'HTTPS'|'TCP',\n 'Certificates': [\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ],\n 'SslPolicy': 'string',\n 'DefaultActions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_load_balancer_attributes(LoadBalancerArn=None):\n \"\"\"\n Describes the attributes for the specified Application Load Balancer or Network Load Balancer.\n For more information, see Load Balancer Attributes in the Application Load Balancers Guide or Load Balancer Attributes in the Network Load Balancers Guide .\n See also: AWS API Documentation\n \n Examples\n This example describes the attributes of the specified load balancer.\n Expected Output:\n \n :example: response = client.describe_load_balancer_attributes(\n LoadBalancerArn='string'\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n :returns: \n access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false . The default is false .\n access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.\n idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false . 
The default is true .\n \n \"\"\"\n pass\n\ndef describe_load_balancers(LoadBalancerArns=None, Names=None, Marker=None, PageSize=None):\n \"\"\"\n Describes the specified load balancers or all of your load balancers.\n To describe the listeners for a load balancer, use DescribeListeners . To describe the attributes for a load balancer, use DescribeLoadBalancerAttributes .\n See also: AWS API Documentation\n \n Examples\n This example describes the specified load balancer.\n Expected Output:\n \n :example: response = client.describe_load_balancers(\n LoadBalancerArns=[\n 'string',\n ],\n Names=[\n 'string',\n ],\n Marker='string',\n PageSize=123\n )\n \n \n :type LoadBalancerArns: list\n :param LoadBalancerArns: The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.\n (string) --\n \n\n :type Names: list\n :param Names: The names of the load balancers.\n (string) --\n \n\n :type Marker: string\n :param Marker: The marker for the next set of results. (You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'LoadBalancers': [\n {\n 'LoadBalancerArn': 'string',\n 'DNSName': 'string',\n 'CanonicalHostedZoneId': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'LoadBalancerName': 'string',\n 'Scheme': 'internet-facing'|'internal',\n 'VpcId': 'string',\n 'State': {\n 'Code': 'active'|'provisioning'|'active_impaired'|'failed',\n 'Reason': 'string'\n },\n 'Type': 'application'|'network',\n 'AvailabilityZones': [\n {\n 'ZoneName': 'string',\n 'SubnetId': 'string',\n 'LoadBalancerAddresses': [\n {\n 'IpAddress': 'string',\n 'AllocationId': 'string'\n },\n ]\n },\n ],\n 'SecurityGroups': [\n 'string',\n ],\n 'IpAddressType': 'ipv4'|'dualstack'\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_rules(ListenerArn=None, RuleArns=None, Marker=None, PageSize=None):\n \"\"\"\n Describes the specified rules or the rules for the specified listener. You must specify either a listener or one or more rules.\n See also: AWS API Documentation\n \n Examples\n This example describes the specified rule.\n Expected Output:\n \n :example: response = client.describe_rules(\n ListenerArn='string',\n RuleArns=[\n 'string',\n ],\n Marker='string',\n PageSize=123\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: The Amazon Resource Name (ARN) of the listener.\n\n :type RuleArns: list\n :param RuleArns: The Amazon Resource Names (ARN) of the rules.\n (string) --\n \n\n :type Marker: string\n :param Marker: The marker for the next set of results. 
(You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'RuleArn': 'string',\n 'Priority': 'string',\n 'Conditions': [\n {\n 'Field': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'Actions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ],\n 'IsDefault': True|False\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n :returns: \n A-Z, a-z, 0-9\n \n .\n \n \n \n (matches 0 or more characters)\n \n \n ? (matches exactly 1 character)\n \n \"\"\"\n pass\n\ndef describe_ssl_policies(Names=None, Marker=None, PageSize=None):\n \"\"\"\n Describes the specified policies or all policies used for SSL negotiation.\n For more information, see Security Policies in the Application Load Balancers Guide .\n See also: AWS API Documentation\n \n Examples\n This example describes the specified policy used for SSL negotiation.\n Expected Output:\n \n :example: response = client.describe_ssl_policies(\n Names=[\n 'string',\n ],\n Marker='string',\n PageSize=123\n )\n \n \n :type Names: list\n :param Names: The names of the policies.\n (string) --\n \n\n :type Marker: string\n :param Marker: The marker for the next set of results. (You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'SslPolicies': [\n {\n 'SslProtocols': [\n 'string',\n ],\n 'Ciphers': [\n {\n 'Name': 'string',\n 'Priority': 123\n },\n ],\n 'Name': 'string'\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_tags(ResourceArns=None):\n \"\"\"\n Describes the tags for the specified resources. 
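A short sketch (the ARN is a placeholder):\n response = client.describe_tags(\n ResourceArns=['arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-lb/0123456789abcdef'] # placeholder\n )\n for desc in response['TagDescriptions']:\n print(desc['ResourceArn'], desc['Tags'])\n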
You can describe the tags for one or more Application Load Balancers, Network Load Balancers, and target groups.\n See also: AWS API Documentation\n \n Examples\n This example describes the tags assigned to the specified load balancer.\n Expected Output:\n \n :example: response = client.describe_tags(\n ResourceArns=[\n 'string',\n ]\n )\n \n \n :type ResourceArns: list\n :param ResourceArns: [REQUIRED]\n The Amazon Resource Names (ARN) of the resources.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'TagDescriptions': [\n {\n 'ResourceArn': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_target_group_attributes(TargetGroupArn=None):\n \"\"\"\n Describes the attributes for the specified target group.\n For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide .\n See also: AWS API Documentation\n \n Examples\n This example describes the attributes of the specified target group.\n Expected Output:\n \n :example: response = client.describe_target_group_attributes(\n TargetGroupArn='string'\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n :returns: \n slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives a linearly increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). Slow start mode is disabled by default.\n stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false . The default is false .\n stickiness.type - The type of sticky sessions. The possible value is lb_cookie .\n stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n \n \"\"\"\n pass\n\ndef describe_target_groups(LoadBalancerArn=None, TargetGroupArns=None, Names=None, Marker=None, PageSize=None):\n \"\"\"\n Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups.\n To describe the targets for a target group, use DescribeTargetHealth . 
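Because results are returned in pages (Marker /PageSize ), iterating with a paginator is often simpler, as in this sketch (assumes your SDK version provides a DescribeTargetGroups paginator):\n paginator = client.get_paginator('describe_target_groups')\n for page in paginator.paginate():\n for tg in page['TargetGroups']:\n print(tg['TargetGroupName'], tg['TargetGroupArn'])\n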
To describe the attributes of a target group, use DescribeTargetGroupAttributes .\n See also: AWS API Documentation\n \n Examples\n This example describes the specified target group.\n Expected Output:\n \n :example: response = client.describe_target_groups(\n LoadBalancerArn='string',\n TargetGroupArns=[\n 'string',\n ],\n Names=[\n 'string',\n ],\n Marker='string',\n PageSize=123\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: The Amazon Resource Name (ARN) of the load balancer.\n\n :type TargetGroupArns: list\n :param TargetGroupArns: The Amazon Resource Names (ARN) of the target groups.\n (string) --\n \n\n :type Names: list\n :param Names: The names of the target groups.\n (string) --\n \n\n :type Marker: string\n :param Marker: The marker for the next set of results. (You received this marker from a previous call.)\n\n :type PageSize: integer\n :param PageSize: The maximum number of results to return with this call.\n\n :rtype: dict\n :return: {\n 'TargetGroups': [\n {\n 'TargetGroupArn': 'string',\n 'TargetGroupName': 'string',\n 'Protocol': 'HTTP'|'HTTPS'|'TCP',\n 'Port': 123,\n 'VpcId': 'string',\n 'HealthCheckProtocol': 'HTTP'|'HTTPS'|'TCP',\n 'HealthCheckPort': 'string',\n 'HealthCheckEnabled': True|False,\n 'HealthCheckIntervalSeconds': 123,\n 'HealthCheckTimeoutSeconds': 123,\n 'HealthyThresholdCount': 123,\n 'UnhealthyThresholdCount': 123,\n 'HealthCheckPath': 'string',\n 'Matcher': {\n 'HttpCode': 'string'\n },\n 'LoadBalancerArns': [\n 'string',\n ],\n 'TargetType': 'instance'|'ip'|'lambda'\n },\n ],\n 'NextMarker': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_target_health(TargetGroupArn=None, Targets=None):\n \"\"\"\n Describes the health of the specified targets or all of your targets.\n See also: AWS API Documentation\n \n Examples\n This example describes the health of the targets for the specified target group. One target is healthy but the other is not specified in an action, so it can't receive traffic from the load balancer.\n Expected Output:\n This example describes the health of the specified target. This target is healthy.\n Expected Output:\n \n :example: response = client.describe_target_health(\n TargetGroupArn='string',\n Targets=[\n {\n 'Id': 'string',\n 'Port': 123,\n 'AvailabilityZone': 'string'\n },\n ]\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :type Targets: list\n :param Targets: The targets.\n (dict) --Information about a target.\n Id (string) -- [REQUIRED]The ID of the target. If the target type of the target group is instance , specify an instance ID. If the target type is ip , specify an IP address. If the target type is lambda , specify the ARN of the Lambda function.\n Port (integer) --The port on which the target is listening.\n AvailabilityZone (string) --An Availability Zone or all . This determines whether the target receives traffic from the load balancer nodes in the specified Availability Zone or from all enabled Availability Zones for the load balancer.\n This parameter is not supported if the target type of the target group is instance .\n If the target type is ip and the IP address is in a subnet of the VPC for the target group, the Availability Zone is automatically detected and this parameter is optional. 
If the IP address is outside the VPC, this parameter is required.\n With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all .\n If the target type is lambda , this parameter is optional and the only supported value is all .\n \n \n\n :rtype: dict\n :return: {\n 'TargetHealthDescriptions': [\n {\n 'Target': {\n 'Id': 'string',\n 'Port': 123,\n 'AvailabilityZone': 'string'\n },\n 'HealthCheckPort': 'string',\n 'TargetHealth': {\n 'State': 'initial'|'healthy'|'unhealthy'|'unused'|'draining'|'unavailable',\n 'Reason': 'Elb.RegistrationInProgress'|'Elb.InitialHealthChecking'|'Target.ResponseCodeMismatch'|'Target.Timeout'|'Target.FailedHealthChecks'|'Target.NotRegistered'|'Target.NotInUse'|'Target.DeregistrationInProgress'|'Target.InvalidState'|'Target.IpUnusable'|'Target.HealthCheckDisabled'|'Elb.InternalError',\n 'Description': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n Elb.RegistrationInProgress - The target is in the process of being registered with the load balancer.\n Elb.InitialHealthChecking - The load balancer is still sending the target the minimum number of health checks required to determine its health status.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef modify_listener(ListenerArn=None, Port=None, Protocol=None, SslPolicy=None, Certificates=None, DefaultActions=None):\n \"\"\"\n Modifies the specified properties of the specified listener.\n Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP removes the security policy and SSL certificate properties. 
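Going the other way, a sketch (both ARNs are placeholders; confirm which security policy names are available to your account) that switches a listener to HTTPS supplies both:\n client.modify_listener(\n ListenerArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:listener/app/my-lb/0123456789abcdef/f00dfeed12345678', # placeholder\n Protocol='HTTPS',\n Port=443,\n SslPolicy='ELBSecurityPolicy-2016-08', # verify availability for your account\n Certificates=[{'CertificateArn': 'arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012'}] # placeholder\n )\n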
If you change the protocol from HTTP to HTTPS, you must add the security policy and server certificate.\n See also: AWS API Documentation\n \n Examples\n This example changes the default action for the specified listener.\n Expected Output:\n This example changes the server certificate for the specified HTTPS listener.\n Expected Output:\n \n :example: response = client.modify_listener(\n ListenerArn='string',\n Port=123,\n Protocol='HTTP'|'HTTPS'|'TCP',\n SslPolicy='string',\n Certificates=[\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ],\n DefaultActions=[\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :type Port: integer\n :param Port: The port for connections from clients to the load balancer.\n\n :type Protocol: string\n :param Protocol: The protocol for connections from clients to the load balancer. Application Load Balancers support HTTP and HTTPS and Network Load Balancers support TCP.\n\n :type SslPolicy: string\n :param SslPolicy: [HTTPS listeners] The security policy that defines which protocols and ciphers are supported. For more information, see Security Policies in the Application Load Balancers Guide .\n\n :type Certificates: list\n :param Certificates: [HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault .\n To create a certificate list, use AddListenerCertificates .\n (dict) --Information about an SSL server certificate.\n CertificateArn (string) --The Amazon Resource Name (ARN) of the certificate.\n IsDefault (boolean) --Indicates whether the certificate is the default certificate. Do not set IsDefault when specifying a certificate as an input parameter.\n \n \n\n :type DefaultActions: list\n :param DefaultActions: The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.\n If the action type is forward , you specify a target group. 
The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.\n [HTTPS listener] If the action type is authenticate-oidc , you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.\n [HTTPS listener] If the action type is authenticate-cognito , you authenticate users through the user pools supported by Amazon Cognito.\n [Application Load Balancer] If the action type is redirect , you redirect specified client requests from one URL to another.\n [Application Load Balancer] If the action type is fixed-response , you drop specified client requests and return a custom HTTP response.\n (dict) --Information about an action.\n Type (string) -- [REQUIRED]The type of action. Each rule must include exactly one of the following types of actions: forward , fixed-response , or redirect .\n TargetGroupArn (string) --The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward .\n AuthenticateOidcConfig (dict) --[HTTPS listener] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc .\n Issuer (string) -- [REQUIRED]The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n AuthorizationEndpoint (string) -- [REQUIRED]The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n TokenEndpoint (string) -- [REQUIRED]The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n UserInfoEndpoint (string) -- [REQUIRED]The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n ClientId (string) -- [REQUIRED]The OAuth 2.0 client identifier.\n ClientSecret (string) -- [REQUIRED]The OAuth 2.0 client secret.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n AuthenticateCognitoConfig (dict) --[HTTPS listener] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito .\n UserPoolArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Cognito user pool.\n UserPoolClientId (string) -- [REQUIRED]The ID of the Amazon Cognito user pool client.\n UserPoolDomain (string) -- [REQUIRED]The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.\n SessionCookieName (string) --The name of the cookie used to maintain session information. 
The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n Order (integer) --The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a forward or a fixed-response action.\n RedirectConfig (dict) --[Application Load Balancer] Information for creating a redirect action. Specify only when Type is redirect .\n Protocol (string) --The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.\n Port (string) --The port. You can specify a value from 1 to 65535 or #{port}.\n Host (string) --The hostname. This component is not percent-encoded. The hostname can contain #{host}.\n Path (string) --The absolute path, starting with the leading '/'. This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.\n Query (string) --The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading '?', as it is automatically added. You can specify any of the reserved keywords.\n StatusCode (string) -- [REQUIRED]The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).\n FixedResponseConfig (dict) --[Application Load Balancer] Information for creating an action that returns a custom HTTP response. 
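As a hedged sketch (the status code, content type, and message body shown are illustrative choices, not documented defaults), such an action could look like:\n {\n 'Type': 'fixed-response',\n 'FixedResponseConfig': {\n 'StatusCode': '503',\n 'ContentType': 'text/plain',\n 'MessageBody': 'Service temporarily unavailable'\n }\n }\n 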
Specify only when Type is fixed-response .\n MessageBody (string) --The message.\n StatusCode (string) -- [REQUIRED]The HTTP response code (2XX, 4XX, or 5XX).\n ContentType (string) --The content type.\n Valid Values: text/plain | text/css | text/html | application/javascript | application/json\n \n \n\n :rtype: dict\n :return: {\n 'Listeners': [\n {\n 'ListenerArn': 'string',\n 'LoadBalancerArn': 'string',\n 'Port': 123,\n 'Protocol': 'HTTP'|'HTTPS'|'TCP',\n 'Certificates': [\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ],\n 'SslPolicy': 'string',\n 'DefaultActions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef modify_load_balancer_attributes(LoadBalancerArn=None, Attributes=None):\n \"\"\"\n Modifies the specified attributes of the specified Application Load Balancer or Network Load Balancer.\n If any of the specified attributes can't be modified as requested, the call fails. Any existing attributes that you do not modify retain their current values.\n See also: AWS API Documentation\n \n Examples\n This example enables deletion protection for the specified load balancer.\n Expected Output:\n This example changes the idle timeout value for the specified load balancer.\n Expected Output:\n This example enables access logs for the specified load balancer. Note that the S3 bucket must exist in the same region as the load balancer and must have a policy attached that grants access to the Elastic Load Balancing service.\n Expected Output:\n \n :example: response = client.modify_load_balancer_attributes(\n LoadBalancerArn='string',\n Attributes=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :type Attributes: list\n :param Attributes: [REQUIRED]\n The load balancer attributes.\n (dict) --Information about a load balancer attribute.\n Key (string) --The name of the attribute.\n The following attributes are supported by both Application Load Balancers and Network Load Balancers:\n deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false . 
The default is false .\n The following attributes are supported by only Application Load Balancers:\n access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false . The default is false .\n access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.\n idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false . The default is true .\n The following attributes are supported by only Network Load Balancers:\n load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false . The default is false .\n Value (string) --The value of the attribute.\n \n \n\n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n :returns: \n deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false . The default is false .\n \n \"\"\"\n pass\n\ndef modify_rule(RuleArn=None, Conditions=None, Actions=None):\n \"\"\"\n Modifies the specified rule.\n Any existing properties that you do not modify retain their current values.\n To modify the actions for the default rule, use ModifyListener .\n See also: AWS API Documentation\n \n Examples\n This example modifies the condition for the specified rule.\n Expected Output:\n \n :example: response = client.modify_rule(\n RuleArn='string',\n Conditions=[\n {\n 'Field': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n Actions=[\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ]\n )\n \n \n :type RuleArn: string\n :param RuleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the rule.\n \n\n :type Conditions: list\n :param Conditions: The conditions. Each condition specifies a field name and a single value.\n If the field name is host-header , you can specify a single host name (for example, my.example.com). 
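As a hedged illustration, a complete host-header condition for that host name could be passed as:\n {\n 'Field': 'host-header',\n 'Values': ['my.example.com']\n }\n 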
A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n .\n (matches 0 or more characters)\n ? (matches exactly 1 character)\n If the field name is path-pattern , you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n _ - . $ / ~ ' ' @ : +\n & (using &amp;)\n (matches 0 or more characters)\n ? (matches exactly 1 character)\n (dict) --Information about a condition for a rule.\n Field (string) --The name of the field. The possible values are host-header and path-pattern .\n Values (list) --The condition value.\n If the field name is host-header , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n .\n (matches 0 or more characters)\n ? (matches exactly 1 character)\n If the field name is path-pattern , you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.\n A-Z, a-z, 0-9\n _ - . $ / ~ ' ' @ : +\n & (using &amp;)\n (matches 0 or more characters)\n ? (matches exactly 1 character)\n (string) --\n \n \n\n :type Actions: list\n :param Actions: The actions.\n If the action type is forward , you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.\n [HTTPS listener] If the action type is authenticate-oidc , you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.\n [HTTPS listener] If the action type is authenticate-cognito , you authenticate users through the user pools supported by Amazon Cognito.\n [Application Load Balancer] If the action type is redirect , you redirect specified client requests from one URL to another.\n [Application Load Balancer] If the action type is fixed-response , you drop specified client requests and return a custom HTTP response.\n (dict) --Information about an action.\n Type (string) -- [REQUIRED]The type of action. Each rule must include exactly one of the following types of actions: forward , fixed-response , or redirect .\n TargetGroupArn (string) --The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward .\n AuthenticateOidcConfig (dict) --[HTTPS listener] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc .\n Issuer (string) -- [REQUIRED]The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n AuthorizationEndpoint (string) -- [REQUIRED]The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n TokenEndpoint (string) -- [REQUIRED]The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.\n UserInfoEndpoint (string) -- [REQUIRED]The user info endpoint of the IdP. 
This must be a full URL, including the HTTPS protocol, the domain, and the path.\n ClientId (string) -- [REQUIRED]The OAuth 2.0 client identifier.\n ClientSecret (string) -- [REQUIRED]The OAuth 2.0 client secret.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n AuthenticateCognitoConfig (dict) --[HTTPS listener] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito .\n UserPoolArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Cognito user pool.\n UserPoolClientId (string) -- [REQUIRED]The ID of the Amazon Cognito user pool client.\n UserPoolDomain (string) -- [REQUIRED]The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.\n SessionCookieName (string) --The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.\n Scope (string) --The set of user claims to be requested from the IdP. The default is openid .\n To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.\n SessionTimeout (integer) --The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).\n AuthenticationRequestExtraParams (dict) --The query parameters (up to 10) to include in the redirect request to the authorization endpoint.\n (string) --\n (string) --\n \n OnUnauthenticatedRequest (string) --The behavior if the user is not authenticated. The following are possible values:\n deny- Return an HTTP 401 Unauthorized error.\n allow- Allow the request to be forwarded to the target.\n authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.\n \n Order (integer) --The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a forward or a fixed-response action.\n RedirectConfig (dict) --[Application Load Balancer] Information for creating a redirect action. Specify only when Type is redirect .\n Protocol (string) --The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.\n Port (string) --The port. You can specify a value from 1 to 65535 or #{port}.\n Host (string) --The hostname. This component is not percent-encoded. The hostname can contain #{host}.\n Path (string) --The absolute path, starting with the leading '/'. This component is not percent-encoded. 
The path can contain #{host}, #{path}, and #{port}.\n Query (string) --The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading '?', as it is automatically added. You can specify any of the reserved keywords.\n StatusCode (string) -- [REQUIRED]The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).\n FixedResponseConfig (dict) --[Application Load Balancer] Information for creating an action that returns a custom HTTP response. Specify only when Type is fixed-response .\n MessageBody (string) --The message.\n StatusCode (string) -- [REQUIRED]The HTTP response code (2XX, 4XX, or 5XX).\n ContentType (string) --The content type.\n Valid Values: text/plain | text/css | text/html | application/javascript | application/json\n \n \n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'RuleArn': 'string',\n 'Priority': 'string',\n 'Conditions': [\n {\n 'Field': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'Actions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ],\n 'IsDefault': True|False\n },\n ]\n }\n \n \n :returns: \n A-Z, a-z, 0-9\n \n .\n \n \n \n (matches 0 or more characters)\n \n \n ? 
(matches exactly 1 character)\n \n \"\"\"\n pass\n\ndef modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckEnabled=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):\n \"\"\"\n Modifies the health checks used when evaluating the health state of the targets in the specified target group.\n To monitor the health of the targets, use DescribeTargetHealth .\n See also: AWS API Documentation\n \n Examples\n This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.\n Expected Output:\n \n :example: response = client.modify_target_group(\n TargetGroupArn='string',\n HealthCheckProtocol='HTTP'|'HTTPS'|'TCP',\n HealthCheckPort='string',\n HealthCheckPath='string',\n HealthCheckEnabled=True|False,\n HealthCheckIntervalSeconds=123,\n HealthCheckTimeoutSeconds=123,\n HealthyThresholdCount=123,\n UnhealthyThresholdCount=123,\n Matcher={\n 'HttpCode': 'string'\n }\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :type HealthCheckProtocol: string\n :param HealthCheckProtocol: The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported only if the protocol of the target group is TCP.\n If the protocol of the target group is TCP, you can't modify this setting.\n \n\n :type HealthCheckPort: string\n :param HealthCheckPort: The port the load balancer uses when performing health checks on targets.\n\n :type HealthCheckPath: string\n :param HealthCheckPath: [HTTP/HTTPS health checks] The ping path that is the destination for the health check request.\n\n :type HealthCheckEnabled: boolean\n :param HealthCheckEnabled: Indicates whether health checks are enabled.\n\n :type HealthCheckIntervalSeconds: integer\n :param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5-300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.\n If the protocol of the target group is TCP, you can't modify this setting.\n \n\n :type HealthCheckTimeoutSeconds: integer\n :param HealthCheckTimeoutSeconds: [HTTP/HTTPS health checks] The amount of time, in seconds, during which no response means a failed health check.\n If the protocol of the target group is TCP, you can't modify this setting.\n \n\n :type HealthyThresholdCount: integer\n :param HealthyThresholdCount: The number of consecutive health check successes required before considering an unhealthy target healthy.\n\n :type UnhealthyThresholdCount: integer\n :param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy. For Network Load Balancers, this value must be the same as the healthy threshold count.\n\n :type Matcher: dict\n :param Matcher: [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target.\n If the protocol of the target group is TCP, you can't modify this setting.\n HttpCode (string) -- [REQUIRED]The HTTP codes.\n For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').\n For Network Load Balancers, this is 200-399.\n \n\n :rtype: dict\n :return: {\n 'TargetGroups': [\n {\n 'TargetGroupArn': 'string',\n 'TargetGroupName': 'string',\n 'Protocol': 'HTTP'|'HTTPS'|'TCP',\n 'Port': 123,\n 'VpcId': 'string',\n 'HealthCheckProtocol': 'HTTP'|'HTTPS'|'TCP',\n 'HealthCheckPort': 'string',\n 'HealthCheckEnabled': True|False,\n 'HealthCheckIntervalSeconds': 123,\n 'HealthCheckTimeoutSeconds': 123,\n 'HealthyThresholdCount': 123,\n 'UnhealthyThresholdCount': 123,\n 'HealthCheckPath': 'string',\n 'Matcher': {\n 'HttpCode': 'string'\n },\n 'LoadBalancerArns': [\n 'string',\n ],\n 'TargetType': 'instance'|'ip'|'lambda'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef modify_target_group_attributes(TargetGroupArn=None, Attributes=None):\n \"\"\"\n Modifies the specified attributes of the specified target group.\n See also: AWS API Documentation\n \n Examples\n This example sets the deregistration delay timeout to the specified value for the specified target group.\n Expected Output:\n \n :example: response = client.modify_target_group_attributes(\n TargetGroupArn='string',\n Attributes=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :type Attributes: list\n :param Attributes: [REQUIRED]\n The attributes.\n (dict) --Information about a target group attribute.\n Key (string) --The name of the attribute.\n The following attribute is supported by both Application Load Balancers and Network Load Balancers:\n deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n The following attributes are supported by Application Load Balancers if the target is not a Lambda function:\n slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives a linearly increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). Slow start mode is disabled by default.\n stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false . The default is false .\n stickiness.type - The type of sticky sessions. The possible value is lb_cookie .\n stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n The following attribute is supported only if the target is a Lambda function.\n lambda.multi_value_headers.enabled - Indicates whether the request and response headers exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false . The default is false . 
If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n The following attribute is supported only by Network Load Balancers:\n proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false . The default is false .\n Value (string) --The value of the attribute.\n \n \n\n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n :returns: \n deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n \n \"\"\"\n pass\n\ndef register_targets(TargetGroupArn=None, Targets=None):\n \"\"\"\n Registers the specified targets with the specified target group.\n If the target is an EC2 instance, it must be in the running state when you register it.\n By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.\n With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.\n To remove a target from a target group, use DeregisterTargets .\n See also: AWS API Documentation\n \n Examples\n This example registers the specified instances with the specified target group.\n Expected Output:\n This example registers the specified instance with the specified target group using multiple ports. This enables you to register ECS containers on the same instance as targets in the target group.\n Expected Output:\n \n :example: response = client.register_targets(\n TargetGroupArn='string',\n Targets=[\n {\n 'Id': 'string',\n 'Port': 123,\n 'AvailabilityZone': 'string'\n },\n ]\n )\n \n \n :type TargetGroupArn: string\n :param TargetGroupArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the target group.\n \n\n :type Targets: list\n :param Targets: [REQUIRED]\n The targets.\n To register a target by instance ID, specify the instance ID. To register a target by IP address, specify the IP address. To register a Lambda function, specify the ARN of the Lambda function.\n (dict) --Information about a target.\n Id (string) -- [REQUIRED]The ID of the target. If the target type of the target group is instance , specify an instance ID. If the target type is ip , specify an IP address. If the target type is lambda , specify the ARN of the Lambda function.\n Port (integer) --The port on which the target is listening.\n AvailabilityZone (string) --An Availability Zone or all . This determines whether the target receives traffic from the load balancer nodes in the specified Availability Zone or from all enabled Availability Zones for the load balancer.\n This parameter is not supported if the target type of the target group is instance .\n If the target type is ip and the IP address is in a subnet of the VPC for the target group, the Availability Zone is automatically detected and this parameter is optional. 
If the IP address is outside the VPC, this parameter is required.\n With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all .\n If the target type is lambda , this parameter is optional and the only supported value is all .\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef remove_listener_certificates(ListenerArn=None, Certificates=None):\n \"\"\"\n Removes the specified certificate from the specified secure listener.\n You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener .\n To list the certificates for your listener, use DescribeListenerCertificates .\n See also: AWS API Documentation\n \n \n :example: response = client.remove_listener_certificates(\n ListenerArn='string',\n Certificates=[\n {\n 'CertificateArn': 'string',\n 'IsDefault': True|False\n },\n ]\n )\n \n \n :type ListenerArn: string\n :param ListenerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the listener.\n \n\n :type Certificates: list\n :param Certificates: [REQUIRED]\n The certificate to remove. You can specify one certificate per call. Set CertificateArn to the certificate ARN but do not set IsDefault .\n (dict) --Information about an SSL server certificate.\n CertificateArn (string) --The Amazon Resource Name (ARN) of the certificate.\n IsDefault (boolean) --Indicates whether the certificate is the default certificate. Do not set IsDefault when specifying a certificate as an input parameter.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef remove_tags(ResourceArns=None, TagKeys=None):\n \"\"\"\n Removes the specified tags from the specified Elastic Load Balancing resource.\n To list the current tags for your resources, use DescribeTags .\n See also: AWS API Documentation\n \n Examples\n This example removes the specified tags from the specified load balancer.\n Expected Output:\n \n :example: response = client.remove_tags(\n ResourceArns=[\n 'string',\n ],\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceArns: list\n :param ResourceArns: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource.\n (string) --\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n The tag keys for the tags to remove.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef set_ip_address_type(LoadBalancerArn=None, IpAddressType=None):\n \"\"\"\n Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.\n Network Load Balancers must use ipv4 .\n See also: AWS API Documentation\n \n \n :example: response = client.set_ip_address_type(\n LoadBalancerArn='string',\n IpAddressType='ipv4'|'dualstack'\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :type IpAddressType: string\n :param IpAddressType: [REQUIRED]\n The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4 .\n \n\n :rtype: dict\n :return: {\n 'IpAddressType': 'ipv4'|'dualstack'\n }\n \n \n \"\"\"\n pass\n\ndef set_rule_priorities(RulePriorities=None):\n \"\"\"\n Sets the priorities of the specified rules.\n You can reorder the rules as long as there are no priority conflicts in the new order. 
Any existing rules that you do not specify retain their current priority.\n See also: AWS API Documentation\n \n Examples\n This example sets the priority of the specified rule.\n Expected Output:\n \n :example: response = client.set_rule_priorities(\n RulePriorities=[\n {\n 'RuleArn': 'string',\n 'Priority': 123\n },\n ]\n )\n \n \n :type RulePriorities: list\n :param RulePriorities: [REQUIRED]\n The rule priorities.\n (dict) --Information about the priorities for the rules for a listener.\n RuleArn (string) --The Amazon Resource Name (ARN) of the rule.\n Priority (integer) --The rule priority.\n \n \n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'RuleArn': 'string',\n 'Priority': 'string',\n 'Conditions': [\n {\n 'Field': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'Actions': [\n {\n 'Type': 'forward'|'authenticate-oidc'|'authenticate-cognito'|'redirect'|'fixed-response',\n 'TargetGroupArn': 'string',\n 'AuthenticateOidcConfig': {\n 'Issuer': 'string',\n 'AuthorizationEndpoint': 'string',\n 'TokenEndpoint': 'string',\n 'UserInfoEndpoint': 'string',\n 'ClientId': 'string',\n 'ClientSecret': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'AuthenticateCognitoConfig': {\n 'UserPoolArn': 'string',\n 'UserPoolClientId': 'string',\n 'UserPoolDomain': 'string',\n 'SessionCookieName': 'string',\n 'Scope': 'string',\n 'SessionTimeout': 123,\n 'AuthenticationRequestExtraParams': {\n 'string': 'string'\n },\n 'OnUnauthenticatedRequest': 'deny'|'allow'|'authenticate'\n },\n 'Order': 123,\n 'RedirectConfig': {\n 'Protocol': 'string',\n 'Port': 'string',\n 'Host': 'string',\n 'Path': 'string',\n 'Query': 'string',\n 'StatusCode': 'HTTP_301'|'HTTP_302'\n },\n 'FixedResponseConfig': {\n 'MessageBody': 'string',\n 'StatusCode': 'string',\n 'ContentType': 'string'\n }\n },\n ],\n 'IsDefault': True|False\n },\n ]\n }\n \n \n :returns: \n A-Z, a-z, 0-9\n _ - . $ / ~ \" ' @ : +\n & (using &amp;)\n \n (matches 0 or more characters)\n \n \n ? (matches exactly 1 character)\n \n \"\"\"\n pass\n\ndef set_security_groups(LoadBalancerArn=None, SecurityGroups=None):\n \"\"\"\n Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups.\n You can't specify a security group for a Network Load Balancer.\n See also: AWS API Documentation\n \n Examples\n This example associates the specified security group with the specified load balancer.\n Expected Output:\n \n :example: response = client.set_security_groups(\n LoadBalancerArn='string',\n SecurityGroups=[\n 'string',\n ]\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :type SecurityGroups: list\n :param SecurityGroups: [REQUIRED]\n The IDs of the security groups.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'SecurityGroupIds': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef set_subnets(LoadBalancerArn=None, Subnets=None, SubnetMappings=None):\n \"\"\"\n Enables the Availability Zone for the specified public subnets for the specified Application Load Balancer. 
The specified subnets replace the previously enabled subnets.\n You can't change the subnets for a Network Load Balancer.\n See also: AWS API Documentation\n \n Examples\n This example enables the Availability Zones for the specified subnets for the specified load balancer.\n Expected Output:\n \n :example: response = client.set_subnets(\n LoadBalancerArn='string',\n Subnets=[\n 'string',\n ],\n SubnetMappings=[\n {\n 'SubnetId': 'string',\n 'AllocationId': 'string'\n },\n ]\n )\n \n \n :type LoadBalancerArn: string\n :param LoadBalancerArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the load balancer.\n \n\n :type Subnets: list\n :param Subnets: The IDs of the public subnets. You must specify subnets from at least two Availability Zones. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.\n (string) --\n \n\n :type SubnetMappings: list\n :param SubnetMappings: The IDs of the public subnets. You must specify subnets from at least two Availability Zones. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.\n You cannot specify Elastic IP addresses for your subnets.\n (dict) --Information about a subnet mapping.\n SubnetId (string) --The ID of the subnet.\n AllocationId (string) --[Network Load Balancers] The allocation ID of the Elastic IP address.\n \n \n\n :rtype: dict\n :return: {\n 'AvailabilityZones': [\n {\n 'ZoneName': 'string',\n 'SubnetId': 'string',\n 'LoadBalancerAddresses': [\n {\n 'IpAddress': 'string',\n 'AllocationId': 'string'\n },\n ]\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5529980659484863, "alphanum_fraction": 0.5612764358520508, "avg_line_length": 39.62517547607422, "blob_id": "33d736de596e3576d16b6ee836286d8525ef8285", "content_id": "a86dd8847afc5ba14bba33fa43996ca8e1c246dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204630, "license_type": "permissive", "max_line_length": 891, "num_lines": 5037, "path": "/pyboto3/devicefarm.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
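\n\n# --- Usage sketch (not part of the generated stubs) ---\n# A minimal, hedged example of exercising the operations documented above with a live boto3 client.\n# All ARNs, instance IDs, paths, and thresholds below are hypothetical placeholders rather than\n# values taken from this documentation; treat this as a sketch of typical call sequencing, not a\n# definitive recipe.\ndef _example_elbv2_usage():\n
    import boto3\n\n
    client = boto3.client('elbv2')  # the real client that this module documents\n\n
    # Hypothetical ARNs -- substitute resources from your own account.\n
    listener_arn = ('arn:aws:elasticloadbalancing:us-west-2:123456789012:'\n
                    'listener/app/my-lb/50dc6c495c0c9188/f2f7dc8efc522ab2')\n
    tg_arn = ('arn:aws:elasticloadbalancing:us-west-2:123456789012:'\n
              'targetgroup/my-targets/73e2d6bc24d8a067')\n\n
    # Point the listener's default rule at a target group (see modify_listener above).\n
    client.modify_listener(\n
        ListenerArn=listener_arn,\n
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': tg_arn}],\n
    )\n\n
    # Tune the target group's HTTP health check (see modify_target_group above); for an\n
    # Application Load Balancer the interval must fall within 5-300 seconds.\n
    client.modify_target_group(\n
        TargetGroupArn=tg_arn,\n
        HealthCheckPath='/health',\n
        HealthCheckIntervalSeconds=15,\n
        HealthyThresholdCount=3,\n
        Matcher={'HttpCode': '200-299'},\n
    )\n\n
    # Register one EC2 instance on two different ports (see register_targets above), which\n
    # the documentation permits, e.g. for containerized targets.\n
    client.register_targets(\n
        TargetGroupArn=tg_arn,\n
        Targets=[\n
            {'Id': 'i-0123456789abcdef0', 'Port': 8080},\n
            {'Id': 'i-0123456789abcdef0', 'Port': 8081},\n
        ],\n
    )\n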
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_device_pool(projectArn=None, name=None, description=None, rules=None):\n \"\"\"\n Creates a device pool.\n See also: AWS API Documentation\n \n Examples\n The following example creates a new device pool named MyDevicePool inside an existing project.\n Expected Output:\n \n :example: response = client.create_device_pool(\n projectArn='string',\n name='string',\n description='string',\n rules=[\n {\n 'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',\n 'value': 'string'\n },\n ]\n )\n \n \n :type projectArn: string\n :param projectArn: [REQUIRED]\n The ARN of the project for the device pool.\n \n\n :type name: string\n :param name: [REQUIRED]\n The device pool's name.\n \n\n :type description: string\n :param description: The device pool's description.\n\n :type rules: list\n :param rules: [REQUIRED]\n The device pool's rules.\n (dict) --Represents a condition for a device pool. It is passed in as the rules parameter to CreateDevicePool and UpdateDevicePool .\n attribute (string) --The rule's attribute. It is the aspect of a device such as platform or model used as selection criteria to create or update a device pool.\n Allowed values include:\n ARN: The Amazon Resource Name (ARN) of a device. For example, 'arn:aws:devicefarm:us-west-2::device:12345Example'.\n PLATFORM: The device platform. Valid values are 'ANDROID' or 'IOS'.\n FORM_FACTOR: The device form factor. Valid values are 'PHONE' or 'TABLET'.\n MANUFACTURER: The device manufacturer. For example, 'Apple'.\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are 'TRUE' or 'FALSE'.\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are 'TRUE' or 'FALSE'.\n APPIUM_VERSION: The Appium version for the test.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. 
Valid values are 'PUBLIC' or 'PRIVATE'.\n operator (string) --The rule's operator.\n EQUALS: The equals operator.\n GREATER_THAN: The greater-than operator.\n IN: The in operator.\n LESS_THAN: The less-than operator.\n NOT_IN: The not-in operator.\n CONTAINS: The contains operator.\n value (string) --The rule's value.\n The value must be passed in as a string using escaped quotes.\n For example:\n 'value': ''ANDROID''\n \n \n\n :rtype: dict\n :return: {\n 'devicePool': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'rules': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',\n 'value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CURATED: A device pool that is created and managed by AWS Device Farm.\n PRIVATE: A device pool that is created and managed by the device pool developer.\n \n \"\"\"\n pass\n\ndef create_instance_profile(name=None, description=None, packageCleanup=None, excludeAppPackagesFromCleanup=None, rebootAfterUse=None):\n \"\"\"\n Creates a profile that can be applied to one or more private fleet device instances.\n See also: AWS API Documentation\n \n \n :example: response = client.create_instance_profile(\n name='string',\n description='string',\n packageCleanup=True|False,\n excludeAppPackagesFromCleanup=[\n 'string',\n ],\n rebootAfterUse=True|False\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of your instance profile.\n \n\n :type description: string\n :param description: The description of your instance profile.\n\n :type packageCleanup: boolean\n :param packageCleanup: When set to true , Device Farm will remove app packages after a test run. The default value is false for private devices.\n\n :type excludeAppPackagesFromCleanup: list\n :param excludeAppPackagesFromCleanup: An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.\n The list of packages is only considered if you set packageCleanup to true .\n (string) --\n \n\n :type rebootAfterUse: boolean\n :param rebootAfterUse: When set to true , Device Farm will reboot the instance after a test run. 
The default value is true .\n\n :rtype: dict\n :return: {\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_network_profile(projectArn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n \"\"\"\n Creates a network profile.\n See also: AWS API Documentation\n \n \n :example: response = client.create_network_profile(\n projectArn='string',\n name='string',\n description='string',\n type='CURATED'|'PRIVATE',\n uplinkBandwidthBits=123,\n downlinkBandwidthBits=123,\n uplinkDelayMs=123,\n downlinkDelayMs=123,\n uplinkJitterMs=123,\n downlinkJitterMs=123,\n uplinkLossPercent=123,\n downlinkLossPercent=123\n )\n \n \n :type projectArn: string\n :param projectArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project for which you want to create a network profile.\n \n\n :type name: string\n :param name: [REQUIRED]\n The name you wish to specify for the new network profile.\n \n\n :type description: string\n :param description: The description of the network profile.\n\n :type type: string\n :param type: The type of network profile you wish to create. Valid values are listed below.\n\n :type uplinkBandwidthBits: integer\n :param uplinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.\n\n :type downlinkBandwidthBits: integer\n :param downlinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.\n\n :type uplinkDelayMs: integer\n :param uplinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.\n\n :type downlinkDelayMs: integer\n :param downlinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.\n\n :type uplinkJitterMs: integer\n :param uplinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.\n\n :type downlinkJitterMs: integer\n :param downlinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.\n\n :type uplinkLossPercent: integer\n :param uplinkLossPercent: Proportion of transmitted packets that fail to arrive from 0 to 100 percent.\n\n :type downlinkLossPercent: integer\n :param downlinkLossPercent: Proportion of received packets that fail to arrive from 0 to 100 percent.\n\n :rtype: dict\n :return: {\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_project(name=None, defaultJobTimeoutMinutes=None):\n \"\"\"\n Creates a new project.\n See also: AWS API Documentation\n \n Examples\n The following example creates a new project named MyProject.\n Expected Output:\n \n :example: response = client.create_project(\n name='string',\n defaultJobTimeoutMinutes=123\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The project's name.\n \n\n :type defaultJobTimeoutMinutes: 
integer\n :param defaultJobTimeoutMinutes: Sets the execution timeout value (in minutes) for a project. All test runs in this project will use the specified execution timeout value unless overridden when scheduling a run.\n\n :rtype: dict\n :return: {\n 'project': {\n 'arn': 'string',\n 'name': 'string',\n 'defaultJobTimeoutMinutes': 123,\n 'created': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_remote_access_session(projectArn=None, deviceArn=None, instanceArn=None, sshPublicKey=None, remoteDebugEnabled=None, remoteRecordEnabled=None, remoteRecordAppArn=None, name=None, clientId=None, configuration=None, interactionMode=None, skipAppResign=None):\n \"\"\"\n Specifies and starts a remote access session.\n See also: AWS API Documentation\n \n Examples\n The following example creates a remote access session named MySession.\n Expected Output:\n \n :example: response = client.create_remote_access_session(\n projectArn='string',\n deviceArn='string',\n instanceArn='string',\n sshPublicKey='string',\n remoteDebugEnabled=True|False,\n remoteRecordEnabled=True|False,\n remoteRecordAppArn='string',\n name='string',\n clientId='string',\n configuration={\n 'billingMethod': 'METERED'|'UNMETERED',\n 'vpceConfigurationArns': [\n 'string',\n ]\n },\n interactionMode='INTERACTIVE'|'NO_VIDEO'|'VIDEO_ONLY',\n skipAppResign=True|False\n )\n \n \n :type projectArn: string\n :param projectArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project for which you want to create a remote access session.\n \n\n :type deviceArn: string\n :param deviceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the device for which you want to create a remote access session.\n \n\n :type instanceArn: string\n :param instanceArn: The Amazon Resource Name (ARN) of the device instance for which you want to create a remote access session.\n\n :type sshPublicKey: string\n :param sshPublicKey: The public key of the ssh key pair you want to use for connecting to remote devices in your remote debugging session. This is only required if remoteDebugEnabled is set to true .\n\n :type remoteDebugEnabled: boolean\n :param remoteDebugEnabled: Set to true if you want to access devices remotely for debugging in your remote access session.\n\n :type remoteRecordEnabled: boolean\n :param remoteRecordEnabled: Set to true to enable remote recording for the remote access session.\n\n :type remoteRecordAppArn: string\n :param remoteRecordAppArn: The Amazon Resource Name (ARN) for the app to be recorded in the remote access session.\n\n :type name: string\n :param name: The name of the remote access session that you wish to create.\n\n :type clientId: string\n :param clientId: Unique identifier for the client. If you want access to multiple devices on the same client, you should pass the same clientId value in each call to CreateRemoteAccessSession . This is required only if remoteDebugEnabled is set to true .\n\n :type configuration: dict\n :param configuration: The configuration information for the remote access session request.\n billingMethod (string) --The billing method for the remote access session.\n vpceConfigurationArns (list) --An array of Amazon Resource Names (ARNs) included in the VPC endpoint configuration.\n (string) --\n \n\n :type interactionMode: string\n :param interactionMode: The interaction mode of the remote access session. Valid values are:\n INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. 
You cannot run XCUITest framework-based tests in this mode.\n NO_VIDEO: You are connected to the device but cannot interact with it or view the screen. This mode has the fastest test execution speed. You can run XCUITest framework-based tests in this mode.\n VIDEO_ONLY: You can view the screen but cannot touch or rotate it. You can run XCUITest framework-based tests and watch the screen in this mode.\n \n\n :type skipAppResign: boolean\n :param skipAppResign: When set to true , for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.\n For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs .\n \n\n :rtype: dict\n :return: {\n 'remoteAccessSession': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'message': 'string',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'remoteDebugEnabled': True|False,\n 'remoteRecordEnabled': True|False,\n 'remoteRecordAppArn': 'string',\n 'hostAddress': 'string',\n 'clientId': 'string',\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'endpoint': 'string',\n 'deviceUdid': 'string',\n 'interactionMode': 'INTERACTIVE'|'NO_VIDEO'|'VIDEO_ONLY',\n 'skipAppResign': True|False\n }\n }\n \n \n :returns: \n PENDING: A pending status.\n PENDING_CONCURRENCY: A pending concurrency status.\n PENDING_DEVICE: A pending device status.\n PROCESSING: A processing status.\n SCHEDULING: A scheduling status.\n PREPARING: A preparing status.\n RUNNING: A running status.\n COMPLETED: A completed status.\n STOPPING: A stopping status.\n \n \"\"\"\n pass\n\ndef create_upload(projectArn=None, name=None, type=None, contentType=None):\n \"\"\"\n Uploads an app or test scripts.\n See also: AWS API Documentation\n \n Examples\n The following example creates a new Appium Python test package upload inside an existing project.\n Expected Output:\n \n :example: response = client.create_upload(\n projectArn='string',\n name='string',\n 
type='ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n contentType='string'\n )\n \n \n :type projectArn: string\n :param projectArn: [REQUIRED]\n The ARN of the project for the upload.\n \n\n :type name: string\n :param name: [REQUIRED]\n The upload's file name. The name should not contain the '/' character. If uploading an iOS app, the file name needs to end with the .ipa extension. If uploading an Android app, the file name needs to end with the .apk extension. For all others, the file name must end with the .zip file extension.\n \n\n :type type: string\n :param type: [REQUIRED]\n The upload's upload type.\n Must be one of the following values:\n ANDROID_APP: An Android upload.\n IOS_APP: An iOS upload.\n WEB_APP: A web application upload.\n EXTERNAL_DATA: An external data upload.\n APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.\n APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.\n APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.\n APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.\n APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.\n APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.\n APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.\n APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.\n CALABASH_TEST_PACKAGE: A Calabash test package upload.\n INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.\n UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.\n UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.\n XCTEST_TEST_PACKAGE: An XCode test package upload.\n XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.\n APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.\n APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.\n APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.\n APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.\n APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.\n APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.\n APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.\n APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.\n INSTRUMENTATION_TEST_SPEC: An instrumentation 
test spec upload.\n XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.\n Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.\n \n\n :type contentType: string\n :param contentType: The upload's content type (for example, 'application/octet-stream').\n\n :rtype: dict\n :return: {\n 'upload': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n 'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',\n 'url': 'string',\n 'metadata': 'string',\n 'contentType': 'string',\n 'message': 'string',\n 'category': 'CURATED'|'PRIVATE'\n }\n }\n \n \n :returns: \n ANDROID_APP: An Android upload.\n IOS_APP: An iOS upload.\n WEB_APP: A web application upload.\n EXTERNAL_DATA: An external data upload.\n APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.\n APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.\n APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.\n APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.\n APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for web apps.\n APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for web apps.\n APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for web apps.\n APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for web apps.\n APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for web apps.\n CALABASH_TEST_PACKAGE: A Calabash test package upload.\n INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.\n UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.\n UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.\n XCTEST_TEST_PACKAGE: An XCode test package upload.\n XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.\n APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.\n APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.\n APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.\n APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.\n APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.\n APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.\n APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.\n APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.\n 
INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.\n XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.\n \n \"\"\"\n pass\n\ndef create_vpce_configuration(vpceConfigurationName=None, vpceServiceName=None, serviceDnsName=None, vpceConfigurationDescription=None):\n \"\"\"\n Creates a configuration record in Device Farm for your Amazon Virtual Private Cloud (VPC) endpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.create_vpce_configuration(\n vpceConfigurationName='string',\n vpceServiceName='string',\n serviceDnsName='string',\n vpceConfigurationDescription='string'\n )\n \n \n :type vpceConfigurationName: string\n :param vpceConfigurationName: [REQUIRED]\n The friendly name you give to your VPC endpoint configuration, to manage your configurations more easily.\n \n\n :type vpceServiceName: string\n :param vpceServiceName: [REQUIRED]\n The name of the VPC endpoint service running inside your AWS account that you want Device Farm to test.\n \n\n :type serviceDnsName: string\n :param serviceDnsName: [REQUIRED]\n The DNS name of the service running in your VPC that you want Device Farm to test.\n \n\n :type vpceConfigurationDescription: string\n :param vpceConfigurationDescription: An optional description, providing more details about your VPC endpoint configuration.\n\n :rtype: dict\n :return: {\n 'vpceConfiguration': {\n 'arn': 'string',\n 'vpceConfigurationName': 'string',\n 'vpceServiceName': 'string',\n 'serviceDnsName': 'string',\n 'vpceConfigurationDescription': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_device_pool(arn=None):\n \"\"\"\n Deletes a device pool given the pool ARN. Does not allow deletion of curated pools owned by the system.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a specific device pool.\n Expected Output:\n \n :example: response = client.delete_device_pool(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n Represents the Amazon Resource Name (ARN) of the Device Farm device pool you wish to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_instance_profile(arn=None):\n \"\"\"\n Deletes a profile that can be applied to one or more private device instances.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_instance_profile(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the instance profile you are requesting to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_network_profile(arn=None):\n \"\"\"\n Deletes a network profile.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_network_profile(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the network profile you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_project(arn=None):\n \"\"\"\n Deletes an AWS Device Farm project, given the project ARN.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a specific project.\n Expected Output:\n \n :example: response = client.delete_project(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n Represents the Amazon Resource Name (ARN) of the Device Farm project you wish to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_remote_access_session(arn=None):\n \"\"\"\n Deletes a completed remote access session and its results.\n See also: AWS 
API Documentation\n \n Examples\n The following example deletes a specific remote access session.\n Expected Output:\n \n :example: response = client.delete_remote_access_session(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the session for which you want to delete remote access.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_run(arn=None):\n \"\"\"\n Deletes the run, given the run ARN.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a specific test run.\n Expected Output:\n \n :example: response = client.delete_run(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) for the run you wish to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_upload(arn=None):\n \"\"\"\n Deletes an upload given the upload ARN.\n See also: AWS API Documentation\n \n Examples\n The following example deletes a specific upload.\n Expected Output:\n \n :example: response = client.delete_upload(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_vpce_configuration(arn=None):\n \"\"\"\n Deletes a configuration for your Amazon Virtual Private Cloud (VPC) endpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_vpce_configuration(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the VPC endpoint configuration you want to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_account_settings():\n \"\"\"\n Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about your Device Farm account settings.\n Expected Output:\n \n :example: response = client.get_account_settings()\n \n \n :rtype: dict\n :return: {\n 'accountSettings': {\n 'awsAccountNumber': 'string',\n 'unmeteredDevices': {\n 'string': 123\n },\n 'unmeteredRemoteAccessDevices': {\n 'string': 123\n },\n 'maxJobTimeoutMinutes': 123,\n 'trialMinutes': {\n 'total': 123.0,\n 'remaining': 123.0\n },\n 'maxSlots': {\n 'string': 123\n },\n 'defaultJobTimeoutMinutes': 123,\n 'skipAppResign': True|False\n }\n }\n \n \n :returns: \n (string) --\n (integer) --\n \n \n \n \"\"\"\n pass\n\ndef get_device(arn=None):\n \"\"\"\n Gets information about a unique device type.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about a specific device.\n Expected Output:\n \n :example: response = client.get_device(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The device type's ARN.\n \n\n :rtype: dict\n :return: {\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n }\n }\n \n \n :returns: \n ANDROID: The Android platform.\n IOS: The iOS platform.\n \n \"\"\"\n pass\n\ndef get_device_instance(arn=None):\n \"\"\"\n Returns information about a device instance belonging to a private device fleet.\n See also: AWS API Documentation\n \n \n :example: response = client.get_device_instance(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the instance you're requesting information about.\n \n\n :rtype: dict\n :return: {\n 'deviceInstance': {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_device_pool(arn=None):\n \"\"\"\n Gets information about a device pool.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about a specific device pool, given a project 
ARN.\n Expected Output:\n \n :example: response = client.get_device_pool(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The device pool's ARN.\n \n\n :rtype: dict\n :return: {\n 'devicePool': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'rules': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',\n 'value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n ARN: The Amazon Resource Name (ARN) of a device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".\n PLATFORM: The device platform. Valid values are \"ANDROID\" or \"IOS\".\n FORM_FACTOR: The device form factor. Valid values are \"PHONE\" or \"TABLET\".\n MANUFACTURER: The device manufacturer. For example, \"Apple\".\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\".\n APPIUM_VERSION: The Appium version for the test.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".\n \n \"\"\"\n pass\n\ndef get_device_pool_compatibility(devicePoolArn=None, appArn=None, testType=None, test=None, configuration=None):\n \"\"\"\n Gets information about compatibility with a device pool.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about the compatibility of a specific device pool, given its ARN.\n Expected Output:\n \n :example: response = client.get_device_pool_compatibility(\n devicePoolArn='string',\n appArn='string',\n testType='BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n test={\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'testPackageArn': 'string',\n 'testSpecArn': 'string',\n 'filter': 'string',\n 'parameters': {\n 'string': 'string'\n }\n },\n configuration={\n 'extraDataPackageArn': 'string',\n 'networkProfileArn': 'string',\n 'locale': 'string',\n 'location': {\n 'latitude': 123.0,\n 'longitude': 123.0\n },\n 'vpceConfigurationArns': [\n 'string',\n ],\n 'customerArtifactPaths': {\n 'iosPaths': [\n 'string',\n ],\n 'androidPaths': [\n 'string',\n ],\n 'deviceHostPaths': [\n 'string',\n ]\n },\n 'radios': {\n 'wifi': True|False,\n 'bluetooth': True|False,\n 'nfc': True|False,\n 'gps': True|False\n },\n 'auxiliaryApps': [\n 'string',\n ],\n 'billingMethod': 'METERED'|'UNMETERED'\n }\n )\n \n \n :type devicePoolArn: string\n :param devicePoolArn: [REQUIRED]\n The device pool's ARN.\n \n\n :type appArn: string\n :param appArn: 
The ARN of the app that is associated with the specified device pool.\n\n :type testType: string\n :param testType: The test type for the specified device pool.\n Allowed values include the following:\n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n \n\n :type test: dict\n :param test: Information about the uploaded test to be run against the device pool.\n type (string) -- [REQUIRED]The test's type.\n Must be one of the following values:\n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n testPackageArn (string) --The ARN of the uploaded test that will be run.\n testSpecArn (string) --The ARN of the YAML-formatted test specification.\n filter (string) --The test's filter.\n parameters (dict) --The test's parameters, such as test framework parameters and fixture settings. Parameters are represented by name-value pairs of strings.\n For all tests:\n app_performance_monitoring: Performance monitoring is enabled by default. Set this parameter to 'false' to disable it.\n For Calabash tests:\n profile: A cucumber profile, for example, 'my_profile_name'.\n tags: You can limit execution to features or scenarios that have (or don't have) certain tags, for example, '@smoke' or '@smoke,~@wip'.\n For Appium tests (all types):\n appium_version: The Appium version. Currently supported values are '1.6.5' (and higher), 'latest', and 'default'.\n latest will run the latest Appium version supported by Device Farm (1.9.1).\n For default , Device Farm will choose a compatible version of Appium for the device. 
The current behavior is to run 1.7.2 on Android devices and iOS 9 and earlier, 1.7.2 for iOS 10 and later.\n This behavior is subject to change.\n For Fuzz tests (Android only):\n event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.\n throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.\n seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.\n For Explorer tests:\n username: A username to use if the Explorer encounters a login form. If not supplied, no username will be inserted.\n password: A password to use if the Explorer encounters a login form. If not supplied, no password will be inserted.\n For Instrumentation:\n filter: A test filter string. Examples:\n Running a single test case: 'com.android.abc.Test1'\n Running a single test: 'com.android.abc.Test1#smoke'\n Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'\n For XCTest and XCTestUI:\n filter: A test filter string. Examples:\n Running a single test class: 'LoginTests'\n Running multiple test classes: 'LoginTests,SmokeTests'\n Running a single test: 'LoginTests/testValid'\n Running multiple tests: 'LoginTests/testValid,LoginTests/testInvalid'\n For UIAutomator:\n filter: A test filter string. Examples:\n Running a single test case: 'com.android.abc.Test1'\n Running a single test: 'com.android.abc.Test1#smoke'\n Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'\n \n (string) --\n (string) --\n \n \n\n :type configuration: dict\n :param configuration: An object containing information about the settings for a run.\n extraDataPackageArn (string) --The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.\n networkProfileArn (string) --Reserved for internal use.\n locale (string) --Information about the locale that is used for the run.\n location (dict) --Information about the location that is used for the run.\n latitude (float) -- [REQUIRED]The latitude.\n longitude (float) -- [REQUIRED]The longitude.\n vpceConfigurationArns (list) --An array of Amazon Resource Names (ARNs) for your VPC endpoint configurations.\n (string) --\n customerArtifactPaths (dict) --Input CustomerArtifactPaths object for the scheduled run configuration.\n iosPaths (list) --Comma-separated list of paths on the iOS device where the artifacts generated by the customer's tests will be pulled from.\n (string) --\n androidPaths (list) --Comma-separated list of paths on the Android device where the artifacts generated by the customer's tests will be pulled from.\n (string) --\n deviceHostPaths (list) --Comma-separated list of paths in the test execution environment where the artifacts generated by the customer's tests will be pulled from.\n (string) --\n \n radios (dict) --Information about the radio states for the run.\n wifi (boolean) --True if Wi-Fi is enabled at the beginning of the test; otherwise, false.\n bluetooth (boolean) --True if Bluetooth is enabled at the beginning of the test; otherwise, false.\n nfc (boolean) --True if NFC is enabled at the beginning of the test; otherwise, false.\n gps (boolean) --True if GPS is enabled at the beginning of the test; otherwise, false.\n auxiliaryApps (list) --A list of auxiliary apps for the run.\n (string) --\n billingMethod (string) --Specifies the billing method for a test run: metered or unmetered . 
If the parameter is not specified, the default value is metered .\n \n\n :rtype: dict\n :return: {\n 'compatibleDevices': [\n {\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'compatible': True|False,\n 'incompatibilityMessages': [\n {\n 'message': 'string',\n 'type': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE'\n },\n ]\n },\n ],\n 'incompatibleDevices': [\n {\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'compatible': True|False,\n 'incompatibilityMessages': [\n {\n 'message': 'string',\n 'type': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE'\n },\n ]\n },\n ]\n }\n \n \n :returns: \n PHONE: The phone form factor.\n TABLET: The tablet form factor.\n \n \"\"\"\n pass\n\ndef get_instance_profile(arn=None):\n \"\"\"\n Returns information about the specified instance profile.\n See also: AWS API Documentation\n \n \n :example: response = client.get_instance_profile(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of your instance profile.\n \n\n :rtype: dict\n :return: {\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_job(arn=None):\n 
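# A minimal usage sketch: fetch a job and inspect its status and result.\n    # Hedged assumptions: 'job-arn' is a placeholder ARN, and the client is a\n    # standard boto3 Device Farm client.\n    #\n    #   import boto3\n    #   client = boto3.client('devicefarm', region_name='us-west-2')\n    #   job = client.get_job(arn='job-arn')['job']\n    #   print(job['name'], job['status'], job['result'])\n    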
\"\"\"\n Gets information about a job.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about a specific job.\n Expected Output:\n \n :example: response = client.get_job(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The job's ARN.\n \n\n :rtype: dict\n :return: {\n 'job': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'videoEndpoint': 'string',\n 'videoCapture': True|False\n }\n }\n \n \n :returns: \n PENDING: A pending status.\n PENDING_CONCURRENCY: A pending concurrency status.\n PENDING_DEVICE: A pending device status.\n PROCESSING: A processing status.\n SCHEDULING: A scheduling status.\n PREPARING: A preparing status.\n RUNNING: A running status.\n COMPLETED: A completed status.\n STOPPING: A stopping status.\n \n \"\"\"\n pass\n\ndef get_network_profile(arn=None):\n \"\"\"\n Returns information about a network profile.\n See also: AWS API Documentation\n \n \n :example: response = client.get_network_profile(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the network profile you want to return information about.\n \n\n :rtype: dict\n :return: {\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 
'downlinkLossPercent': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_offering_status(nextToken=None):\n \"\"\"\n Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact [email protected] if you believe that you should be able to invoke this operation.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about Device Farm offerings available to your account.\n Expected Output:\n \n :example: response = client.get_offering_status(\n nextToken='string'\n )\n \n \n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'current': {\n 'string': {\n 'type': 'PURCHASE'|'RENEW'|'SYSTEM',\n 'offering': {\n 'id': 'string',\n 'description': 'string',\n 'type': 'RECURRING',\n 'platform': 'ANDROID'|'IOS',\n 'recurringCharges': [\n {\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n },\n 'frequency': 'MONTHLY'\n },\n ]\n },\n 'quantity': 123,\n 'effectiveOn': datetime(2015, 1, 1)\n }\n },\n 'nextPeriod': {\n 'string': {\n 'type': 'PURCHASE'|'RENEW'|'SYSTEM',\n 'offering': {\n 'id': 'string',\n 'description': 'string',\n 'type': 'RECURRING',\n 'platform': 'ANDROID'|'IOS',\n 'recurringCharges': [\n {\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n },\n 'frequency': 'MONTHLY'\n },\n ]\n },\n 'quantity': 123,\n 'effectiveOn': datetime(2015, 1, 1)\n }\n },\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_project(arn=None):\n \"\"\"\n Gets information about a project.\n See also: AWS API Documentation\n \n Examples\n The following example gets information about a specific project.\n Expected Output:\n \n :example: response = client.get_project(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The project's ARN.\n \n\n :rtype: dict\n :return: {\n 'project': {\n 'arn': 'string',\n 'name': 'string',\n 'defaultJobTimeoutMinutes': 123,\n 'created': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_remote_access_session(arn=None):\n \"\"\"\n Returns a link to a currently running remote access session.\n See also: AWS API Documentation\n \n Examples\n The following example gets a specific remote access session.\n Expected Output:\n \n :example: response = client.get_remote_access_session(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the remote access session about which you want to get session information.\n \n\n :rtype: dict\n :return: {\n 'remoteAccessSession': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'message': 'string',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'remoteDebugEnabled': True|False,\n 'remoteRecordEnabled': True|False,\n 'remoteRecordAppArn': 'string',\n 'hostAddress': 'string',\n 'clientId': 'string',\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'endpoint': 'string',\n 'deviceUdid': 'string',\n 'interactionMode': 'INTERACTIVE'|'NO_VIDEO'|'VIDEO_ONLY',\n 'skipAppResign': True|False\n }\n }\n \n \n :returns: \n PENDING: A pending condition.\n PASSED: A passing condition.\n WARNED: A warning condition.\n FAILED: A failed condition.\n SKIPPED: A skipped condition.\n ERRORED: An error condition.\n STOPPED: A stopped condition.\n \n \"\"\"\n pass\n\ndef get_run(arn=None):\n 
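# A minimal polling sketch: get_run is commonly polled until the run reaches\n    # the terminal COMPLETED status, then the result is read. Hedged assumptions:\n    # 'run-arn' is a placeholder ARN and 'client' is a boto3 Device Farm client.\n    #\n    #   import time\n    #   run = client.get_run(arn='run-arn')['run']\n    #   while run['status'] != 'COMPLETED':\n    #       time.sleep(30)\n    #       run = client.get_run(arn='run-arn')['run']\n    #   print(run['result'], run['counters'])\n    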
\"\"\"\n Gets information about a run.\n See also: AWS API Documentation\n \n Examples\n The following example gets information about a specific test run.\n Expected Output:\n \n :example: response = client.get_run(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The run's ARN.\n \n\n :rtype: dict\n :return: {\n 'run': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'platform': 'ANDROID'|'IOS',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'totalJobs': 123,\n 'completedJobs': 123,\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n },\n 'parsingResultUrl': 'string',\n 'resultCode': 'PARSING_FAILED'|'VPC_ENDPOINT_SETUP_FAILED',\n 'seed': 123,\n 'appUpload': 'string',\n 'eventCount': 123,\n 'jobTimeoutMinutes': 123,\n 'devicePoolArn': 'string',\n 'locale': 'string',\n 'radios': {\n 'wifi': True|False,\n 'bluetooth': True|False,\n 'nfc': True|False,\n 'gps': True|False\n },\n 'location': {\n 'latitude': 123.0,\n 'longitude': 123.0\n },\n 'customerArtifactPaths': {\n 'iosPaths': [\n 'string',\n ],\n 'androidPaths': [\n 'string',\n ],\n 'deviceHostPaths': [\n 'string',\n ]\n },\n 'webUrl': 'string',\n 'skipAppResign': True|False,\n 'testSpecArn': 'string',\n 'deviceSelectionResult': {\n 'filters': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'OS_VERSION'|'MODEL'|'AVAILABILITY'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'LESS_THAN_OR_EQUALS'|'GREATER_THAN'|'GREATER_THAN_OR_EQUALS'|'IN'|'NOT_IN'|'CONTAINS',\n 'values': [\n 'string',\n ]\n },\n ],\n 'matchedDevicesCount': 123,\n 'maxDevices': 123\n }\n }\n }\n \n \n :returns: \n ANDROID: The Android platform.\n IOS: The iOS platform.\n \n \"\"\"\n pass\n\ndef get_suite(arn=None):\n \"\"\"\n Gets information about a suite.\n See also: AWS API Documentation\n \n Examples\n The following example gets information about a specific test suite.\n Expected Output:\n \n :example: response = client.get_suite(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The suite's ARN.\n \n\n :rtype: dict\n :return: {\n 'suite': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 
'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n }\n }\n }\n \n \n :returns: \n PENDING: A pending status.\n PENDING_CONCURRENCY: A pending concurrency status.\n PENDING_DEVICE: A pending device status.\n PROCESSING: A processing status.\n SCHEDULING: A scheduling status.\n PREPARING: A preparing status.\n RUNNING: A running status.\n COMPLETED: A completed status.\n STOPPING: A stopping status.\n \n \"\"\"\n pass\n\ndef get_test(arn=None):\n \"\"\"\n Gets information about a test.\n See also: AWS API Documentation\n \n Examples\n The following example gets information about a specific test.\n Expected Output:\n \n :example: response = client.get_test(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The test's ARN.\n \n\n :rtype: dict\n :return: {\n 'test': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n }\n }\n }\n \n \n :returns: \n PENDING: A pending status.\n PENDING_CONCURRENCY: A pending concurrency status.\n PENDING_DEVICE: A pending device status.\n PROCESSING: A processing status.\n SCHEDULING: A scheduling status.\n PREPARING: A preparing status.\n RUNNING: A running status.\n COMPLETED: A completed status.\n STOPPING: A stopping status.\n \n \"\"\"\n pass\n\ndef get_upload(arn=None):\n \"\"\"\n Gets information about an upload.\n See also: AWS API Documentation\n \n Examples\n The following example gets information about a specific upload.\n Expected Output:\n \n :example: response = client.get_upload(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The upload's ARN.\n \n\n :rtype: dict\n :return: {\n 'upload': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'type': 
'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n 'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',\n 'url': 'string',\n 'metadata': 'string',\n 'contentType': 'string',\n 'message': 'string',\n 'category': 'CURATED'|'PRIVATE'\n }\n }\n \n \n :returns: \n FAILED: A failed status.\n INITIALIZED: An initialized status.\n PROCESSING: A processing status.\n SUCCEEDED: A succeeded status.\n \n \"\"\"\n pass\n\ndef get_vpce_configuration(arn=None):\n \"\"\"\n Returns information about the configuration settings for your Amazon Virtual Private Cloud (VPC) endpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.get_vpce_configuration(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the VPC endpoint configuration you want to describe.\n \n\n :rtype: dict\n :return: {\n 'vpceConfiguration': {\n 'arn': 'string',\n 'vpceConfigurationName': 'string',\n 'vpceServiceName': 'string',\n 'serviceDnsName': 'string',\n 'vpceConfigurationDescription': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef install_to_remote_access_session(remoteAccessSessionArn=None, appArn=None):\n \"\"\"\n Installs an application to the device in a remote access session. For Android applications, the file must be in .apk format. 
For iOS applications, the file must be in .ipa format.\n See also: AWS API Documentation\n \n Examples\n The following example installs a specific app to a device in a specific remote access session.\n Expected Output:\n \n :example: response = client.install_to_remote_access_session(\n remoteAccessSessionArn='string',\n appArn='string'\n )\n \n \n :type remoteAccessSessionArn: string\n :param remoteAccessSessionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the remote access session about which you are requesting information.\n \n\n :type appArn: string\n :param appArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the app about which you are requesting information.\n \n\n :rtype: dict\n :return: {\n 'appUpload': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n 'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',\n 'url': 'string',\n 'metadata': 'string',\n 'contentType': 'string',\n 'message': 'string',\n 'category': 'CURATED'|'PRIVATE'\n }\n }\n \n \n :returns: \n ANDROID_APP: An Android upload.\n IOS_APP: An iOS upload.\n WEB_APP: A web application upload.\n EXTERNAL_DATA: An external data upload.\n APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.\n APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.\n APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.\n APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.\n APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for web apps.\n APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for web apps.\n APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for web apps.\n APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for web apps.\n APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for web apps.\n CALABASH_TEST_PACKAGE: A Calabash test package upload.\n INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.\n UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.\n UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.\n XCTEST_TEST_PACKAGE: An XCode test package upload.\n XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.\n APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.\n APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.\n APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.\n APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.\n APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium 
Java JUnit test spec upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.\n APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.\n APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.\n APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.\n INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.\n XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.\n \n \"\"\"\n pass\n\ndef list_artifacts(arn=None, type=None, nextToken=None):\n \"\"\"\n Gets information about artifacts.\n See also: AWS API Documentation\n \n Examples\n The following example lists screenshot artifacts for a specific run.\n Expected Output:\n \n :example: response = client.list_artifacts(\n arn='string',\n type='SCREENSHOT'|'FILE'|'LOG',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Run, Job, Suite, or Test ARN.\n \n\n :type type: string\n :param type: [REQUIRED]\n The artifacts' type.\n Allowed values include:\n FILE: The artifacts are files.\n LOG: The artifacts are logs.\n SCREENSHOT: The artifacts are screenshots.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'artifacts': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'UNKNOWN'|'SCREENSHOT'|'DEVICE_LOG'|'MESSAGE_LOG'|'VIDEO_LOG'|'RESULT_LOG'|'SERVICE_LOG'|'WEBKIT_LOG'|'INSTRUMENTATION_OUTPUT'|'EXERCISER_MONKEY_OUTPUT'|'CALABASH_JSON_OUTPUT'|'CALABASH_PRETTY_OUTPUT'|'CALABASH_STANDARD_OUTPUT'|'CALABASH_JAVA_XML_OUTPUT'|'AUTOMATION_OUTPUT'|'APPIUM_SERVER_OUTPUT'|'APPIUM_JAVA_OUTPUT'|'APPIUM_JAVA_XML_OUTPUT'|'APPIUM_PYTHON_OUTPUT'|'APPIUM_PYTHON_XML_OUTPUT'|'EXPLORER_EVENT_LOG'|'EXPLORER_SUMMARY_LOG'|'APPLICATION_CRASH_REPORT'|'XCTEST_LOG'|'VIDEO'|'CUSTOMER_ARTIFACT'|'CUSTOMER_ARTIFACT_LOG'|'TESTSPEC_OUTPUT',\n 'extension': 'string',\n 'url': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n UNKNOWN: An unknown type.\n SCREENSHOT: The screenshot type.\n DEVICE_LOG: The device log type.\n MESSAGE_LOG: The message log type.\n VIDEO_LOG: The video log type.\n RESULT_LOG: The result log type.\n SERVICE_LOG: The service log type.\n WEBKIT_LOG: The web kit log type.\n INSTRUMENTATION_OUTPUT: The instrumentation type.\n EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an Android fuzz test.\n CALABASH_JSON_OUTPUT: The Calabash JSON output type.\n CALABASH_PRETTY_OUTPUT: The Calabash pretty output type.\n CALABASH_STANDARD_OUTPUT: The Calabash standard output type.\n CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type.\n AUTOMATION_OUTPUT: The automation output type.\n APPIUM_SERVER_OUTPUT: The Appium server output type.\n APPIUM_JAVA_OUTPUT: The Appium Java output type.\n APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type.\n APPIUM_PYTHON_OUTPUT: The Appium Python output type.\n APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type.\n EXPLORER_EVENT_LOG: The Explorer event log output type.\n EXPLORER_SUMMARY_LOG: The Explorer summary log output type.\n APPLICATION_CRASH_REPORT: The application crash report output type.\n XCTEST_LOG: The XCode test output type.\n VIDEO: The Video output type.\n CUSTOMER_ARTIFACT: The Customer Artifact output type.\n CUSTOMER_ARTIFACT_LOG: The Customer Artifact Log output type.\n TESTSPEC_OUTPUT: The Test Spec Output type.\n \n \"\"\"\n 
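# A minimal download sketch: list a run's file artifacts and save each one.\n    # Hedged assumptions: 'run-arn' is a placeholder ARN, 'client' is a boto3\n    # Device Farm client, each artifact's 'url' is a time-limited download link,\n    # and the third-party 'requests' library is available.\n    #\n    #   import requests\n    #   for artifact in client.list_artifacts(arn='run-arn', type='FILE')['artifacts']:\n    #       filename = '{}.{}'.format(artifact['name'], artifact['extension'])\n    #       with open(filename, 'wb') as f:\n    #           f.write(requests.get(artifact['url']).content)\n    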
pass\n\ndef list_device_instances(maxResults=None, nextToken=None):\n \"\"\"\n Returns information about the private device instances associated with one or more AWS accounts.\n See also: AWS API Documentation\n \n \n :example: response = client.list_device_instances(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: An integer specifying the maximum number of items you want to return in the API response.\n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'deviceInstances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_device_pools(arn=None, type=None, nextToken=None):\n \"\"\"\n Gets information about device pools.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about the private device pools in a specific project.\n Expected Output:\n \n :example: response = client.list_device_pools(\n arn='string',\n type='CURATED'|'PRIVATE',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The project ARN.\n \n\n :type type: string\n :param type: The device pools' type.\n Allowed values include:\n CURATED: A device pool that is created and managed by AWS Device Farm.\n PRIVATE: A device pool that is created and managed by the device pool developer.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'devicePools': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'rules': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',\n 'value': 'string'\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n CURATED: A device pool that is created and managed by AWS Device Farm.\n PRIVATE: A device pool that is created and managed by the device pool developer.\n \n \"\"\"\n pass\n\ndef list_devices(arn=None, nextToken=None, filters=None):\n \"\"\"\n Gets information about unique device types.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about the available devices in a specific project.\n Expected Output:\n \n :example: response = client.list_devices(\n arn='string',\n nextToken='string',\n filters=[\n {\n 'attribute': 'ARN'|'PLATFORM'|'OS_VERSION'|'MODEL'|'AVAILABILITY'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'LESS_THAN_OR_EQUALS'|'GREATER_THAN'|'GREATER_THAN_OR_EQUALS'|'IN'|'NOT_IN'|'CONTAINS',\n 'values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type arn: string\n :param arn: The Amazon Resource Name (ARN) of the 
project.\n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :type filters: list\n :param filters: Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.\n Attribute: The aspect of a device such as platform or model used as the selection criteria in a device filter. Allowed values include:\n ARN: The Amazon Resource Name (ARN) of the device. For example, 'arn:aws:devicefarm:us-west-2::device:12345Example'.\n PLATFORM: The device platform. Valid values are 'ANDROID' or 'IOS'.\n OS_VERSION: The operating system version. For example, '10.3.2'.\n MODEL: The device model. For example, 'iPad 5th Gen'.\n AVAILABILITY: The current availability of the device. Valid values are 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n FORM_FACTOR: The device form factor. Valid values are 'PHONE' or 'TABLET'.\n MANUFACTURER: The device manufacturer. For example, 'Apple'.\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are 'TRUE' or 'FALSE'.\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are 'TRUE' or 'FALSE'.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. Valid values are 'PUBLIC' or 'PRIVATE'.\n Operator: The filter operator.\n The EQUALS operator is available for every attribute except INSTANCE_LABELS.\n The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.\n The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.\n The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.\n Values: An array of one or more filter values.\n The IN and NOT_IN operators take a values array that has one or more elements.\n The other operators require an array with a single element.\n In a request, the AVAILABILITY attribute takes 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE' as values.\n \n (dict) --Represents a device filter used to select a set of devices to be included in a test run. This data structure is passed in as the deviceSelectionConfiguration parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun .\n It is also passed in as the filters parameter to ListDevices. For an example of the JSON request syntax, see ListDevices .\n attribute (string) --The aspect of a device such as platform or model used as the selection criteria in a device filter.\n Allowed values include:\n ARN: The Amazon Resource Name (ARN) of the device. For example, 'arn:aws:devicefarm:us-west-2::device:12345Example'.\n PLATFORM: The device platform. Valid values are 'ANDROID' or 'IOS'.\n OS_VERSION: The operating system version. For example, '10.3.2'.\n MODEL: The device model. For example, 'iPad 5th Gen'.\n AVAILABILITY: The current availability of the device. Valid values are 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n FORM_FACTOR: The device form factor. Valid values are 'PHONE' or 'TABLET'.\n MANUFACTURER: The device manufacturer. For example, 'Apple'.\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. 
Valid values are 'TRUE' or 'FALSE'.\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are 'TRUE' or 'FALSE'.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. Valid values are 'PUBLIC' or 'PRIVATE'.\n operator (string) --The filter operator.\n The EQUALS operator is available for every attribute except INSTANCE_LABELS.\n The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.\n The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.\n The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.\n values (list) --An array of one or more filter values used in a device filter.\n Operator Values\n The IN and NOT_IN operators can take a values array that has more than one element.\n The other operators require an array with a single element.\n Attribute Values\n The PLATFORM attribute can be set to 'ANDROID' or 'IOS'.\n The AVAILABILITY attribute can be set to 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n The FORM_FACTOR attribute can be set to 'PHONE' or 'TABLET'.\n The FLEET_TYPE attribute can be set to 'PUBLIC' or 'PRIVATE'.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'devices': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n PHONE: The phone form factor.\n TABLET: The tablet form factor.\n \n \"\"\"\n pass\n\ndef list_instance_profiles(maxResults=None, nextToken=None):\n \"\"\"\n Returns information about all the instance profiles in an AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_instance_profiles(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: An integer specifying the maximum number of items you want to return in the API response.\n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'instanceProfiles': [\n {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n 
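Usage sketch (editor's addition, not part of the generated AWS documentation): the loop below pages through every Android phone by following the nextToken convention described above. The region and filter values are illustrative assumptions, not requirements.\n import boto3\n client = boto3.client('devicefarm', region_name='us-west-2') # Device Farm is hosted in us-west-2\n kwargs = {'filters': [\n {'attribute': 'PLATFORM', 'operator': 'EQUALS', 'values': ['ANDROID']},\n {'attribute': 'FORM_FACTOR', 'operator': 'EQUALS', 'values': ['PHONE']},\n ]}\n while True:\n response = client.list_devices(**kwargs)\n for device in response['devices']:\n print(device['name'], device['os'])\n if 'nextToken' not in response: # an absent token means the last page was reached\n break\n kwargs['nextToken'] = response['nextToken']\n 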
\"\"\"\n pass\n\ndef list_jobs(arn=None, nextToken=None):\n \"\"\"\n Gets information about jobs for a given test run.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about jobs in a specific project.\n Expected Output:\n \n :example: response = client.list_jobs(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The run's Amazon Resource Name (ARN).\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'jobs': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'videoEndpoint': 'string',\n 'videoCapture': True|False\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium 
Ruby test type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n \n \"\"\"\n pass\n\ndef list_network_profiles(arn=None, type=None, nextToken=None):\n \"\"\"\n Returns the list of available network profiles.\n See also: AWS API Documentation\n \n \n :example: response = client.list_network_profiles(\n arn='string',\n type='CURATED'|'PRIVATE',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project for which you want to list network profiles.\n \n\n :type type: string\n :param type: The type of network profile you wish to return information about. Valid values are listed below.\n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'networkProfiles': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_offering_promotions(nextToken=None):\n \"\"\"\n Returns a list of offering promotions. Each offering promotion record contains the ID and description of the promotion. The API returns a NotEligible error if the caller is not permitted to invoke the operation. Contact [email protected] if you believe that you should be able to invoke this operation.\n See also: AWS API Documentation\n \n \n :example: response = client.list_offering_promotions(\n nextToken='string'\n )\n \n \n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'offeringPromotions': [\n {\n 'id': 'string',\n 'description': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_offering_transactions(nextToken=None):\n \"\"\"\n Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). The API returns a NotEligible error if the user is not permitted to invoke the operation. 
Please contact [email protected] if you believe that you should be able to invoke this operation.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about Device Farm offering transactions.\n Expected Output:\n \n :example: response = client.list_offering_transactions(\n nextToken='string'\n )\n \n \n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'offeringTransactions': [\n {\n 'offeringStatus': {\n 'type': 'PURCHASE'|'RENEW'|'SYSTEM',\n 'offering': {\n 'id': 'string',\n 'description': 'string',\n 'type': 'RECURRING',\n 'platform': 'ANDROID'|'IOS',\n 'recurringCharges': [\n {\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n },\n 'frequency': 'MONTHLY'\n },\n ]\n },\n 'quantity': 123,\n 'effectiveOn': datetime(2015, 1, 1)\n },\n 'transactionId': 'string',\n 'offeringPromotionId': 'string',\n 'createdOn': datetime(2015, 1, 1),\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_offerings(nextToken=None):\n \"\"\"\n Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact [email protected] if you believe that you should be able to invoke this operation.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about available device offerings.\n Expected Output:\n \n :example: response = client.list_offerings(\n nextToken='string'\n )\n \n \n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'offerings': [\n {\n 'id': 'string',\n 'description': 'string',\n 'type': 'RECURRING',\n 'platform': 'ANDROID'|'IOS',\n 'recurringCharges': [\n {\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n },\n 'frequency': 'MONTHLY'\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_projects(arn=None, nextToken=None):\n \"\"\"\n Gets information about projects.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about the specified project in Device Farm.\n Expected Output:\n \n :example: response = client.list_projects(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: Optional. If no Amazon Resource Name (ARN) is specified, then AWS Device Farm returns a list of all projects for the AWS account. 
You can also specify a project ARN.\n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'projects': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'defaultJobTimeoutMinutes': 123,\n 'created': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_remote_access_sessions(arn=None, nextToken=None):\n \"\"\"\n Returns a list of all currently running remote access sessions.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about a specific Device Farm remote access session.\n Expected Output:\n \n :example: response = client.list_remote_access_sessions(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the remote access session about which you are requesting information.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'remoteAccessSessions': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'message': 'string',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'remoteDebugEnabled': True|False,\n 'remoteRecordEnabled': True|False,\n 'remoteRecordAppArn': 'string',\n 'hostAddress': 'string',\n 'clientId': 'string',\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'endpoint': 'string',\n 'deviceUdid': 'string',\n 'interactionMode': 'INTERACTIVE'|'NO_VIDEO'|'VIDEO_ONLY',\n 'skipAppResign': True|False\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n PENDING: A pending status.\n PENDING_CONCURRENCY: A pending concurrency status.\n PENDING_DEVICE: A pending device status.\n PROCESSING: A processing status.\n SCHEDULING: A scheduling status.\n PREPARING: A preparing status.\n RUNNING: A running status.\n COMPLETED: A completed status.\n STOPPING: A stopping status.\n \n \"\"\"\n pass\n\ndef 
list_runs(arn=None, nextToken=None):\n \"\"\"\n Gets information about runs, given an AWS Device Farm project ARN.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about a specific test run.\n Expected Output:\n \n :example: response = client.list_runs(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project for which you want to list runs.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'runs': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'platform': 'ANDROID'|'IOS',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'totalJobs': 123,\n 'completedJobs': 123,\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n },\n 'parsingResultUrl': 'string',\n 'resultCode': 'PARSING_FAILED'|'VPC_ENDPOINT_SETUP_FAILED',\n 'seed': 123,\n 'appUpload': 'string',\n 'eventCount': 123,\n 'jobTimeoutMinutes': 123,\n 'devicePoolArn': 'string',\n 'locale': 'string',\n 'radios': {\n 'wifi': True|False,\n 'bluetooth': True|False,\n 'nfc': True|False,\n 'gps': True|False\n },\n 'location': {\n 'latitude': 123.0,\n 'longitude': 123.0\n },\n 'customerArtifactPaths': {\n 'iosPaths': [\n 'string',\n ],\n 'androidPaths': [\n 'string',\n ],\n 'deviceHostPaths': [\n 'string',\n ]\n },\n 'webUrl': 'string',\n 'skipAppResign': True|False,\n 'testSpecArn': 'string',\n 'deviceSelectionResult': {\n 'filters': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'OS_VERSION'|'MODEL'|'AVAILABILITY'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'LESS_THAN_OR_EQUALS'|'GREATER_THAN'|'GREATER_THAN_OR_EQUALS'|'IN'|'NOT_IN'|'CONTAINS',\n 'values': [\n 'string',\n ]\n },\n ],\n 'matchedDevicesCount': 123,\n 'maxDevices': 123\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n 
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n \n \"\"\"\n pass\n
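\n# --- Editor's addition: a sketch, not generated from the AWS API model. ---\n# Every list_* stub in this module documents the same nextToken pagination\n# convention. A generic helper such as this one drains all pages; 'operation'\n# is any list_* method of a real boto3 Device Farm client and 'result_key'\n# is the list key in its response (for example 'runs' or 'jobs').\ndef _paginate_sketch(operation, result_key, **kwargs):\n while True:\n  response = operation(**kwargs)\n  for item in response.get(result_key, []):\n   yield item\n  if 'nextToken' not in response: # no token on the final page\n   return\n  kwargs['nextToken'] = response['nextToken']\n# Example use: for run in _paginate_sketch(client.list_runs, 'runs', arn=project_arn): ...\n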
\ndef list_samples(arn=None, nextToken=None):\n \"\"\"\n Gets information about samples, given an AWS Device Farm job ARN.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about samples, given a specific Device Farm project.\n Expected Output:\n \n :example: response = client.list_samples(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the job used to list samples.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'samples': [\n {\n 'arn': 'string',\n 'type': 'CPU'|'MEMORY'|'THREADS'|'RX_RATE'|'TX_RATE'|'RX'|'TX'|'NATIVE_FRAMES'|'NATIVE_FPS'|'NATIVE_MIN_DRAWTIME'|'NATIVE_AVG_DRAWTIME'|'NATIVE_MAX_DRAWTIME'|'OPENGL_FRAMES'|'OPENGL_FPS'|'OPENGL_MIN_DRAWTIME'|'OPENGL_AVG_DRAWTIME'|'OPENGL_MAX_DRAWTIME',\n 'url': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n CPU: A CPU sample type. This is expressed as the app processing CPU time (including child processes) as reported by process, as a percentage.\n MEMORY: A memory usage sample type. This is expressed as the total proportional set size of an app process, in kilobytes.\n NATIVE_AVG_DRAWTIME\n NATIVE_FPS\n NATIVE_FRAMES\n NATIVE_MAX_DRAWTIME\n NATIVE_MIN_DRAWTIME\n OPENGL_AVG_DRAWTIME\n OPENGL_FPS\n OPENGL_FRAMES\n OPENGL_MAX_DRAWTIME\n OPENGL_MIN_DRAWTIME\n RX\n RX_RATE: The total number of bytes per second (TCP and UDP) that are received, by app process.\n THREADS: A threads sample type. This is expressed as the total number of threads per app process.\n TX\n TX_RATE: The total number of bytes per second (TCP and UDP) that are sent, by app process.\n \n \"\"\"\n pass\n\ndef list_suites(arn=None, nextToken=None):\n \"\"\"\n Gets information about test suites for a given job.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about suites, given a specific Device Farm project.\n Expected Output:\n \n :example: response = client.list_suites(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The job's Amazon Resource Name (ARN).\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'suites': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n \n \"\"\"\n pass\n\ndef list_tests(arn=None, nextToken=None):\n \"\"\"\n Gets information about tests in a given test suite.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about tests, given a specific Device Farm project.\n Expected Output:\n \n :example: response = client.list_tests(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The test suite's Amazon Resource Name (ARN).\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'tests': 
[\n {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n \n \"\"\"\n pass\n\ndef list_unique_problems(arn=None, nextToken=None):\n \"\"\"\n Gets information about unique problems.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about unique problems, given a specific Device Farm project.\n Expected Output:\n \n :example: response = client.list_unique_problems(\n arn='string',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The unique problems' ARNs.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'uniqueProblems': {\n 'string': [\n {\n 'message': 'string',\n 'problems': [\n {\n 'run': {\n 'arn': 'string',\n 'name': 'string'\n },\n 'job': {\n 'arn': 'string',\n 'name': 'string'\n },\n 'suite': {\n 'arn': 'string',\n 'name': 'string'\n },\n 'test': {\n 'arn': 'string',\n 'name': 'string'\n },\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 
'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'message': 'string'\n },\n ]\n },\n ]\n },\n 'nextToken': 'string'\n }\n \n \n :returns: \n PENDING: A pending condition.\n PASSED: A passing condition.\n WARNED: A warning condition.\n FAILED: A failed condition.\n SKIPPED: A skipped condition.\n ERRORED: An error condition.\n STOPPED: A stopped condition.\n \n \"\"\"\n pass\n
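\n# --- Editor's addition: a sketch, not generated from the AWS API model. ---\n# The uniqueProblems mapping documented above is keyed by an execution result\n# ('FAILED', 'ERRORED', ...); each value is a list of unique problems, and each\n# unique problem carries a shared message plus its concrete run/job/suite/test\n# occurrences. A flattening pass looks like this (run_arn is a placeholder):\n# response = client.list_unique_problems(arn=run_arn)\n# for result, unique_problems in response['uniqueProblems'].items():\n#  for unique in unique_problems:\n#   for problem in unique['problems']:\n#    print(result, unique['message'], problem['test']['name'])\n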
\ndef list_uploads(arn=None, type=None, nextToken=None):\n \"\"\"\n Gets information about uploads, given an AWS Device Farm project ARN.\n See also: AWS API Documentation\n \n Examples\n The following example returns information about uploads, given a specific Device Farm project.\n Expected Output:\n \n :example: response = client.list_uploads(\n arn='string',\n type='ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n nextToken='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project for which you want to list uploads.\n \n\n :type type: string\n :param type: The type of upload.\n Must be one of the following values:\n ANDROID_APP: An Android upload.\n IOS_APP: An iOS upload.\n WEB_APP: A web application upload.\n EXTERNAL_DATA: An external data upload.\n APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.\n APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.\n APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.\n APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.\n APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.\n APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.\n APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.\n APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.\n CALABASH_TEST_PACKAGE: A Calabash test package upload.\n INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.\n UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.\n UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.\n XCTEST_TEST_PACKAGE: An XCode test package upload.\n XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.\n APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.\n APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.\n APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.\n APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.\n APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.\n APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.\n APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.\n APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.\n INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.\n XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.\n \n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'uploads': [\n {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n 'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',\n 'url': 'string',\n 'metadata': 'string',\n 'contentType': 'string',\n 'message': 'string',\n 'category': 'CURATED'|'PRIVATE'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n ANDROID_APP: An Android upload.\n IOS_APP: An iOS upload.\n WEB_APP: A web application upload.\n EXTERNAL_DATA: An external data upload.\n APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.\n APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.\n APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.\n APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.\n APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for web apps.\n APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for web apps.\n APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for web apps.\n APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for web apps.\n APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for web apps.\n CALABASH_TEST_PACKAGE: A Calabash test package upload.\n INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.\n UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.\n 
UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.\n XCTEST_TEST_PACKAGE: An XCode test package upload.\n XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.\n APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.\n APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.\n APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.\n APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.\n APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.\n APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.\n APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.\n APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.\n INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.\n XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.\n \n \"\"\"\n pass\n\ndef list_vpce_configurations(maxResults=None, nextToken=None):\n \"\"\"\n Returns information about all Amazon Virtual Private Cloud (VPC) endpoint configurations in the AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_vpce_configurations(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: An integer specifying the maximum number of items you want to return in the API response.\n\n :type nextToken: string\n :param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.\n\n :rtype: dict\n :return: {\n 'vpceConfigurations': [\n {\n 'arn': 'string',\n 'vpceConfigurationName': 'string',\n 'vpceServiceName': 'string',\n 'serviceDnsName': 'string',\n 'vpceConfigurationDescription': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef purchase_offering(offeringId=None, quantity=None, offeringPromotionId=None):\n \"\"\"\n Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a NotEligible error if the user is not permitted to invoke the operation. 
Please contact [email protected] if you believe that you should be able to invoke this operation.\n See also: AWS API Documentation\n \n Examples\n The following example purchases a specific device slot offering.\n Expected Output:\n \n :example: response = client.purchase_offering(\n offeringId='string',\n quantity=123,\n offeringPromotionId='string'\n )\n \n \n :type offeringId: string\n :param offeringId: The ID of the offering.\n\n :type quantity: integer\n :param quantity: The number of device slots you wish to purchase in an offering request.\n\n :type offeringPromotionId: string\n :param offeringPromotionId: The ID of the offering promotion to be applied to the purchase.\n\n :rtype: dict\n :return: {\n 'offeringTransaction': {\n 'offeringStatus': {\n 'type': 'PURCHASE'|'RENEW'|'SYSTEM',\n 'offering': {\n 'id': 'string',\n 'description': 'string',\n 'type': 'RECURRING',\n 'platform': 'ANDROID'|'IOS',\n 'recurringCharges': [\n {\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n },\n 'frequency': 'MONTHLY'\n },\n ]\n },\n 'quantity': 123,\n 'effectiveOn': datetime(2015, 1, 1)\n },\n 'transactionId': 'string',\n 'offeringPromotionId': 'string',\n 'createdOn': datetime(2015, 1, 1),\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef renew_offering(offeringId=None, quantity=None):\n \"\"\"\n Explicitly sets the quantity of devices to renew for an offering, starting from the effectiveDate of the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact [email protected] if you believe that you should be able to invoke this operation.\n See also: AWS API Documentation\n \n Examples\n The following example renews a specific device slot offering.\n Expected Output:\n \n :example: response = client.renew_offering(\n offeringId='string',\n quantity=123\n )\n \n \n :type offeringId: string\n :param offeringId: The ID of a request to renew an offering.\n\n :type quantity: integer\n :param quantity: The quantity requested in an offering renewal.\n\n :rtype: dict\n :return: {\n 'offeringTransaction': {\n 'offeringStatus': {\n 'type': 'PURCHASE'|'RENEW'|'SYSTEM',\n 'offering': {\n 'id': 'string',\n 'description': 'string',\n 'type': 'RECURRING',\n 'platform': 'ANDROID'|'IOS',\n 'recurringCharges': [\n {\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n },\n 'frequency': 'MONTHLY'\n },\n ]\n },\n 'quantity': 123,\n 'effectiveOn': datetime(2015, 1, 1)\n },\n 'transactionId': 'string',\n 'offeringPromotionId': 'string',\n 'createdOn': datetime(2015, 1, 1),\n 'cost': {\n 'amount': 123.0,\n 'currencyCode': 'USD'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef schedule_run(projectArn=None, appArn=None, devicePoolArn=None, deviceSelectionConfiguration=None, name=None, test=None, configuration=None, executionConfiguration=None):\n \"\"\"\n Schedules a run.\n See also: AWS API Documentation\n \n Examples\n The following example schedules a test run named MyRun.\n Expected Output:\n \n :example: response = client.schedule_run(\n projectArn='string',\n appArn='string',\n devicePoolArn='string',\n deviceSelectionConfiguration={\n 'filters': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'OS_VERSION'|'MODEL'|'AVAILABILITY'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'LESS_THAN_OR_EQUALS'|'GREATER_THAN'|'GREATER_THAN_OR_EQUALS'|'IN'|'NOT_IN'|'CONTAINS',\n 'values': [\n 'string',\n ]\n },\n ],\n 
'maxDevices': 123\n },\n name='string',\n test={\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'testPackageArn': 'string',\n 'testSpecArn': 'string',\n 'filter': 'string',\n 'parameters': {\n 'string': 'string'\n }\n },\n configuration={\n 'extraDataPackageArn': 'string',\n 'networkProfileArn': 'string',\n 'locale': 'string',\n 'location': {\n 'latitude': 123.0,\n 'longitude': 123.0\n },\n 'vpceConfigurationArns': [\n 'string',\n ],\n 'customerArtifactPaths': {\n 'iosPaths': [\n 'string',\n ],\n 'androidPaths': [\n 'string',\n ],\n 'deviceHostPaths': [\n 'string',\n ]\n },\n 'radios': {\n 'wifi': True|False,\n 'bluetooth': True|False,\n 'nfc': True|False,\n 'gps': True|False\n },\n 'auxiliaryApps': [\n 'string',\n ],\n 'billingMethod': 'METERED'|'UNMETERED'\n },\n executionConfiguration={\n 'jobTimeoutMinutes': 123,\n 'accountsCleanup': True|False,\n 'appPackagesCleanup': True|False,\n 'videoCapture': True|False,\n 'skipAppResign': True|False\n }\n )\n \n \n :type projectArn: string\n :param projectArn: [REQUIRED]\n The ARN of the project for the run to be scheduled.\n \n\n :type appArn: string\n :param appArn: The ARN of the app to schedule a run.\n\n :type devicePoolArn: string\n :param devicePoolArn: The ARN of the device pool for the run to be scheduled.\n Either ** devicePoolArn ** or ** deviceSelectionConfiguration ** is required in a request.\n \n\n :type deviceSelectionConfiguration: dict\n :param deviceSelectionConfiguration: The filter criteria used to dynamically select a set of devices for a test run, as well as the maximum number of devices to be included in the run.\n Either ** devicePoolArn ** or ** deviceSelectionConfiguration ** is required in a request.\n filters (list) -- [REQUIRED]Used to dynamically select a set of devices for a test run. A filter is made up of an attribute, an operator, and one or more values.\n Attribute The aspect of a device such as platform or model used as the selection criteria in a device filter. Allowed values include:\n ARN: The Amazon Resource Name (ARN) of the device. For example, 'arn:aws:devicefarm:us-west-2::device:12345Example'.\n PLATFORM: The device platform. Valid values are 'ANDROID' or 'IOS'.\n OS_VERSION: The operating system version. For example, '10.3.2'.\n MODEL: The device model. For example, 'iPad 5th Gen'.\n AVAILABILITY: The current availability of the device. Valid values are 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n FORM_FACTOR: The device form factor. Valid values are 'PHONE' or 'TABLET'.\n MANUFACTURER: The device manufacturer. For example, 'Apple'.\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are 'TRUE' or 'FALSE'.\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are 'TRUE' or 'FALSE'.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. 
Valid values are 'PUBLIC' or 'PRIVATE'.\n Operator The filter operator.\n The EQUALS operator is available for every attribute except INSTANCE_LABELS.\n The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.\n The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.\n The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.\n Values An array of one or more filter values. Operator Values\n The IN and NOT_IN operators can take a values array that has more than one element.\n The other operators require an array with a single element.\n \n Attribute Values\n The PLATFORM attribute can be set to 'ANDROID' or 'IOS'.\n The AVAILABILITY attribute can be set to 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n The FORM_FACTOR attribute can be set to 'PHONE' or 'TABLET'.\n The FLEET_TYPE attribute can be set to 'PUBLIC' or 'PRIVATE'.\n \n (dict) --Represents a device filter used to select a set of devices to be included in a test run. This data structure is passed in as the deviceSelectionConfiguration parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun .\n It is also passed in as the filters parameter to ListDevices. For an example of the JSON request syntax, see ListDevices .\n attribute (string) --The aspect of a device such as platform or model used as the selection criteria in a device filter.\n Allowed values include:\n ARN: The Amazon Resource Name (ARN) of the device. For example, 'arn:aws:devicefarm:us-west-2::device:12345Example'.\n PLATFORM: The device platform. Valid values are 'ANDROID' or 'IOS'.\n OS_VERSION: The operating system version. For example, '10.3.2'.\n MODEL: The device model. For example, 'iPad 5th Gen'.\n AVAILABILITY: The current availability of the device. Valid values are 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n FORM_FACTOR: The device form factor. Valid values are 'PHONE' or 'TABLET'.\n MANUFACTURER: The device manufacturer. For example, 'Apple'.\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are 'TRUE' or 'FALSE'.\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are 'TRUE' or 'FALSE'.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. 
Valid values are 'PUBLIC' or 'PRIVATE'.\n operator (string) --The filter operator.\n The EQUALS operator is available for every attribute except INSTANCE_LABELS.\n The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.\n The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.\n The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.\n values (list) --An array of one or more filter values used in a device filter.\n Operator Values\n The IN and NOT_IN operators can take a values array that has more than one element.\n The other operators require an array with a single element.\n Attribute Values\n The PLATFORM attribute can be set to 'ANDROID' or 'IOS'.\n The AVAILABILITY attribute can be set to 'AVAILABLE', 'HIGHLY_AVAILABLE', 'BUSY', or 'TEMPORARY_NOT_AVAILABLE'.\n The FORM_FACTOR attribute can be set to 'PHONE' or 'TABLET'.\n The FLEET_TYPE attribute can be set to 'PUBLIC' or 'PRIVATE'.\n (string) --\n \n maxDevices (integer) -- [REQUIRED]The maximum number of devices to be included in a test run.\n \n\n :type name: string\n :param name: The name for the run to be scheduled.\n\n :type test: dict\n :param test: [REQUIRED]\n Information about the test for the run to be scheduled.\n type (string) -- [REQUIRED]The test's type.\n Must be one of the following values:\n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n testPackageArn (string) --The ARN of the uploaded test that will be run.\n testSpecArn (string) --The ARN of the YAML-formatted test specification.\n filter (string) --The test's filter.\n parameters (dict) --The test's parameters, such as test framework parameters and fixture settings. Parameters are represented by name-value pairs of strings.\n For all tests:\n app_performance_monitoring: Performance monitoring is enabled by default. Set this parameter to 'false' to disable it.\n For Calabash tests:\n profile: A cucumber profile, for example, 'my_profile_name'.\n tags: You can limit execution to features or scenarios that have (or don't have) certain tags, for example, '@smoke' or '@smoke,~@wip'.\n For Appium tests (all types):\n appium_version: The Appium version. Currently supported values are '1.6.5' (and higher), 'latest', and 'default'.\n latest will run the latest Appium version supported by Device Farm (1.9.1).\n For default , Device Farm will choose a compatible version of Appium for the device. 
The current behavior is to run 1.7.2 on Android devices and iOS 9 and earlier, 1.7.2 for iOS 10 and later.\n This behavior is subject to change.\n For Fuzz tests (Android only):\n event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.\n throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.\n seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.\n For Explorer tests:\n username: A username to use if the Explorer encounters a login form. If not supplied, no username will be inserted.\n password: A password to use if the Explorer encounters a login form. If not supplied, no password will be inserted.\n For Instrumentation:\n filter: A test filter string. Examples:\n Running a single test case: 'com.android.abc.Test1'\n Running a single test: 'com.android.abc.Test1#smoke'\n Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'\n For XCTest and XCTestUI:\n filter: A test filter string. Examples:\n Running a single test class: 'LoginTests'\n Running multiple test classes: 'LoginTests,SmokeTests'\n Running a single test: 'LoginTests/testValid'\n Running multiple tests: 'LoginTests/testValid,LoginTests/testInvalid'\n For UIAutomator:\n filter: A test filter string. Examples:\n Running a single test case: 'com.android.abc.Test1'\n Running a single test: 'com.android.abc.Test1#smoke'\n Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'\n \n (string) --\n (string) --\n \n \n\n :type configuration: dict\n :param configuration: Information about the settings for the run to be scheduled.\n extraDataPackageArn (string) --The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.\n networkProfileArn (string) --Reserved for internal use.\n locale (string) --Information about the locale that is used for the run.\n location (dict) --Information about the location that is used for the run.\n latitude (float) -- [REQUIRED]The latitude.\n longitude (float) -- [REQUIRED]The longitude.\n vpceConfigurationArns (list) --An array of Amazon Resource Names (ARNs) for your VPC endpoint configurations.\n (string) --\n customerArtifactPaths (dict) --Input CustomerArtifactPaths object for the scheduled run configuration.\n iosPaths (list) --Comma-separated list of paths on the iOS device where the artifacts generated by the customer's tests will be pulled from.\n (string) --\n androidPaths (list) --Comma-separated list of paths on the Android device where the artifacts generated by the customer's tests will be pulled from.\n (string) --\n deviceHostPaths (list) --Comma-separated list of paths in the test execution environment where the artifacts generated by the customer's tests will be pulled from.\n (string) --\n \n radios (dict) --Information about the radio states for the run.\n wifi (boolean) --True if Wi-Fi is enabled at the beginning of the test; otherwise, false.\n bluetooth (boolean) --True if Bluetooth is enabled at the beginning of the test; otherwise, false.\n nfc (boolean) --True if NFC is enabled at the beginning of the test; otherwise, false.\n gps (boolean) --True if GPS is enabled at the beginning of the test; otherwise, false.\n auxiliaryApps (list) --A list of auxiliary apps for the run.\n (string) --\n billingMethod (string) --Specifies the billing method for a test run: metered or unmetered . 
If the parameter is not specified, the default value is metered .\n \n\n :type executionConfiguration: dict\n :param executionConfiguration: Specifies configuration information about a test run, such as the execution timeout (in minutes).\n jobTimeoutMinutes (integer) --The number of minutes a test run will execute before it times out.\n accountsCleanup (boolean) --True if account cleanup is enabled at the beginning of the test; otherwise, false.\n appPackagesCleanup (boolean) --True if app package cleanup is enabled at the beginning of the test; otherwise, false.\n videoCapture (boolean) --Set to true to enable video capture; otherwise, set to false. The default is true.\n skipAppResign (boolean) --When set to true , for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.\n For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs .\n \n\n :rtype: dict\n :return: {\n 'run': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'platform': 'ANDROID'|'IOS',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'totalJobs': 123,\n 'completedJobs': 123,\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n },\n 'parsingResultUrl': 'string',\n 'resultCode': 'PARSING_FAILED'|'VPC_ENDPOINT_SETUP_FAILED',\n 'seed': 123,\n 'appUpload': 'string',\n 'eventCount': 123,\n 'jobTimeoutMinutes': 123,\n 'devicePoolArn': 'string',\n 'locale': 'string',\n 'radios': {\n 'wifi': True|False,\n 'bluetooth': True|False,\n 'nfc': True|False,\n 'gps': True|False\n },\n 'location': {\n 'latitude': 123.0,\n 'longitude': 123.0\n },\n 'customerArtifactPaths': {\n 'iosPaths': [\n 'string',\n ],\n 'androidPaths': [\n 'string',\n ],\n 'deviceHostPaths': [\n 'string',\n ]\n },\n 'webUrl': 'string',\n 'skipAppResign': True|False,\n 'testSpecArn': 'string',\n 'deviceSelectionResult': {\n 'filters': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'OS_VERSION'|'MODEL'|'AVAILABILITY'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'LESS_THAN_OR_EQUALS'|'GREATER_THAN'|'GREATER_THAN_OR_EQUALS'|'IN'|'NOT_IN'|'CONTAINS',\n 'values': [\n 'string',\n ]\n },\n ],\n 
'matchedDevicesCount': 123,\n 'maxDevices': 123\n }\n }\n }\n \n \n :returns: \n BUILTIN_FUZZ: The built-in fuzz type.\n BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.\n APPIUM_JAVA_JUNIT: The Appium Java JUnit type.\n APPIUM_JAVA_TESTNG: The Appium Java TestNG type.\n APPIUM_PYTHON: The Appium Python type.\n APPIUM_NODE: The Appium Node.js type.\n APPIUM_RUBY: The Appium Ruby type.\n APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.\n APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.\n APPIUM_WEB_PYTHON: The Appium Python type for web apps.\n APPIUM_WEB_NODE: The Appium Node.js type for web apps.\n APPIUM_WEB_RUBY: The Appium Ruby type for web apps.\n CALABASH: The Calabash type.\n INSTRUMENTATION: The Instrumentation type.\n UIAUTOMATION: The uiautomation type.\n UIAUTOMATOR: The uiautomator type.\n XCTEST: The XCode test type.\n XCTEST_UI: The XCode UI test type.\n \n \"\"\"\n pass\n\ndef stop_job(arn=None):\n \"\"\"\n Initiates a stop request for the current job. AWS Device Farm will immediately stop the job on the device where tests have not started executing, and you will not be billed for this device. On the device where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on the device. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_job(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n Represents the Amazon Resource Name (ARN) of the Device Farm job you wish to stop.\n \n\n :rtype: dict\n :return: {\n 'job': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 
'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'videoEndpoint': 'string',\n 'videoCapture': True|False\n }\n }\n \n \n :returns: \n PENDING: A pending status.\n PENDING_CONCURRENCY: A pending concurrency status.\n PENDING_DEVICE: A pending device status.\n PROCESSING: A processing status.\n SCHEDULING: A scheduling status.\n PREPARING: A preparing status.\n RUNNING: A running status.\n COMPLETED: A completed status.\n STOPPING: A stopping status.\n \n \"\"\"\n pass\n\ndef stop_remote_access_session(arn=None):\n \"\"\"\n Ends a specified remote access session.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_remote_access_session(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the remote access session you wish to stop.\n \n\n :rtype: dict\n :return: {\n 'remoteAccessSession': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'message': 'string',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'device': {\n 'arn': 'string',\n 'name': 'string',\n 'manufacturer': 'string',\n 'model': 'string',\n 'modelId': 'string',\n 'formFactor': 'PHONE'|'TABLET',\n 'platform': 'ANDROID'|'IOS',\n 'os': 'string',\n 'cpu': {\n 'frequency': 'string',\n 'architecture': 'string',\n 'clock': 123.0\n },\n 'resolution': {\n 'width': 123,\n 'height': 123\n },\n 'heapSize': 123,\n 'memory': 123,\n 'image': 'string',\n 'carrier': 'string',\n 'radio': 'string',\n 'remoteAccessEnabled': True|False,\n 'remoteDebugEnabled': True|False,\n 'fleetType': 'string',\n 'fleetName': 'string',\n 'instances': [\n {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n },\n ],\n 'availability': 'TEMPORARY_NOT_AVAILABLE'|'BUSY'|'AVAILABLE'|'HIGHLY_AVAILABLE'\n },\n 'instanceArn': 'string',\n 'remoteDebugEnabled': True|False,\n 'remoteRecordEnabled': True|False,\n 'remoteRecordAppArn': 'string',\n 'hostAddress': 'string',\n 'clientId': 'string',\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'endpoint': 'string',\n 'deviceUdid': 'string',\n 'interactionMode': 'INTERACTIVE'|'NO_VIDEO'|'VIDEO_ONLY',\n 'skipAppResign': True|False\n }\n }\n \n \n :returns: \n PENDING: A pending condition.\n PASSED: A passing condition.\n WARNED: A warning condition.\n FAILED: A failed condition.\n SKIPPED: A skipped condition.\n ERRORED: An error condition.\n STOPPED: A stopped condition.\n \n \"\"\"\n pass\n\ndef stop_run(arn=None):\n \"\"\"\n Initiates a stop request for the current test run. AWS Device Farm will immediately stop the run on devices where tests have not started executing, and you will not be billed for these devices. 
On devices where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on those devices. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.\n See also: AWS API Documentation\n \n Examples\n The following example stops a specific test run.\n Expected Output:\n \n :example: response = client.stop_run(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n Represents the Amazon Resource Name (ARN) of the Device Farm run you wish to stop.\n \n\n :rtype: dict\n :return: {\n 'run': {\n 'arn': 'string',\n 'name': 'string',\n 'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'WEB_PERFORMANCE_PROFILE'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_NODE'|'APPIUM_RUBY'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'APPIUM_WEB_NODE'|'APPIUM_WEB_RUBY'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI'|'REMOTE_ACCESS_RECORD'|'REMOTE_ACCESS_REPLAY',\n 'platform': 'ANDROID'|'IOS',\n 'created': datetime(2015, 1, 1),\n 'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',\n 'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',\n 'started': datetime(2015, 1, 1),\n 'stopped': datetime(2015, 1, 1),\n 'counters': {\n 'total': 123,\n 'passed': 123,\n 'failed': 123,\n 'warned': 123,\n 'errored': 123,\n 'stopped': 123,\n 'skipped': 123\n },\n 'message': 'string',\n 'totalJobs': 123,\n 'completedJobs': 123,\n 'billingMethod': 'METERED'|'UNMETERED',\n 'deviceMinutes': {\n 'total': 123.0,\n 'metered': 123.0,\n 'unmetered': 123.0\n },\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n },\n 'parsingResultUrl': 'string',\n 'resultCode': 'PARSING_FAILED'|'VPC_ENDPOINT_SETUP_FAILED',\n 'seed': 123,\n 'appUpload': 'string',\n 'eventCount': 123,\n 'jobTimeoutMinutes': 123,\n 'devicePoolArn': 'string',\n 'locale': 'string',\n 'radios': {\n 'wifi': True|False,\n 'bluetooth': True|False,\n 'nfc': True|False,\n 'gps': True|False\n },\n 'location': {\n 'latitude': 123.0,\n 'longitude': 123.0\n },\n 'customerArtifactPaths': {\n 'iosPaths': [\n 'string',\n ],\n 'androidPaths': [\n 'string',\n ],\n 'deviceHostPaths': [\n 'string',\n ]\n },\n 'webUrl': 'string',\n 'skipAppResign': True|False,\n 'testSpecArn': 'string',\n 'deviceSelectionResult': {\n 'filters': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'OS_VERSION'|'MODEL'|'AVAILABILITY'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'LESS_THAN_OR_EQUALS'|'GREATER_THAN'|'GREATER_THAN_OR_EQUALS'|'IN'|'NOT_IN'|'CONTAINS',\n 'values': [\n 'string',\n ]\n },\n ],\n 'matchedDevicesCount': 123,\n 'maxDevices': 123\n }\n }\n }\n \n \n :returns: \n ANDROID: The Android platform.\n IOS: The iOS platform.\n \n \"\"\"\n pass\n\ndef update_device_instance(arn=None, profileArn=None, labels=None):\n \"\"\"\n Updates information about an existing private device instance.\n See also: AWS API Documentation\n \n \n :example: response = client.update_device_instance(\n arn='string',\n profileArn='string',\n labels=[\n 'string',\n 
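# illustrative values only; any strings you want attached to the instance, e.g. 'qa-pool', can be listed here\n 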
]\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the device instance.\n \n\n :type profileArn: string\n :param profileArn: The Amazon Resource Name (ARN) of the profile that you want to associate with the device instance.\n\n :type labels: list\n :param labels: An array of strings that you want to associate with the device instance.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'deviceInstance': {\n 'arn': 'string',\n 'deviceArn': 'string',\n 'labels': [\n 'string',\n ],\n 'status': 'IN_USE'|'PREPARING'|'AVAILABLE'|'NOT_AVAILABLE',\n 'udid': 'string',\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_device_pool(arn=None, name=None, description=None, rules=None):\n \"\"\"\n Modifies the name, description, and rules in a device pool given the attributes and the pool ARN. Rule updates are all-or-nothing, meaning they can only be updated as a whole (or not at all).\n See also: AWS API Documentation\n \n Examples\n The following example updates the specified device pool with a new name and description. It also enables remote access of devices in the device pool.\n Expected Output:\n \n :example: response = client.update_device_pool(\n arn='string',\n name='string',\n description='string',\n rules=[\n {\n 'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',\n 'value': 'string'\n },\n ]\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.\n \n\n :type name: string\n :param name: A string representing the name of the device pool you wish to update.\n\n :type description: string\n :param description: A description of the device pool you wish to update.\n\n :type rules: list\n :param rules: Represents the rules you wish to modify for the device pool. Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.\n (dict) --Represents a condition for a device pool. It is passed in as the rules parameter to CreateDevicePool and UpdateDevicePool .\n attribute (string) --The rule's attribute. It is the aspect of a device such as platform or model used as selection criteria to create or update a device pool.\n Allowed values include:\n ARN: The Amazon Resource Name (ARN) of a device. For example, 'arn:aws:devicefarm:us-west-2::device:12345Example'.\n PLATFORM: The device platform. Valid values are 'ANDROID' or 'IOS'.\n FORM_FACTOR: The device form factor. Valid values are 'PHONE' or 'TABLET'.\n MANUFACTURER: The device manufacturer. For example, 'Apple'.\n REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are 'TRUE' or 'FALSE'.\n REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are 'TRUE' or 'FALSE'.\n APPIUM_VERSION: The Appium version for the test.\n INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.\n INSTANCE_LABELS: The label of the device instance.\n FLEET_TYPE: The fleet type. 
Valid values are 'PUBLIC' or 'PRIVATE'.\n operator (string) --The rule's operator.\n EQUALS: The equals operator.\n GREATER_THAN: The greater-than operator.\n IN: The in operator.\n LESS_THAN: The less-than operator.\n NOT_IN: The not-in operator.\n CONTAINS: The contains operator.\n value (string) --The rule's value.\n The value must be passed in as a string using escaped quotes.\n For example:\n 'value': ''ANDROID''\n \n \n\n :rtype: dict\n :return: {\n 'devicePool': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'rules': [\n {\n 'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'REMOTE_DEBUG_ENABLED'|'APPIUM_VERSION'|'INSTANCE_ARN'|'INSTANCE_LABELS'|'FLEET_TYPE',\n 'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',\n 'value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CURATED: A device pool that is created and managed by AWS Device Farm.\n PRIVATE: A device pool that is created and managed by the device pool developer.\n \n \"\"\"\n pass\n\ndef update_instance_profile(arn=None, name=None, description=None, packageCleanup=None, excludeAppPackagesFromCleanup=None, rebootAfterUse=None):\n \"\"\"\n Updates information about an existing private device instance profile.\n See also: AWS API Documentation\n \n \n :example: response = client.update_instance_profile(\n arn='string',\n name='string',\n description='string',\n packageCleanup=True|False,\n excludeAppPackagesFromCleanup=[\n 'string',\n ],\n rebootAfterUse=True|False\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the instance profile.\n \n\n :type name: string\n :param name: The updated name for your instance profile.\n\n :type description: string\n :param description: The updated description for your instance profile.\n\n :type packageCleanup: boolean\n :param packageCleanup: The updated choice for whether you want to specify package cleanup. The default value is false for private devices.\n\n :type excludeAppPackagesFromCleanup: list\n :param excludeAppPackagesFromCleanup: An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.\n The list of packages is only considered if you set packageCleanup to true .\n (string) --\n \n\n :type rebootAfterUse: boolean\n :param rebootAfterUse: The updated choice for whether you want to reboot the device after use. 
The default value is true .\n\n :rtype: dict\n :return: {\n 'instanceProfile': {\n 'arn': 'string',\n 'packageCleanup': True|False,\n 'excludeAppPackagesFromCleanup': [\n 'string',\n ],\n 'rebootAfterUse': True|False,\n 'name': 'string',\n 'description': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n \"\"\"\n Updates the network profile with specific settings.\n See also: AWS API Documentation\n \n \n :example: response = client.update_network_profile(\n arn='string',\n name='string',\n description='string',\n type='CURATED'|'PRIVATE',\n uplinkBandwidthBits=123,\n downlinkBandwidthBits=123,\n uplinkDelayMs=123,\n downlinkDelayMs=123,\n uplinkJitterMs=123,\n downlinkJitterMs=123,\n uplinkLossPercent=123,\n downlinkLossPercent=123\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project for which you want to update network profile settings.\n \n\n :type name: string\n :param name: The name of the network profile about which you are returning information.\n\n :type description: string\n :param description: The description of the network profile about which you are returning information.\n\n :type type: string\n :param type: The type of network profile you wish to return information about. Valid values are listed below.\n\n :type uplinkBandwidthBits: integer\n :param uplinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.\n\n :type downlinkBandwidthBits: integer\n :param downlinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.\n\n :type uplinkDelayMs: integer\n :param uplinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.\n\n :type downlinkDelayMs: integer\n :param downlinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.\n\n :type uplinkJitterMs: integer\n :param uplinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.\n\n :type downlinkJitterMs: integer\n :param downlinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.\n\n :type uplinkLossPercent: integer\n :param uplinkLossPercent: Proportion of transmitted packets that fail to arrive from 0 to 100 percent.\n\n :type downlinkLossPercent: integer\n :param downlinkLossPercent: Proportion of received packets that fail to arrive from 0 to 100 percent.\n\n :rtype: dict\n :return: {\n 'networkProfile': {\n 'arn': 'string',\n 'name': 'string',\n 'description': 'string',\n 'type': 'CURATED'|'PRIVATE',\n 'uplinkBandwidthBits': 123,\n 'downlinkBandwidthBits': 123,\n 'uplinkDelayMs': 123,\n 'downlinkDelayMs': 123,\n 'uplinkJitterMs': 123,\n 'downlinkJitterMs': 123,\n 'uplinkLossPercent': 123,\n 'downlinkLossPercent': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):\n \"\"\"\n Modifies the specified project name, given the project ARN and a new name.\n See also: AWS API Documentation\n \n Examples\n The following example updates the specified project with a new name.\n Expected Output:\n \n :example: response = client.update_project(\n arn='string',\n name='string',\n 
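# 123 is a placeholder; this value caps each run's execution time in minutes\n 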
defaultJobTimeoutMinutes=123\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the project whose name you wish to update.\n \n\n :type name: string\n :param name: A string representing the new name of the project that you are updating.\n\n :type defaultJobTimeoutMinutes: integer\n :param defaultJobTimeoutMinutes: The number of minutes a test run in the project will execute before it times out.\n\n :rtype: dict\n :return: {\n 'project': {\n 'arn': 'string',\n 'name': 'string',\n 'defaultJobTimeoutMinutes': 123,\n 'created': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_upload(arn=None, name=None, contentType=None, editContent=None):\n \"\"\"\n Update an uploaded test specification (test spec).\n See also: AWS API Documentation\n \n \n :example: response = client.update_upload(\n arn='string',\n name='string',\n contentType='string',\n editContent=True|False\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the uploaded test spec.\n \n\n :type name: string\n :param name: The upload's test spec file name. The name should not contain the '/' character. The test spec file name must end with the .yaml or .yml file extension.\n\n :type contentType: string\n :param contentType: The upload's content type (for example, 'application/x-yaml').\n\n :type editContent: boolean\n :param editContent: Set to true if the YAML file has changed and needs to be updated; otherwise, set to false.\n\n :rtype: dict\n :return: {\n 'upload': {\n 'arn': 'string',\n 'name': 'string',\n 'created': datetime(2015, 1, 1),\n 'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_NODE_TEST_PACKAGE'|'APPIUM_RUBY_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_NODE_TEST_PACKAGE'|'APPIUM_WEB_RUBY_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE'|'APPIUM_JAVA_JUNIT_TEST_SPEC'|'APPIUM_JAVA_TESTNG_TEST_SPEC'|'APPIUM_PYTHON_TEST_SPEC'|'APPIUM_NODE_TEST_SPEC'|'APPIUM_RUBY_TEST_SPEC'|'APPIUM_WEB_JAVA_JUNIT_TEST_SPEC'|'APPIUM_WEB_JAVA_TESTNG_TEST_SPEC'|'APPIUM_WEB_PYTHON_TEST_SPEC'|'APPIUM_WEB_NODE_TEST_SPEC'|'APPIUM_WEB_RUBY_TEST_SPEC'|'INSTRUMENTATION_TEST_SPEC'|'XCTEST_UI_TEST_SPEC',\n 'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',\n 'url': 'string',\n 'metadata': 'string',\n 'contentType': 'string',\n 'message': 'string',\n 'category': 'CURATED'|'PRIVATE'\n }\n }\n \n \n :returns: \n ANDROID_APP: An Android upload.\n IOS_APP: An iOS upload.\n WEB_APP: A web application upload.\n EXTERNAL_DATA: An external data upload.\n APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.\n APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.\n APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.\n APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.\n APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for web apps.\n APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for web apps.\n APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for web apps.\n APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for web 
apps.\n APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for web apps.\n CALABASH_TEST_PACKAGE: A Calabash test package upload.\n INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.\n UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.\n UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.\n XCTEST_TEST_PACKAGE: An XCode test package upload.\n XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.\n APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.\n APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.\n APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.\n APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.\n APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.\n APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.\n APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.\n APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.\n APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.\n APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.\n INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.\n XCTEST_UI_TEST_SPEC: An XCode UI test spec upload.\n \n \"\"\"\n pass\n\ndef update_vpce_configuration(arn=None, vpceConfigurationName=None, vpceServiceName=None, serviceDnsName=None, vpceConfigurationDescription=None):\n \"\"\"\n Updates information about an existing Amazon Virtual Private Cloud (VPC) endpoint configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.update_vpce_configuration(\n arn='string',\n vpceConfigurationName='string',\n vpceServiceName='string',\n serviceDnsName='string',\n vpceConfigurationDescription='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the VPC endpoint configuration you want to update.\n \n\n :type vpceConfigurationName: string\n :param vpceConfigurationName: The friendly name you give to your VPC endpoint configuration, to manage your configurations more easily.\n\n :type vpceServiceName: string\n :param vpceServiceName: The name of the VPC endpoint service running inside your AWS account that you want Device Farm to test.\n\n :type serviceDnsName: string\n :param serviceDnsName: The DNS (domain) name used to connect to your private service in your Amazon VPC. 
The DNS name must not already be in use on the Internet.\n\n :type vpceConfigurationDescription: string\n :param vpceConfigurationDescription: An optional description, providing more details about your VPC endpoint configuration.\n\n :rtype: dict\n :return: {\n 'vpceConfiguration': {\n 'arn': 'string',\n 'vpceConfigurationName': 'string',\n 'vpceServiceName': 'string',\n 'serviceDnsName': 'string',\n 'vpceConfigurationDescription': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5756846070289612, "alphanum_fraction": 0.581824541091919, "avg_line_length": 29.014659881591797, "blob_id": "2da82df981adb276bd4f9dc176a33e741920f25a", "content_id": "010b8b1c378fcd8eb8ec71fa02b19e38245f47df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28665, "license_type": "permissive", "max_line_length": 177, "num_lines": 955, "path": "/pyboto3/ram.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef accept_resource_share_invitation(resourceShareInvitationArn=None, clientToken=None):\n \"\"\"\n Accepts an invitation to a resource share from another AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.accept_resource_share_invitation(\n resourceShareInvitationArn='string',\n clientToken='string'\n )\n \n \n :type resourceShareInvitationArn: string\n :param resourceShareInvitationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the invitation.\n \n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'resourceShareInvitation': {\n 'resourceShareInvitationArn': 'string',\n 'resourceShareName': 'string',\n 'resourceShareArn': 'string',\n 'senderAccountId': 'string',\n 'receiverAccountId': 'string',\n 'invitationTimestamp': datetime(2015, 1, 1),\n 'status': 'PENDING'|'ACCEPTED'|'REJECTED'|'EXPIRED',\n 'resourceShareAssociations': [\n {\n 'resourceShareArn': 'string',\n 'associatedEntity': 'string',\n 'associationType': 'PRINCIPAL'|'RESOURCE',\n 'status': 'ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ]\n },\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef associate_resource_share(resourceShareArn=None, resourceArns=None, principals=None, clientToken=None):\n \"\"\"\n Associates the specified resource share with the specified principals and resources.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_resource_share(\n resourceShareArn='string',\n resourceArns=[\n 'string',\n ],\n principals=[\n 'string',\n ],\n clientToken='string'\n )\n \n \n :type resourceShareArn: string\n :param resourceShareArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource share.\n \n\n :type resourceArns: list\n :param resourceArns: The Amazon Resource Names (ARN) of the resources.\n (string) --\n \n\n :type principals: list\n :param principals: The principals.\n (string) --\n \n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'resourceShareAssociations': [\n {\n 'resourceShareArn': 'string',\n 'associatedEntity': 'string',\n 'associationType': 'PRINCIPAL'|'RESOURCE',\n 'status': 'ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ],\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_resource_share(name=None, resourceArns=None, principals=None, tags=None, allowExternalPrincipals=None, clientToken=None):\n \"\"\"\n Creates a resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.create_resource_share(\n name='string',\n resourceArns=[\n 'string',\n ],\n principals=[\n 'string',\n ],\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n allowExternalPrincipals=True|False,\n clientToken='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the resource share.\n \n\n :type resourceArns: list\n :param resourceArns: The Amazon Resource Names (ARN) of the resources to associate with the resource share.\n (string) --\n \n\n :type principals: list\n :param principals: The principals to associate with the resource share. The possible values are IDs of AWS accounts, the ARN of an OU or organization from AWS Organizations.\n (string) --\n \n\n :type tags: list\n :param tags: One or more tags.\n (dict) --Information about a tag.\n key (string) --The key of the tag.\n value (string) --The value of the tag.\n \n \n\n :type allowExternalPrincipals: boolean\n :param allowExternalPrincipals: Indicates whether principals outside your organization can be associated with a resource share.\n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'resourceShare': {\n 'resourceShareArn': 'string',\n 'name': 'string',\n 'owningAccountId': 'string',\n 'allowExternalPrincipals': True|False,\n 'status': 'PENDING'|'ACTIVE'|'FAILED'|'DELETING'|'DELETED',\n 'statusMessage': 'string',\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1)\n },\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_resource_share(resourceShareArn=None, clientToken=None):\n \"\"\"\n Deletes the specified resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_resource_share(\n resourceShareArn='string',\n clientToken='string'\n )\n \n \n :type resourceShareArn: string\n :param resourceShareArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource share.\n \n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'returnValue': True|False,\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef disassociate_resource_share(resourceShareArn=None, resourceArns=None, principals=None, clientToken=None):\n \"\"\"\n Disassociates the specified principals or resources from the specified resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_resource_share(\n resourceShareArn='string',\n resourceArns=[\n 'string',\n ],\n principals=[\n 'string',\n ],\n clientToken='string'\n )\n \n \n :type resourceShareArn: string\n :param resourceShareArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource share.\n \n\n :type resourceArns: list\n :param resourceArns: The Amazon Resource Names (ARN) of the resources.\n (string) --\n \n\n :type principals: list\n :param principals: 
The principals.\n (string) --\n \n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'resourceShareAssociations': [\n {\n 'resourceShareArn': 'string',\n 'associatedEntity': 'string',\n 'associationType': 'PRINCIPAL'|'RESOURCE',\n 'status': 'ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ],\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef enable_sharing_with_aws_organization():\n \"\"\"\n Enables resource sharing within your organization.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_sharing_with_aws_organization()\n \n \n :rtype: dict\n :return: {\n 'returnValue': True|False\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_resource_policies(resourceArns=None, principal=None, nextToken=None, maxResults=None):\n \"\"\"\n Gets the policies for the specified resources.\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_policies(\n resourceArns=[\n 'string',\n ],\n principal='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type resourceArns: list\n :param resourceArns: [REQUIRED]\n The Amazon Resource Names (ARN) of the resources.\n (string) --\n \n\n :type principal: string\n :param principal: The principal.\n\n :type nextToken: string\n :param nextToken: The token for the next page of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return with a single call. 
To retrieve the remaining results, make another call with the returned nextToken value.\n\n :rtype: dict\n :return: {\n 'policies': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_resource_share_associations(associationType=None, resourceShareArns=None, resourceArn=None, principal=None, associationStatus=None, nextToken=None, maxResults=None):\n \"\"\"\n Gets the associations for the specified resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_share_associations(\n associationType='PRINCIPAL'|'RESOURCE',\n resourceShareArns=[\n 'string',\n ],\n resourceArn='string',\n principal='string',\n associationStatus='ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n nextToken='string',\n maxResults=123\n )\n \n \n :type associationType: string\n :param associationType: [REQUIRED]\n The association type.\n \n\n :type resourceShareArns: list\n :param resourceShareArns: The Amazon Resource Names (ARN) of the resource shares.\n (string) --\n \n\n :type resourceArn: string\n :param resourceArn: The Amazon Resource Name (ARN) of the resource.\n\n :type principal: string\n :param principal: The principal.\n\n :type associationStatus: string\n :param associationStatus: The status of the association.\n\n :type nextToken: string\n :param nextToken: The token for the next page of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.\n\n :rtype: dict\n :return: {\n 'resourceShareAssociations': [\n {\n 'resourceShareArn': 'string',\n 'associatedEntity': 'string',\n 'associationType': 'PRINCIPAL'|'RESOURCE',\n 'status': 'ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_resource_share_invitations(resourceShareInvitationArns=None, resourceShareArns=None, nextToken=None, maxResults=None):\n \"\"\"\n Gets the specified invitations for resource sharing.\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_share_invitations(\n resourceShareInvitationArns=[\n 'string',\n ],\n resourceShareArns=[\n 'string',\n ],\n nextToken='string',\n maxResults=123\n )\n \n \n :type resourceShareInvitationArns: list\n :param resourceShareInvitationArns: The Amazon Resource Names (ARN) of the invitations.\n (string) --\n \n\n :type resourceShareArns: list\n :param resourceShareArns: The Amazon Resource Names (ARN) of the resource shares.\n (string) --\n \n\n :type nextToken: string\n :param nextToken: The token for the next page of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return with a single call. 
To retrieve the remaining results, make another call with the returned nextToken value.\n\n :rtype: dict\n :return: {\n 'resourceShareInvitations': [\n {\n 'resourceShareInvitationArn': 'string',\n 'resourceShareName': 'string',\n 'resourceShareArn': 'string',\n 'senderAccountId': 'string',\n 'receiverAccountId': 'string',\n 'invitationTimestamp': datetime(2015, 1, 1),\n 'status': 'PENDING'|'ACCEPTED'|'REJECTED'|'EXPIRED',\n 'resourceShareAssociations': [\n {\n 'resourceShareArn': 'string',\n 'associatedEntity': 'string',\n 'associationType': 'PRINCIPAL'|'RESOURCE',\n 'status': 'ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_resource_shares(resourceShareArns=None, resourceShareStatus=None, resourceOwner=None, name=None, tagFilters=None, nextToken=None, maxResults=None):\n \"\"\"\n Gets the specified resource shares or all of your resource shares.\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_shares(\n resourceShareArns=[\n 'string',\n ],\n resourceShareStatus='PENDING'|'ACTIVE'|'FAILED'|'DELETING'|'DELETED',\n resourceOwner='SELF'|'OTHER-ACCOUNTS',\n name='string',\n tagFilters=[\n {\n 'tagKey': 'string',\n 'tagValues': [\n 'string',\n ]\n },\n ],\n nextToken='string',\n maxResults=123\n )\n \n \n :type resourceShareArns: list\n :param resourceShareArns: The Amazon Resource Names (ARN) of the resource shares.\n (string) --\n \n\n :type resourceShareStatus: string\n :param resourceShareStatus: The status of the resource share.\n\n :type resourceOwner: string\n :param resourceOwner: [REQUIRED]\n The type of owner.\n \n\n :type name: string\n :param name: The name of the resource share.\n\n :type tagFilters: list\n :param tagFilters: One or more tag filters.\n (dict) --Used to filter information based on tags.\n tagKey (string) --The tag key.\n tagValues (list) --The tag values.\n (string) --\n \n \n\n :type nextToken: string\n :param nextToken: The token for the next page of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.\n\n :rtype: dict\n :return: {\n 'resourceShares': [\n {\n 'resourceShareArn': 'string',\n 'name': 'string',\n 'owningAccountId': 'string',\n 'allowExternalPrincipals': True|False,\n 'status': 'PENDING'|'ACTIVE'|'FAILED'|'DELETING'|'DELETED',\n 'statusMessage': 'string',\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_principals(resourceOwner=None, resourceArn=None, principals=None, resourceType=None, resourceShareArns=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the principals with access to the specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_principals(\n resourceOwner='SELF'|'OTHER-ACCOUNTS',\n resourceArn='string',\n principals=[\n 'string',\n ],\n resourceType='string',\n resourceShareArns=[\n 'string',\n ],\n nextToken='string',\n maxResults=123\n )\n \n \n :type resourceOwner: string\n :param resourceOwner: [REQUIRED]\n The type of owner.\n \n\n :type resourceArn: string\n :param resourceArn: The Amazon Resource Name (ARN) of the resource.\n\n :type principals: list\n :param principals: The principals.\n (string) --\n \n\n :type resourceType: string\n :param resourceType: The resource type.\n\n :type resourceShareArns: list\n :param resourceShareArns: The Amazon Resource Names (ARN) of the resource shares.\n (string) --\n \n\n :type nextToken: string\n :param nextToken: The token for the next page of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.\n\n :rtype: dict\n :return: {\n 'principals': [\n {\n 'id': 'string',\n 'resourceShareArn': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_resources(resourceOwner=None, principal=None, resourceType=None, resourceArns=None, resourceShareArns=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the resources that the specified principal can access.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resources(\n resourceOwner='SELF'|'OTHER-ACCOUNTS',\n principal='string',\n resourceType='string',\n resourceArns=[\n 'string',\n ],\n resourceShareArns=[\n 'string',\n ],\n nextToken='string',\n maxResults=123\n )\n \n \n :type resourceOwner: string\n :param resourceOwner: [REQUIRED]\n The type of owner.\n \n\n :type principal: string\n :param principal: The principal.\n\n :type resourceType: string\n :param resourceType: The resource type.\n\n :type resourceArns: list\n :param resourceArns: The Amazon Resource Names (ARN) of the resources.\n (string) --\n \n\n :type resourceShareArns: list\n :param resourceShareArns: The Amazon Resource Names (ARN) of the resource shares.\n (string) --\n \n\n :type nextToken: string\n :param nextToken: The token for the next page of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return with a single call. 
To retrieve the remaining results, make another call with the returned nextToken value.\n\n :rtype: dict\n :return: {\n 'resources': [\n {\n 'arn': 'string',\n 'type': 'string',\n 'resourceShareArn': 'string',\n 'status': 'AVAILABLE'|'ZONAL_RESOURCE_INACCESSIBLE'|'LIMIT_EXCEEDED'|'UNAVAILABLE',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef reject_resource_share_invitation(resourceShareInvitationArn=None, clientToken=None):\n \"\"\"\n Rejects an invitation to a resource share from another AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.reject_resource_share_invitation(\n resourceShareInvitationArn='string',\n clientToken='string'\n )\n \n \n :type resourceShareInvitationArn: string\n :param resourceShareInvitationArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the invitation.\n \n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'resourceShareInvitation': {\n 'resourceShareInvitationArn': 'string',\n 'resourceShareName': 'string',\n 'resourceShareArn': 'string',\n 'senderAccountId': 'string',\n 'receiverAccountId': 'string',\n 'invitationTimestamp': datetime(2015, 1, 1),\n 'status': 'PENDING'|'ACCEPTED'|'REJECTED'|'EXPIRED',\n 'resourceShareAssociations': [\n {\n 'resourceShareArn': 'string',\n 'associatedEntity': 'string',\n 'associationType': 'PRINCIPAL'|'RESOURCE',\n 'status': 'ASSOCIATING'|'ASSOCIATED'|'FAILED'|'DISASSOCIATING'|'DISASSOCIATED',\n 'statusMessage': 'string',\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1),\n 'external': True|False\n },\n ]\n },\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(resourceShareArn=None, tags=None):\n \"\"\"\n Adds the specified tags to the specified resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n resourceShareArn='string',\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type resourceShareArn: string\n :param resourceShareArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource share.\n \n\n :type tags: list\n :param tags: [REQUIRED]\n One or more tags.\n (dict) --Information about a tag.\n key (string) --The key of the tag.\n value (string) --The value of the tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(resourceShareArn=None, tagKeys=None):\n \"\"\"\n Removes the specified tags from the specified resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n resourceShareArn='string',\n tagKeys=[\n 'string',\n ]\n )\n \n \n :type resourceShareArn: string\n :param resourceShareArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource share.\n \n\n :type tagKeys: list\n :param tagKeys: [REQUIRED]\n The tag keys of the tags to remove.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_resource_share(resourceShareArn=None, name=None, allowExternalPrincipals=None, clientToken=None):\n \"\"\"\n Updates the specified resource share.\n See also: AWS API Documentation\n \n \n :example: response = client.update_resource_share(\n resourceShareArn='string',\n name='string',\n allowExternalPrincipals=True|False,\n clientToken='string'\n )\n \n \n 
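A fuller sketch with placeholder values (the account ID and share ID in the ARN are illustrative; use an ARN returned by create_resource_share):\n import boto3\n client = boto3.client('ram')\n response = client.update_resource_share(\n resourceShareArn='arn:aws:ram:us-east-1:111122223333:resource-share/example-id',\n name='project-share',\n allowExternalPrincipals=False\n )\n response['resourceShare']['status'] # e.g. 'ACTIVE' once the update is applied\n \n \n 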
:type resourceShareArn: string\n :param resourceShareArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource share.\n \n\n :type name: string\n :param name: The name of the resource share.\n\n :type allowExternalPrincipals: boolean\n :param allowExternalPrincipals: Indicates whether principals outside your organization can be associated with a resource share.\n\n :type clientToken: string\n :param clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n\n :rtype: dict\n :return: {\n 'resourceShare': {\n 'resourceShareArn': 'string',\n 'name': 'string',\n 'owningAccountId': 'string',\n 'allowExternalPrincipals': True|False,\n 'status': 'PENDING'|'ACTIVE'|'FAILED'|'DELETING'|'DELETED',\n 'statusMessage': 'string',\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n 'creationTime': datetime(2015, 1, 1),\n 'lastUpdatedTime': datetime(2015, 1, 1)\n },\n 'clientToken': 'string'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5971576571464539, "alphanum_fraction": 0.6048444509506226, "avg_line_length": 25.446807861328125, "blob_id": "b7b0d034d5493a21f8120aaa649c58b6e4bc2d6d", "content_id": "05b8c138cb0679479e6608ed86ee5169ace79226", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11188, "license_type": "permissive", "max_line_length": 142, "num_lines": 423, "path": "/pyboto3/iot1clickdevicesservice.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef claim_devices_by_claim_code(ClaimCode=None):\n \"\"\"\n Adds device(s) to your account (i.e., claim one or more devices) if and only if you received a claim code with the device(s).\n See also: AWS API Documentation\n \n \n :example: response = client.claim_devices_by_claim_code(\n ClaimCode='string'\n )\n \n \n :type ClaimCode: string\n :param ClaimCode: [REQUIRED]\n The claim code, starting with 'C-', as provided by the device manufacturer.\n \n\n :rtype: dict\n :return: {\n 'ClaimCode': 'string',\n 'Total': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_device(DeviceId=None):\n \"\"\"\n Given a device ID, returns a DescribeDeviceResponse object describing the details of the device.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_device(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'DeviceDescription': {\n 'Attributes': {\n 'string': 'string'\n },\n 'DeviceId': 'string',\n 'Enabled': True|False,\n 'RemainingLife': 123.0,\n 'Type': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef finalize_device_claim(DeviceId=None):\n \"\"\"\n Given a device ID, finalizes the claim request for the associated device.\n See also: AWS API Documentation\n \n \n :example: response = client.finalize_device_claim(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'State': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_device_methods(DeviceId=None):\n \"\"\"\n Given a device ID, returns the invokable methods associated with the device.\n See also: AWS API Documentation\n \n \n :example: response = client.get_device_methods(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'DeviceMethods': [\n {\n 'DeviceType': 'string',\n 'MethodName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef claim_devices_by_claim_code(ClaimCode=None):\n \"\"\"\n Adds device(s) to your account (i.e., claim one or more devices) if and only if you received a claim code with the device(s).\n See also: AWS API Documentation\n \n \n :example: response = client.claim_devices_by_claim_code(\n ClaimCode='string'\n )\n \n \n :type ClaimCode: string\n :param ClaimCode: [REQUIRED]\n The claim code, starting with 'C-', as provided by the device manufacturer.\n \n\n :rtype: dict\n :return: {\n 'ClaimCode': 'string',\n 'Total': 123\n }\n \n \n \"\"\"\n pass\n\ndef describe_device(DeviceId=None):\n \"\"\"\n Given a device ID, returns a DescribeDeviceResponse object describing the details of the device.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_device(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'DeviceDescription': {\n 'Attributes': {\n 'string': 'string'\n },\n 'DeviceId': 'string',\n 'Enabled': True|False,\n 'RemainingLife': 123.0,\n 'Type': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef finalize_device_claim(DeviceId=None):\n \"\"\"\n Given a device ID, finalizes the claim request for the associated device.\n See also: AWS API Documentation\n \n \n :example: response = client.finalize_device_claim(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'State': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_device_methods(DeviceId=None):\n \"\"\"\n Given a device ID, returns the invokable methods associated with the device.\n See also: AWS API Documentation\n \n \n :example: response = client.get_device_methods(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'DeviceMethods': [\n {\n 'DeviceType': 'string',\n 'MethodName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef initiate_device_claim(DeviceId=None):\n \"\"\"\n Given a device ID, initiates a claim request for the associated device.\n See also: AWS API Documentation\n \n \n :example: response = client.initiate_device_claim(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'State': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef invoke_device_method(DeviceId=None, DeviceMethod=None, DeviceMethodParameters=None):\n \"\"\"\n Given a device ID, issues a request to invoke a named device method (with possible parameters). See the "Example POST" code snippet below.\n See also: AWS API Documentation\n \n \n :example: response = client.invoke_device_method(\n DeviceId='string',\n DeviceMethod={\n 'DeviceType': 'string',\n 'MethodName': 'string'\n },\n DeviceMethodParameters='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :type DeviceMethod: dict\n :param DeviceMethod: The device method to invoke.\n DeviceType (string) --The type of the device, such as 'button'.\n MethodName (string) --The name of the method applicable to the deviceType.\n \n\n :type DeviceMethodParameters: string\n :param DeviceMethodParameters: A JSON encoded string containing the device method request parameters.\n\n :rtype: dict\n :return: {\n 'DeviceMethodResponse': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_device_events(DeviceId=None, FromTimeStamp=None, MaxResults=None, NextToken=None, ToTimeStamp=None):\n \"\"\"\n Using a device ID, returns a DeviceEventsResponse object containing an array of events for the device.\n See also: AWS API Documentation\n \n \n :example: response = client.list_device_events(\n DeviceId='string',\n FromTimeStamp=datetime(2015, 1, 1),\n MaxResults=123,\n NextToken='string',\n ToTimeStamp=datetime(2015, 1, 1)\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :type FromTimeStamp: datetime\n :param FromTimeStamp: [REQUIRED]\n The start date for the device event query, in ISO8601 format. For example, 2018-03-28T15:45:12.880Z\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return per request. If not set, a default value of 100 is used.\n\n :type NextToken: string\n :param NextToken: The token to retrieve the next set of results.\n\n :type ToTimeStamp: datetime\n :param ToTimeStamp: [REQUIRED]\n The end date for the device event query, in ISO8601 format. 
For example, 2018-03-28T15:45:12.880Z\n \n\n :rtype: dict\n :return: {\n 'Events': [\n {\n 'Device': {\n 'Attributes': {},\n 'DeviceId': 'string',\n 'Type': 'string'\n },\n 'StdEvent': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_devices(DeviceType=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the 1-Click compatible devices associated with your AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_devices(\n DeviceType='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type DeviceType: string\n :param DeviceType: The type of the device, such as 'button'.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return per request. If not set, a default value of 100 is used.\n\n :type NextToken: string\n :param NextToken: The token to retrieve the next set of results.\n\n :rtype: dict\n :return: {\n 'Devices': [\n {\n 'Attributes': {\n 'string': 'string'\n },\n 'DeviceId': 'string',\n 'Enabled': True|False,\n 'RemainingLife': 123.0,\n 'Type': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef unclaim_device(DeviceId=None):\n \"\"\"\n Disassociates a device from your AWS account using its device ID.\n See also: AWS API Documentation\n \n \n :example: response = client.unclaim_device(\n DeviceId='string'\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :rtype: dict\n :return: {\n 'State': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_device_state(DeviceId=None, Enabled=None):\n \"\"\"\n Using a Boolean value (true or false), this operation enables or disables the device given a device ID.\n See also: AWS API Documentation\n \n \n :example: response = client.update_device_state(\n DeviceId='string',\n Enabled=True|False\n )\n \n \n :type DeviceId: string\n :param DeviceId: [REQUIRED]\n The unique identifier of the device.\n \n\n :type Enabled: boolean\n :param Enabled: If true, the device is enabled. If false, the device is disabled.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6059664487838745, "alphanum_fraction": 0.623252809047699, "avg_line_length": 36.73229217529297, "blob_id": "bcd1c2b7beb399e214bd04ef128631fe3c3a1548", "content_id": "874dc9f9081121bc914f590bafcc469428511030", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94294, "license_type": "permissive", "max_line_length": 548, "num_lines": 2499, "path": "/pyboto3/lambda_.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_layer_version_permission(LayerName=None, VersionNumber=None, StatementId=None, Action=None, Principal=None, OrganizationId=None, RevisionId=None):\n \"\"\"\n Adds permissions to the resource-based policy of a version of a function layer. Use this action to grant layer usage permission to other accounts. You can grant permission to a single account, all AWS accounts, or all accounts in an organization.\n To revoke permission, call RemoveLayerVersionPermission with the statement ID that you specified when you added it.\n See also: AWS API Documentation\n \n \n :example: response = client.add_layer_version_permission(\n LayerName='string',\n VersionNumber=123,\n StatementId='string',\n Action='string',\n Principal='string',\n OrganizationId='string',\n RevisionId='string'\n )\n \n \n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type VersionNumber: integer\n :param VersionNumber: [REQUIRED]\n The version number.\n \n\n :type StatementId: string\n :param StatementId: [REQUIRED]\n An identifier that distinguishes the policy from others on the same layer version.\n \n\n :type Action: string\n :param Action: [REQUIRED]\n The API action that grants access to the layer. For example, lambda:GetLayerVersion .\n \n\n :type Principal: string\n :param Principal: [REQUIRED]\n An account ID, or * to grant permission to all AWS accounts.\n \n\n :type OrganizationId: string\n :param OrganizationId: With the principal set to * , grant permission to all accounts in the specified organization.\n\n :type RevisionId: string\n :param RevisionId: Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.\n\n :rtype: dict\n :return: {\n 'Statement': 'string',\n 'RevisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef add_permission(FunctionName=None, StatementId=None, Action=None, Principal=None, SourceArn=None, SourceAccount=None, EventSourceToken=None, Qualifier=None, RevisionId=None):\n \"\"\"\n Adds a permission to the resource policy associated with the specified AWS Lambda function. You use resource policies to grant permissions to event sources that use the push model. In a push model, event sources (such as Amazon S3 and custom applications) invoke your Lambda function. Each permission you add to the resource policy allows an event source permission to invoke the Lambda function.\n Permissions apply to the Amazon Resource Name (ARN) used to invoke the function, which can be unqualified (the unpublished version of the function), or include a version or alias. If a client uses a version or alias to invoke a function, use the Qualifier parameter to apply permissions to that ARN. 
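A minimal usage sketch (the function name, statement ID, bucket ARN, and account ID below are hypothetical placeholders, not values from this documentation): response = client.add_permission(FunctionName='my-function', StatementId='s3-invoke-statement', Action='lambda:InvokeFunction', Principal='s3.amazonaws.com', SourceArn='arn:aws:s3:::my-bucket', SourceAccount='123456789012') would grant Amazon S3 permission to invoke the function for events in that bucket. 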
For more information about versioning, see AWS Lambda Function Versioning and Aliases .\n This operation requires permission for the lambda:AddPermission action.\n See also: AWS API Documentation\n \n Examples\n This example adds a permission for an S3 bucket to invoke a Lambda function.\n Expected Output:\n \n :example: response = client.add_permission(\n FunctionName='string',\n StatementId='string',\n Action='string',\n Principal='string',\n SourceArn='string',\n SourceAccount='string',\n EventSourceToken='string',\n Qualifier='string',\n RevisionId='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type StatementId: string\n :param StatementId: [REQUIRED]\n A unique statement identifier.\n \n\n :type Action: string\n :param Action: [REQUIRED]\n The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with lambda: followed by the API name . For example, lambda:CreateFunction . You can use wildcard (lambda:* ) to grant permission for all AWS Lambda actions.\n \n\n :type Principal: string\n :param Principal: [REQUIRED]\n The principal who is getting this permission. The principal can be an AWS service (e.g. s3.amazonaws.com or sns.amazonaws.com ) for service triggers, or an account ID for cross-account access. If you specify a service as a principal, use the SourceArn parameter to limit who can invoke the function through that service.\n \n\n :type SourceArn: string\n :param SourceArn: The Amazon Resource Name of the invoker.\n Warning\n If you add a permission to a service principal without providing the source ARN, any AWS account that creates a mapping to your function ARN can invoke your Lambda function.\n \n\n :type SourceAccount: string\n :param SourceAccount: This parameter is used for S3 and SES. The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket, then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS account created the bucket). You can also use this condition to specify all sources (that is, you don't specify the SourceArn ) owned by a specific account.\n\n :type EventSourceToken: string\n :param EventSourceToken: A unique token that must be supplied by the principal invoking the function. This is currently only used for Alexa Smart Home functions.\n\n :type Qualifier: string\n :param Qualifier: Specify a version or alias to add permissions to a published version of the function.\n\n :type RevisionId: string\n :param RevisionId: An optional value you can use to ensure you are updating the latest update of the function version or alias. 
If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias\n\n :rtype: dict\n :return: {\n 'Statement': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_alias(FunctionName=None, Name=None, FunctionVersion=None, Description=None, RoutingConfig=None):\n \"\"\"\n Creates an alias that points to the specified Lambda function version. For more information, see Introduction to AWS Lambda Aliases .\n Alias names are unique for a given function. This requires permission for the lambda:CreateAlias action.\n See also: AWS API Documentation\n \n \n :example: response = client.create_alias(\n FunctionName='string',\n Name='string',\n FunctionVersion='string',\n Description='string',\n RoutingConfig={\n 'AdditionalVersionWeights': {\n 'string': 123.0\n }\n }\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n Name for the alias you are creating.\n \n\n :type FunctionVersion: string\n :param FunctionVersion: [REQUIRED]\n Lambda function version for which you are creating the alias.\n \n\n :type Description: string\n :param Description: Description of the alias.\n\n :type RoutingConfig: dict\n :param RoutingConfig: Specifies an additional version your alias can point to, allowing you to dictate what percentage of traffic will invoke each version. For more information, see Traffic Shifting Using Aliases .\n AdditionalVersionWeights (dict) --The name of the second alias, and the percentage of traffic that is routed to it.\n (string) --\n (float) --\n \n \n\n :rtype: dict\n :return: {\n 'AliasArn': 'string',\n 'Name': 'string',\n 'FunctionVersion': 'string',\n 'Description': 'string',\n 'RoutingConfig': {\n 'AdditionalVersionWeights': {\n 'string': 123.0\n }\n },\n 'RevisionId': 'string'\n }\n \n \n :returns: \n (string) --\n (float) --\n \n \n \n \"\"\"\n pass\n\ndef create_event_source_mapping(EventSourceArn=None, FunctionName=None, Enabled=None, BatchSize=None, StartingPosition=None, StartingPositionTimestamp=None):\n \"\"\"\n Creates a mapping between an event source and an AWS Lambda function. 
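A minimal mapping sketch (the queue ARN and function name are hypothetical placeholders; BatchSize 10 is the documented maximum for Amazon SQS): response = client.create_event_source_mapping(EventSourceArn='arn:aws:sqs:us-west-2:123456789012:my-queue', FunctionName='my-function', BatchSize=10). 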
Lambda reads items from the event source and triggers the function.\n For details about each event source type, see the following topics.\n See also: AWS API Documentation\n \n \n :example: response = client.create_event_source_mapping(\n EventSourceArn='string',\n FunctionName='string',\n Enabled=True|False,\n BatchSize=123,\n StartingPosition='TRIM_HORIZON'|'LATEST'|'AT_TIMESTAMP',\n StartingPositionTimestamp=datetime(2015, 1, 1)\n )\n \n \n :type EventSourceArn: string\n :param EventSourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the event source.\n Amazon Kinesis - The ARN of the data stream or a stream consumer.\n Amazon DynamoDB Streams - The ARN of the stream.\n Amazon Simple Queue Service - The ARN of the queue.\n \n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.\n \n\n :type Enabled: boolean\n :param Enabled: Disables the event source mapping to pause polling and invocation.\n\n :type BatchSize: integer\n :param BatchSize: The maximum number of items to retrieve in a single batch.\n Amazon Kinesis - Default 100. Max 10,000.\n Amazon DynamoDB Streams - Default 100. Max 1,000.\n Amazon Simple Queue Service - Default 10. Max 10.\n \n\n :type StartingPosition: string\n :param StartingPosition: The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.\n\n :type StartingPositionTimestamp: datetime\n :param StartingPositionTimestamp: With StartingPosition set to AT_TIMESTAMP , the Unix time in seconds from which to start reading.\n\n :rtype: dict\n :return: {\n 'UUID': 'string',\n 'BatchSize': 123,\n 'EventSourceArn': 'string',\n 'FunctionArn': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'LastProcessingResult': 'string',\n 'State': 'string',\n 'StateTransitionReason': 'string'\n }\n \n \n :returns: \n EventSourceArn (string) -- [REQUIRED]\n The Amazon Resource Name (ARN) of the event source.\n \n Amazon Kinesis - The ARN of the data stream or a stream consumer.\n Amazon DynamoDB Streams - The ARN of the stream.\n Amazon Simple Queue Service - The ARN of the queue.\n \n \n FunctionName (string) -- [REQUIRED]\n The name of the Lambda function.\n \n Name formats\n \n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD .\n Partial ARN - 123456789012:function:MyFunction .\n \n The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.\n \n Enabled (boolean) -- Disables the event source mapping to pause polling and invocation.\n BatchSize (integer) -- The maximum number of items to retrieve in a single batch.\n \n Amazon Kinesis - Default 100. Max 10,000.\n Amazon DynamoDB Streams - Default 100. Max 1,000.\n Amazon Simple Queue Service - Default 10. Max 10.\n \n \n StartingPosition (string) -- The position in a stream from which to start reading. 
Required for Amazon Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.\n    StartingPositionTimestamp (datetime) -- With StartingPosition set to AT_TIMESTAMP , the Unix time in seconds from which to start reading.\n    \n    \"\"\"\n    pass\n\ndef create_function(FunctionName=None, Runtime=None, Role=None, Handler=None, Code=None, Description=None, Timeout=None, MemorySize=None, Publish=None, VpcConfig=None, DeadLetterConfig=None, Environment=None, KMSKeyArn=None, TracingConfig=None, Tags=None, Layers=None):\n    \"\"\"\n    Creates a new Lambda function. The function configuration is created from the request parameters, and the code for the function is provided by a .zip file. The function name is case-sensitive.\n    This operation requires permission for the lambda:CreateFunction action.\n    See also: AWS API Documentation\n    \n    Examples\n    This example creates a Lambda function.\n    Expected Output:\n    \n    :example: response = client.create_function(\n        FunctionName='string',\n        Runtime='nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n        Role='string',\n        Handler='string',\n        Code={\n            'ZipFile': b'bytes',\n            'S3Bucket': 'string',\n            'S3Key': 'string',\n            'S3ObjectVersion': 'string'\n        },\n        Description='string',\n        Timeout=123,\n        MemorySize=123,\n        Publish=True|False,\n        VpcConfig={\n            'SubnetIds': [\n                'string',\n            ],\n            'SecurityGroupIds': [\n                'string',\n            ]\n        },\n        DeadLetterConfig={\n            'TargetArn': 'string'\n        },\n        Environment={\n            'Variables': {\n                'string': 'string'\n            }\n        },\n        KMSKeyArn='string',\n        TracingConfig={\n            'Mode': 'Active'|'PassThrough'\n        },\n        Tags={\n            'string': 'string'\n        },\n        Layers=[\n            'string',\n        ]\n    )\n    \n    \n    :type FunctionName: string\n    :param FunctionName: [REQUIRED]\n        The name of the Lambda function.\n        Name formats\n        Function name - MyFunction .\n        Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n        Partial ARN - 123456789012:function:MyFunction .\n        The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n        \n\n    :type Runtime: string\n    :param Runtime: [REQUIRED]\n        The runtime version for the function.\n        \n\n    :type Role: string\n    :param Role: [REQUIRED]\n        The Amazon Resource Name (ARN) of the function's execution role .\n        \n\n    :type Handler: string\n    :param Handler: [REQUIRED]\n        The name of the method within your code that Lambda calls to execute your function. For more information, see Programming Model .\n        \n\n    :type Code: dict\n    :param Code: [REQUIRED]\n        The code for the function.\n        ZipFile (bytes) --The base64-encoded contents of your zip file containing your deployment package. AWS SDK and AWS CLI clients handle the encoding for you.\n        S3Bucket (string) --An Amazon S3 bucket in the same region as your function.\n        S3Key (string) --The Amazon S3 key of the deployment package.\n        S3ObjectVersion (string) --For versioned objects, the version of the deployment package object to use.\n        \n\n    :type Description: string\n    :param Description: A description of the function.\n\n    :type Timeout: integer\n    :param Timeout: The amount of time that Lambda allows a function to run before terminating it. The default is 3 seconds. The maximum allowed value is 900 seconds.\n\n    :type MemorySize: integer\n    :param MemorySize: The amount of memory that your function has access to. Increasing the function's memory also increases its CPU allocation. 
The default value is 128 MB. The value must be a multiple of 64 MB.\n\n :type Publish: boolean\n :param Publish: Set to true to publish the first version of the function during creation.\n\n :type VpcConfig: dict\n :param VpcConfig: If your Lambda function accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC. You must provide at least one security group and one subnet ID.\n SubnetIds (list) --A list of VPC subnet IDs.\n (string) --\n SecurityGroupIds (list) --A list of VPC security groups IDs.\n (string) --\n \n\n :type DeadLetterConfig: dict\n :param DeadLetterConfig: A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues .\n TargetArn (string) --The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.\n \n\n :type Environment: dict\n :param Environment: Environment variables that are accessible from function code during execution.\n Variables (dict) --Environment variable key-value pairs.\n (string) --\n (string) --\n \n \n\n :type KMSKeyArn: string\n :param KMSKeyArn: The ARN of the KMS key used to encrypt your function's environment variables. If not provided, AWS Lambda will use a default service key.\n\n :type TracingConfig: dict\n :param TracingConfig: Set Mode to Active to sample and trace a subset of incoming requests with AWS X-Ray.\n Mode (string) --The tracing mode.\n \n\n :type Tags: dict\n :param Tags: The list of tags (key-value pairs) assigned to the new function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n (string) --\n (string) --\n \n\n :type Layers: list\n :param Layers: A list of function layers to add to the function's execution environment.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_alias(FunctionName=None, Name=None):\n \"\"\"\n Deletes the specified Lambda function alias. 
For more information, see Introduction to AWS Lambda Aliases .\n This requires permission for the lambda:DeleteAlias action.\n See also: AWS API Documentation\n \n Examples\n This operation deletes a Lambda function alias\n Expected Output:\n \n :example: response = client.delete_alias(\n FunctionName='string',\n Name='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n Name of the alias to delete.\n \n\n :return: response = client.delete_alias(\n FunctionName='myFunction',\n Name='alias',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_event_source_mapping(UUID=None):\n \"\"\"\n Deletes an event source mapping.\n See also: AWS API Documentation\n \n Examples\n This operation deletes a Lambda function event source mapping\n Expected Output:\n \n :example: response = client.delete_event_source_mapping(\n UUID='string'\n )\n \n \n :type UUID: string\n :param UUID: [REQUIRED]\n The identifier of the event source mapping.\n \n\n :rtype: dict\n :return: {\n 'UUID': 'string',\n 'BatchSize': 123,\n 'EventSourceArn': 'string',\n 'FunctionArn': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'LastProcessingResult': 'string',\n 'State': 'string',\n 'StateTransitionReason': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_function(FunctionName=None, Qualifier=None):\n \"\"\"\n Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. Event source mappings are not deleted.\n This operation requires permission for the lambda:DeleteFunction action.\n See also: AWS API Documentation\n \n Examples\n This operation deletes a Lambda function\n Expected Output:\n \n :example: response = client.delete_function(\n FunctionName='string',\n Qualifier='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Qualifier: string\n :param Qualifier: Specify a version to delete. You cannot delete a version that is referenced by an alias.\n\n :return: response = client.delete_function(\n FunctionName='myFunction',\n Qualifier='1',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef delete_function_concurrency(FunctionName=None):\n \"\"\"\n Removes concurrent execution limits from this function. For more information, see Managing Concurrency .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_function_concurrency(\n FunctionName='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.\n \n\n \"\"\"\n pass\n\ndef delete_layer_version(LayerName=None, VersionNumber=None):\n \"\"\"\n Deletes a version of a function layer. Deleted versions can no longer be viewed or added to functions. However, a copy of the version remains in Lambda until no functions refer to it.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_layer_version(\n LayerName='string',\n VersionNumber=123\n )\n \n \n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type VersionNumber: integer\n :param VersionNumber: [REQUIRED]\n The version number.\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_account_settings():\n \"\"\"\n Retrieves details about your account's limits and usage in a region.\n See also: AWS API Documentation\n \n Examples\n This operation retrieves a Lambda customer's account settings\n Expected Output:\n \n :example: response = client.get_account_settings()\n \n \n :rtype: dict\n :return: {\n 'AccountLimit': {\n 'TotalCodeSize': 123,\n 'CodeSizeUnzipped': 123,\n 'CodeSizeZipped': 123,\n 'ConcurrentExecutions': 123,\n 'UnreservedConcurrentExecutions': 123\n },\n 'AccountUsage': {\n 'TotalCodeSize': 123,\n 'FunctionCount': 123\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_alias(FunctionName=None, Name=None):\n \"\"\"\n Returns the specified alias information such as the alias ARN, description, and function version it is pointing to. For more information, see Introduction to AWS Lambda Aliases .\n This requires permission for the lambda:GetAlias action.\n See also: AWS API Documentation\n \n Examples\n This operation retrieves a Lambda function alias\n Expected Output:\n \n :example: response = client.get_alias(\n FunctionName='string',\n Name='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.\n    \n\n    :type Name: string\n    :param Name: [REQUIRED]\n        Name of the alias for which you want to retrieve information.\n        \n\n    :rtype: dict\n    :return: {\n        'AliasArn': 'string',\n        'Name': 'string',\n        'FunctionVersion': 'string',\n        'Description': 'string',\n        'RoutingConfig': {\n            'AdditionalVersionWeights': {\n                'string': 123.0\n            }\n        },\n        'RevisionId': 'string'\n    }\n    \n    \n    :returns: \n    (string) --\n    (float) --\n    \n    \n    \n    \"\"\"\n    pass\n\ndef get_event_source_mapping(UUID=None):\n    \"\"\"\n    Returns details about an event source mapping.\n    See also: AWS API Documentation\n    \n    Examples\n    This operation retrieves a Lambda function's event source mapping\n    Expected Output:\n    \n    :example: response = client.get_event_source_mapping(\n        UUID='string'\n    )\n    \n    \n    :type UUID: string\n    :param UUID: [REQUIRED]\n        The identifier of the event source mapping.\n        \n\n    :rtype: dict\n    :return: {\n        'UUID': 'string',\n        'BatchSize': 123,\n        'EventSourceArn': 'string',\n        'FunctionArn': 'string',\n        'LastModified': datetime(2015, 1, 1),\n        'LastProcessingResult': 'string',\n        'State': 'string',\n        'StateTransitionReason': 'string'\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef get_function(FunctionName=None, Qualifier=None):\n    \"\"\"\n    Returns the configuration information of the Lambda function and a presigned URL link to the .zip file you uploaded with CreateFunction so you can download the .zip file. Note that the URL is valid for up to 10 minutes. The configuration information is the same information you provided as parameters when uploading the function.\n    Use the Qualifier parameter to retrieve a published version of the function. Otherwise, returns the unpublished version ($LATEST ). For more information, see AWS Lambda Function Versioning and Aliases .\n    This operation requires permission for the lambda:GetFunction action.\n    See also: AWS API Documentation\n    \n    Examples\n    This operation retrieves a Lambda function\n    Expected Output:\n    \n    :example: response = client.get_function(\n        FunctionName='string',\n        Qualifier='string'\n    )\n    \n    \n    :type FunctionName: string\n    :param FunctionName: [REQUIRED]\n        The name of the Lambda function.\n        Name formats\n        Function name - MyFunction .\n        Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n        Partial ARN - 123456789012:function:MyFunction .\n        The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.\n    \n\n    :type Qualifier: string\n    :param Qualifier: Specify a version or alias to get details about a published version of the function.\n\n    :rtype: dict\n    :return: {\n        'Configuration': {\n            'FunctionName': 'string',\n            'FunctionArn': 'string',\n            'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n            'Role': 'string',\n            'Handler': 'string',\n            'CodeSize': 123,\n            'Description': 'string',\n            'Timeout': 123,\n            'MemorySize': 123,\n            'LastModified': 'string',\n            'CodeSha256': 'string',\n            'Version': 'string',\n            'VpcConfig': {\n                'SubnetIds': [\n                    'string',\n                ],\n                'SecurityGroupIds': [\n                    'string',\n                ],\n                'VpcId': 'string'\n            },\n            'DeadLetterConfig': {\n                'TargetArn': 'string'\n            },\n            'Environment': {\n                'Variables': {\n                    'string': 'string'\n                },\n                'Error': {\n                    'ErrorCode': 'string',\n                    'Message': 'string'\n                }\n            },\n            'KMSKeyArn': 'string',\n            'TracingConfig': {\n                'Mode': 'Active'|'PassThrough'\n            },\n            'MasterArn': 'string',\n            'RevisionId': 'string',\n            'Layers': [\n                {\n                    'Arn': 'string',\n                    'CodeSize': 123\n                },\n            ]\n        },\n        'Code': {\n            'RepositoryType': 'string',\n            'Location': 'string'\n        },\n        'Tags': {\n            'string': 'string'\n        },\n        'Concurrency': {\n            'ReservedConcurrentExecutions': 123\n        }\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef get_function_configuration(FunctionName=None, Qualifier=None):\n    \"\"\"\n    Returns the configuration information of the Lambda function. This is the same information you provided as parameters when uploading the function by using CreateFunction .\n    If you are using the versioning feature, you can retrieve this information for a specific function version by using the optional Qualifier parameter and specifying the function version or alias that points to it. If you don't provide it, the API returns information about the $LATEST version of the function. For more information about versioning, see AWS Lambda Function Versioning and Aliases .\n    This operation requires permission for the lambda:GetFunctionConfiguration operation.\n    See also: AWS API Documentation\n    \n    Examples\n    This operation retrieves a Lambda function's configuration\n    Expected Output:\n    \n    :example: response = client.get_function_configuration(\n        FunctionName='string',\n        Qualifier='string'\n    )\n    \n    \n    :type FunctionName: string\n    :param FunctionName: [REQUIRED]\n        The name of the Lambda function.\n        Name formats\n        Function name - MyFunction .\n        Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n        Partial ARN - 123456789012:function:MyFunction .\n        The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Qualifier: string\n :param Qualifier: Specify a version or alias to get details about a published version of the function.\n\n :rtype: dict\n :return: {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_layer_version(LayerName=None, VersionNumber=None):\n \"\"\"\n Returns information about a version of a function layer, with a link to download the layer archive that's valid for 10 minutes.\n See also: AWS API Documentation\n \n \n :example: response = client.get_layer_version(\n LayerName='string',\n VersionNumber=123\n )\n \n \n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type VersionNumber: integer\n :param VersionNumber: [REQUIRED]\n The version number.\n \n\n :rtype: dict\n :return: {\n 'Content': {\n 'Location': 'string',\n 'CodeSha256': 'string',\n 'CodeSize': 123\n },\n 'LayerArn': 'string',\n 'LayerVersionArn': 'string',\n 'Description': 'string',\n 'CreatedDate': 'string',\n 'Version': 123,\n 'CompatibleRuntimes': [\n 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n ],\n 'LicenseInfo': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_layer_version_policy(LayerName=None, VersionNumber=None):\n \"\"\"\n Returns the permission policy for a layer version. For more information, see AddLayerVersionPermission .\n See also: AWS API Documentation\n \n \n :example: response = client.get_layer_version_policy(\n LayerName='string',\n VersionNumber=123\n )\n \n \n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type VersionNumber: integer\n :param VersionNumber: [REQUIRED]\n The version number.\n \n\n :rtype: dict\n :return: {\n 'Policy': 'string',\n 'RevisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_policy(FunctionName=None, Qualifier=None):\n \"\"\"\n Returns the resource policy associated with the specified Lambda function.\n This action requires permission for the lambda:GetPolicy action.\n See also: AWS API Documentation\n \n Examples\n This operation retrieves a Lambda function policy\n Expected Output:\n \n :example: response = client.get_policy(\n FunctionName='string',\n Qualifier='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Qualifier: string\n :param Qualifier: You can specify this optional query parameter to specify a function version or an alias name in which case this API will return all permissions associated with the specific qualified ARN. If you don't provide this parameter, the API will return permissions that apply to the unqualified function ARN.\n\n :rtype: dict\n :return: {\n 'Policy': 'string',\n 'RevisionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef invoke(FunctionName=None, InvocationType=None, LogType=None, ClientContext=None, Payload=None, Qualifier=None):\n \"\"\"\n Invokes a Lambda function. For an example, see Create the Lambda Function and Test It Manually .\n Specify just a function name to invoke the latest version of the function. To invoke a published version, use the Qualifier parameter to specify a version or alias .\n If you use the RequestResponse (synchronous) invocation option, the function will be invoked only once. If you use the Event (asynchronous) invocation option, the function will be invoked at least once in response to an event and the function must be idempotent to handle this.\n For functions with a long timeout, your client may be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.\n This operation requires permission for the lambda:InvokeFunction action.\n The TooManyRequestsException noted below will return the following: ConcurrentInvocationLimitExceeded will be returned if you have no functions with reserved concurrency and have exceeded your account concurrent limit or if a function without reserved concurrency exceeds the account's unreserved concurrency limit. 
ReservedFunctionConcurrentInvocationLimitExceeded will be returned when a function with reserved concurrency exceeds its configured concurrency limit.\n See also: AWS API Documentation\n \n Examples\n This operation invokes a Lambda function\n Expected Output:\n \n :example: response = client.invoke(\n FunctionName='string',\n InvocationType='Event'|'RequestResponse'|'DryRun',\n LogType='None'|'Tail',\n ClientContext='string',\n Payload=b'bytes'|file,\n Qualifier='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type InvocationType: string\n :param InvocationType: Choose from the following options.\n RequestResponse (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out.\n Event - Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if configured).\n DryRun - Validate parameter values and verify that the user or role has permission to invoke the function.\n \n\n :type LogType: string\n :param LogType: You can set this optional parameter to Tail in the request only if you specify the InvocationType parameter with value RequestResponse . In this case, AWS Lambda returns the base64-encoded last 4 KB of log data produced by your Lambda function in the x-amz-log-result header.\n\n :type ClientContext: string\n :param ClientContext: Using the ClientContext you can pass client-specific information to the Lambda function you are invoking. You can then process the client information in your Lambda function as you choose through the context variable. For an example of a ClientContext JSON, see PutEvents in the Amazon Mobile Analytics API Reference and User Guide .\n The ClientContext JSON must be base64-encoded and has a maximum size of 3583 bytes.\n Note\n ClientContext information is returned only if you use the synchronous (RequestResponse ) invocation type.\n \n\n :type Payload: bytes or seekable file-like object\n :param Payload: JSON that you want to provide to your Lambda function as input.\n\n :type Qualifier: string\n :param Qualifier: Specify a version or alias to invoke a published version of the function.\n\n :rtype: dict\n :return: {\n 'StatusCode': 123,\n 'FunctionError': 'string',\n 'LogResult': 'string',\n 'Payload': StreamingBody(),\n 'ExecutedVersion': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef invoke_async(FunctionName=None, InvokeArgs=None):\n \"\"\"\n Submits an invocation request to AWS Lambda. Upon receiving the request, Lambda executes the specified function asynchronously. 
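A minimal asynchronous invocation sketch (the function name and payload are hypothetical placeholders; InvokeArgs expects JSON, e.g. produced with the standard library's json.dumps): response = client.invoke_async(FunctionName='my-function', InvokeArgs=json.dumps({'key': 'value'})). 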
To see the logs generated by the Lambda function execution, see the CloudWatch Logs console.\n This operation requires permission for the lambda:InvokeFunction action.\n See also: AWS API Documentation\n \n Examples\n This operation invokes a Lambda function asynchronously\n Expected Output:\n \n :example: response = client.invoke_async(\n FunctionName='string',\n InvokeArgs=b'bytes'|file\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type InvokeArgs: bytes or seekable file-like object\n :param InvokeArgs: [REQUIRED]\n JSON that you want to provide to your Lambda function as input.\n \n\n :rtype: dict\n :return: {\n 'Status': 123\n }\n \n \n \"\"\"\n pass\n\ndef list_aliases(FunctionName=None, FunctionVersion=None, Marker=None, MaxItems=None):\n \"\"\"\n Returns list of aliases created for a Lambda function. For each alias, the response includes information such as the alias ARN, description, alias name, and the function version to which it points. For more information, see Introduction to AWS Lambda Aliases .\n This requires permission for the lambda:ListAliases action.\n See also: AWS API Documentation\n \n Examples\n This operation retrieves a Lambda function's aliases\n Expected Output:\n \n :example: response = client.list_aliases(\n FunctionName='string',\n FunctionVersion='string',\n Marker='string',\n MaxItems=123\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type FunctionVersion: string\n :param FunctionVersion: If you specify this optional parameter, the API returns only the aliases that are pointing to the specific Lambda function version, otherwise the API returns all of the aliases created for the Lambda function.\n\n :type Marker: string\n :param Marker: Optional string. An opaque pagination token returned from a previous ListAliases operation. If present, indicates where to continue the listing.\n\n :type MaxItems: integer\n :param MaxItems: Optional integer. Specifies the maximum number of aliases to return in response. This parameter value must be greater than 0.\n\n :rtype: dict\n :return: {\n 'NextMarker': 'string',\n 'Aliases': [\n {\n 'AliasArn': 'string',\n 'Name': 'string',\n 'FunctionVersion': 'string',\n 'Description': 'string',\n 'RoutingConfig': {\n 'AdditionalVersionWeights': {\n 'string': 123.0\n }\n },\n 'RevisionId': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n (float) --\n \n \n \n \"\"\"\n pass\n\ndef list_event_source_mappings(EventSourceArn=None, FunctionName=None, Marker=None, MaxItems=None):\n \"\"\"\n Lists event source mappings. 
Specify an EventSourceArn to only show event source mappings for a single event source.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_event_source_mappings(\n        EventSourceArn='string',\n        FunctionName='string',\n        Marker='string',\n        MaxItems=123\n    )\n    \n    \n    :type EventSourceArn: string\n    :param EventSourceArn: The Amazon Resource Name (ARN) of the event source.\n        Amazon Kinesis - The ARN of the data stream or a stream consumer.\n        Amazon DynamoDB Streams - The ARN of the stream.\n        Amazon Simple Queue Service - The ARN of the queue.\n        \n\n    :type FunctionName: string\n    :param FunctionName: The name of the Lambda function.\n        Name formats\n        Function name - MyFunction .\n        Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n        Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD .\n        Partial ARN - 123456789012:function:MyFunction .\n        The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.\n        \n\n    :type Marker: string\n    :param Marker: A pagination token returned by a previous call.\n\n    :type MaxItems: integer\n    :param MaxItems: The maximum number of event source mappings to return.\n\n    :rtype: dict\n    :return: {\n        'NextMarker': 'string',\n        'EventSourceMappings': [\n            {\n                'UUID': 'string',\n                'BatchSize': 123,\n                'EventSourceArn': 'string',\n                'FunctionArn': 'string',\n                'LastModified': datetime(2015, 1, 1),\n                'LastProcessingResult': 'string',\n                'State': 'string',\n                'StateTransitionReason': 'string'\n            },\n        ]\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_functions(MasterRegion=None, FunctionVersion=None, Marker=None, MaxItems=None):\n    \"\"\"\n    Returns a list of your Lambda functions. For each function, the response includes the function configuration information. You must use GetFunction to retrieve the code for your function.\n    This operation requires permission for the lambda:ListFunctions action.\n    If you are using the versioning feature, you can list all of your functions or only $LATEST versions. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases .\n    See also: AWS API Documentation\n    \n    Examples\n    This operation retrieves Lambda functions\n    Expected Output:\n    \n    :example: response = client.list_functions(\n        MasterRegion='string',\n        FunctionVersion='ALL',\n        Marker='string',\n        MaxItems=123\n    )\n    \n    \n    :type MasterRegion: string\n    :param MasterRegion: Specify a region (e.g. us-east-2 ) to only list functions that were created in that region, or ALL to include functions replicated from any region. If specified, you also must specify the FunctionVersion .\n\n    :type FunctionVersion: string\n    :param FunctionVersion: Set to ALL to list all published versions. If not specified, only the latest unpublished version ARN is returned.\n\n    :type Marker: string\n    :param Marker: Optional string. An opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing.\n\n    :type MaxItems: integer\n    :param MaxItems: Optional integer. Specifies the maximum number of AWS Lambda functions to return in response. This parameter value must be greater than 0. 
The absolute maximum of AWS Lambda functions that can be returned is 50.\n\n :rtype: dict\n :return: {\n 'NextMarker': 'string',\n 'Functions': [\n {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_layer_versions(CompatibleRuntime=None, LayerName=None, Marker=None, MaxItems=None):\n \"\"\"\n Lists the versions of a function layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime.\n See also: AWS API Documentation\n \n \n :example: response = client.list_layer_versions(\n CompatibleRuntime='nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n LayerName='string',\n Marker='string',\n MaxItems=123\n )\n \n \n :type CompatibleRuntime: string\n :param CompatibleRuntime: A runtime identifier. For example, go1.x .\n\n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type Marker: string\n :param Marker: A pagination token returned by a previous call.\n\n :type MaxItems: integer\n :param MaxItems: The maximum number of versions to return.\n\n :rtype: dict\n :return: {\n 'NextMarker': 'string',\n 'LayerVersions': [\n {\n 'LayerVersionArn': 'string',\n 'Version': 123,\n 'Description': 'string',\n 'CreatedDate': 'string',\n 'CompatibleRuntimes': [\n 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n ],\n 'LicenseInfo': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_layers(CompatibleRuntime=None, Marker=None, MaxItems=None):\n \"\"\"\n Lists function layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime.\n See also: AWS API Documentation\n \n \n :example: response = client.list_layers(\n CompatibleRuntime='nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n Marker='string',\n MaxItems=123\n )\n \n \n :type CompatibleRuntime: string\n :param CompatibleRuntime: A runtime identifier. 
For example, go1.x .\n\n    :type Marker: string\n    :param Marker: A pagination token returned by a previous call.\n\n    :type MaxItems: integer\n    :param MaxItems: The maximum number of layers to return.\n\n    :rtype: dict\n    :return: {\n        'NextMarker': 'string',\n        'Layers': [\n            {\n                'LayerName': 'string',\n                'LayerArn': 'string',\n                'LatestMatchingVersion': {\n                    'LayerVersionArn': 'string',\n                    'Version': 123,\n                    'Description': 'string',\n                    'CreatedDate': 'string',\n                    'CompatibleRuntimes': [\n                        'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n                    ],\n                    'LicenseInfo': 'string'\n                }\n            },\n        ]\n    }\n    \n    \n    :returns: \n    (string) --\n    \n    \"\"\"\n    pass\n\ndef list_tags(Resource=None):\n    \"\"\"\n    Returns a list of tags assigned to a function when supplied the function ARN (Amazon Resource Name). For more information on Tagging, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.list_tags(\n        Resource='string'\n    )\n    \n    \n    :type Resource: string\n    :param Resource: [REQUIRED]\n        The ARN (Amazon Resource Name) of the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n        \n\n    :rtype: dict\n    :return: {\n        'Tags': {\n            'string': 'string'\n        }\n    }\n    \n    \n    \"\"\"\n    pass\n\ndef list_versions_by_function(FunctionName=None, Marker=None, MaxItems=None):\n    \"\"\"\n    Lists all versions of a function. For information about versioning, see AWS Lambda Function Versioning and Aliases .\n    See also: AWS API Documentation\n    \n    Examples\n    This operation retrieves a Lambda function's versions\n    Expected Output:\n    \n    :example: response = client.list_versions_by_function(\n        FunctionName='string',\n        Marker='string',\n        MaxItems=123\n    )\n    \n    \n    :type FunctionName: string\n    :param FunctionName: [REQUIRED]\n        The name of the lambda function.\n        Name formats\n        Function name - MyFunction .\n        Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n        Partial ARN - 123456789012:function:MyFunction .\n        The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n        \n\n    :type Marker: string\n    :param Marker: Optional string. An opaque pagination token returned from a previous ListVersionsByFunction operation. If present, indicates where to continue the listing.\n\n    :type MaxItems: integer\n    :param MaxItems: Optional integer. Specifies the maximum number of AWS Lambda function versions to return in response. 
This parameter value must be greater than 0.\n\n :rtype: dict\n :return: {\n 'NextMarker': 'string',\n 'Versions': [\n {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef publish_layer_version(LayerName=None, Description=None, Content=None, CompatibleRuntimes=None, LicenseInfo=None):\n \"\"\"\n Creates a function layer from a ZIP archive. Each time you call PublishLayerVersion with the same version name, a new version is created.\n Add layers to your function with CreateFunction or UpdateFunctionConfiguration .\n See also: AWS API Documentation\n \n \n :example: response = client.publish_layer_version(\n LayerName='string',\n Description='string',\n Content={\n 'S3Bucket': 'string',\n 'S3Key': 'string',\n 'S3ObjectVersion': 'string',\n 'ZipFile': b'bytes'\n },\n CompatibleRuntimes=[\n 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n ],\n LicenseInfo='string'\n )\n \n \n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type Description: string\n :param Description: The description of the version.\n\n :type Content: dict\n :param Content: [REQUIRED]\n The function layer archive.\n S3Bucket (string) --The Amazon S3 bucket of the layer archive.\n S3Key (string) --The Amazon S3 key of the layer archive.\n S3ObjectVersion (string) --For versioned objects, the version of the layer archive object to use.\n ZipFile (bytes) --The base64-encoded contents of the layer archive. AWS SDK and AWS CLI clients handle the encoding for you.\n \n\n :type CompatibleRuntimes: list\n :param CompatibleRuntimes: A list of compatible function runtimes . Used for filtering with ListLayers and ListLayerVersions .\n (string) --\n \n\n :type LicenseInfo: string\n :param LicenseInfo: The layer's software license. It can be any of the following:\n An SPDX license identifier . For example, MIT .\n The URL of a license hosted on the internet. 
For example, https://opensource.org/licenses/MIT .\n The full text of the license.\n \n\n :rtype: dict\n :return: {\n 'Content': {\n 'Location': 'string',\n 'CodeSha256': 'string',\n 'CodeSize': 123\n },\n 'LayerArn': 'string',\n 'LayerVersionArn': 'string',\n 'Description': 'string',\n 'CreatedDate': 'string',\n 'Version': 123,\n 'CompatibleRuntimes': [\n 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n ],\n 'LicenseInfo': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef publish_version(FunctionName=None, CodeSha256=None, Description=None, RevisionId=None):\n \"\"\"\n Publishes a version of your function from the current snapshot of $LATEST. That is, AWS Lambda takes a snapshot of the function code and configuration information from $LATEST and publishes a new version. The code and configuration cannot be modified after publication. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases .\n See also: AWS API Documentation\n \n Examples\n This operation publishes a version of a Lambda function\n Expected Output:\n \n :example: response = client.publish_version(\n FunctionName='string',\n CodeSha256='string',\n Description='string',\n RevisionId='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type CodeSha256: string\n :param CodeSha256: The SHA256 hash of the deployment package you want to publish. This provides validation on the code you are publishing. If you provide this parameter, the value must match the SHA256 of the $LATEST version for the publication to succeed. You can use the DryRun parameter of UpdateFunctionCode to verify the hash value that will be returned before publishing your new version.\n\n :type Description: string\n :param Description: The description for the version you are publishing. If not provided, AWS Lambda copies the description from the $LATEST version.\n\n :type RevisionId: string\n :param RevisionId: An optional value you can use to ensure you are updating the latest update of the function version or alias. 
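A common optimistic-locking sketch for this parameter (the client and function name are illustrative assumptions):\n current = client.get_function(FunctionName='MyFunction')\n client.publish_version(\n FunctionName='MyFunction',\n RevisionId=current['Configuration']['RevisionId']\n )\n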
If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias .\n\n :rtype: dict\n :return: {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef put_function_concurrency(FunctionName=None, ReservedConcurrentExecutions=None):\n \"\"\"\n Sets a limit on the number of concurrent executions available to this function. It is a subset of your account's total concurrent execution limit per region. Note that Lambda automatically reserves a buffer of 100 concurrent executions for functions without any reserved concurrency limit. This means if your account limit is 1000, you have a total of 900 available to allocate to individual functions. For more information, see Managing Concurrency .\n See also: AWS API Documentation\n \n \n :example: response = client.put_function_concurrency(\n FunctionName='string',\n ReservedConcurrentExecutions=123\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type ReservedConcurrentExecutions: integer\n :param ReservedConcurrentExecutions: [REQUIRED]\n The concurrent execution limit reserved for this function.\n \n\n :rtype: dict\n :return: {\n 'ReservedConcurrentExecutions': 123\n }\n \n \n \"\"\"\n pass\n\ndef remove_layer_version_permission(LayerName=None, VersionNumber=None, StatementId=None, RevisionId=None):\n \"\"\"\n Removes a statement from the permissions policy for a layer version. For more information, see AddLayerVersionPermission .\n See also: AWS API Documentation\n \n \n :example: response = client.remove_layer_version_permission(\n LayerName='string',\n VersionNumber=123,\n StatementId='string',\n RevisionId='string'\n )\n \n \n :type LayerName: string\n :param LayerName: [REQUIRED]\n The name of the layer.\n \n\n :type VersionNumber: integer\n :param VersionNumber: [REQUIRED]\n The version number.\n \n\n :type StatementId: string\n :param StatementId: [REQUIRED]\n The identifier that was specified when the statement was added.\n \n\n :type RevisionId: string\n :param RevisionId: Only update the policy if the revision ID matches the ID specified.
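A read-then-remove sketch along these lines (names are placeholders; get_layer_version_policy is assumed to return the policy's current RevisionId):\n policy = client.get_layer_version_policy(LayerName='my-layer', VersionNumber=1)\n client.remove_layer_version_permission(\n LayerName='my-layer',\n VersionNumber=1,\n StatementId='account-grant',\n RevisionId=policy['RevisionId']\n )\n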
Use this option to avoid modifying a policy that has changed since you last read it.\n\n \"\"\"\n pass\n\ndef remove_permission(FunctionName=None, StatementId=None, Qualifier=None, RevisionId=None):\n \"\"\"\n Removes permissions from a function. You can remove individual permissions from a resource policy associated with a Lambda function by providing a statement ID that you provided when you added the permission. When you remove permissions, disable the event source mapping or trigger configuration first to avoid errors.\n Permissions apply to the Amazon Resource Name (ARN) used to invoke the function, which can be unqualified (the unpublished version of the function), or include a version or alias. If a client uses a version or alias to invoke a function, use the Qualifier parameter to apply permissions to that ARN. For more information about versioning, see AWS Lambda Function Versioning and Aliases .\n You need permission for the lambda:RemovePermission action.\n See also: AWS API Documentation\n \n Examples\n This operation removes a Lambda function's permissions\n Expected Output:\n \n :example: response = client.remove_permission(\n FunctionName='string',\n StatementId='string',\n Qualifier='string',\n RevisionId='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type StatementId: string\n :param StatementId: [REQUIRED]\n Statement ID of the permission to remove.\n \n\n :type Qualifier: string\n :param Qualifier: Specify a version or alias to remove permissions from a published version of the function.\n\n :type RevisionId: string\n :param RevisionId: An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias .\n\n :return: response = client.remove_permission(\n FunctionName='myFunction',\n Qualifier='1',\n StatementId='role-statement-id',\n )\n \n print(response)\n \n \n \"\"\"\n pass\n\ndef tag_resource(Resource=None, Tags=None):\n \"\"\"\n Creates a list of tags (key-value pairs) on the Lambda function. Requires the Lambda function ARN (Amazon Resource Name). If a key is specified without a value, Lambda creates a tag with the specified key and a value of null. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n Resource='string',\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n The ARN (Amazon Resource Name) of the Lambda function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n \n\n :type Tags: dict\n :param Tags: [REQUIRED]\n The list of tags (key-value pairs) you are assigning to the Lambda function.
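For example, a minimal call might look like the following (the ARN and tag values are placeholders):\n client.tag_resource(\n Resource='arn:aws:lambda:us-west-2:123456789012:function:MyFunction',\n Tags={'team': 'platform', 'stage': 'dev'}\n )\n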
For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n (string) --\n (string) --\n \n\n \"\"\"\n pass\n\ndef untag_resource(Resource=None, TagKeys=None):\n \"\"\"\n Removes tags from a Lambda function. Requires the function ARN (Amazon Resource Name). For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n Resource='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n The ARN (Amazon Resource Name) of the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n The list of tag keys to be deleted from the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide .\n (string) --\n \n\n \"\"\"\n pass\n\ndef update_alias(FunctionName=None, Name=None, FunctionVersion=None, Description=None, RoutingConfig=None, RevisionId=None):\n \"\"\"\n Using this API you can update the function version to which the alias points and the alias description. For more information, see Introduction to AWS Lambda Aliases .\n This requires permission for the lambda:UpdateAlias action.\n See also: AWS API Documentation\n \n Examples\n This operation updates a Lambda function alias\n Expected Output:\n \n :example: response = client.update_alias(\n FunctionName='string',\n Name='string',\n FunctionVersion='string',\n Description='string',\n RoutingConfig={\n 'AdditionalVersionWeights': {\n 'string': 123.0\n }\n },\n RevisionId='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The alias name.\n \n\n :type FunctionVersion: string\n :param FunctionVersion: Using this parameter you can change the Lambda function version to which the alias points.\n\n :type Description: string\n :param Description: You can change the description of the alias using this parameter.\n\n :type RoutingConfig: dict\n :param RoutingConfig: Specifies an additional version your alias can point to, allowing you to dictate what percentage of traffic will invoke each version. For more information, see Traffic Shifting Using Aliases .\n AdditionalVersionWeights (dict) --The name of the second alias, and the percentage of traffic that is routed to it.\n (string) --\n (float) --\n \n \n\n :type RevisionId: string\n :param RevisionId: An optional value you can use to ensure you are updating the latest update of the function version or alias. 
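As a sketch, the read-then-update pattern combined with weighted routing might look like this (the alias name and version numbers are illustrative):\n alias = client.get_alias(FunctionName='MyFunction', Name='PROD')\n client.update_alias(\n FunctionName='MyFunction',\n Name='PROD',\n FunctionVersion='2',\n RoutingConfig={'AdditionalVersionWeights': {'3': 0.05}},\n RevisionId=alias['RevisionId']\n )\n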
If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias .\n\n :rtype: dict\n :return: {\n 'AliasArn': 'string',\n 'Name': 'string',\n 'FunctionVersion': 'string',\n 'Description': 'string',\n 'RoutingConfig': {\n 'AdditionalVersionWeights': {\n 'string': 123.0\n }\n },\n 'RevisionId': 'string'\n }\n \n \n :returns: \n (string) --\n (float) --\n \n \n \n \"\"\"\n pass\n\ndef update_event_source_mapping(UUID=None, FunctionName=None, Enabled=None, BatchSize=None):\n \"\"\"\n Updates an event source mapping. You can change the function that AWS Lambda invokes, or pause invocation and resume later from the same location.\n See also: AWS API Documentation\n \n Examples\n This operation updates a Lambda function event source mapping\n Expected Output:\n \n :example: response = client.update_event_source_mapping(\n UUID='string',\n FunctionName='string',\n Enabled=True|False,\n BatchSize=123\n )\n \n \n :type UUID: string\n :param UUID: [REQUIRED]\n The identifier of the event source mapping.\n \n\n :type FunctionName: string\n :param FunctionName: The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.\n \n\n :type Enabled: boolean\n :param Enabled: Disables the event source mapping to pause polling and invocation.\n\n :type BatchSize: integer\n :param BatchSize: The maximum number of items to retrieve in a single batch.\n Amazon Kinesis - Default 100. Max 10,000.\n Amazon DynamoDB Streams - Default 100. Max 1,000.\n Amazon Simple Queue Service - Default 10. Max 10.\n \n\n :rtype: dict\n :return: {\n 'UUID': 'string',\n 'BatchSize': 123,\n 'EventSourceArn': 'string',\n 'FunctionArn': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'LastProcessingResult': 'string',\n 'State': 'string',\n 'StateTransitionReason': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_function_code(FunctionName=None, ZipFile=None, S3Bucket=None, S3Key=None, S3ObjectVersion=None, Publish=None, DryRun=None, RevisionId=None):\n \"\"\"\n Updates the code for the specified Lambda function. This operation must only be used on an existing Lambda function and cannot be used to update the function configuration.\n If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function.
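For instance, updating the code and then publishing an immutable version guarded by the returned hash might look like this (the file path and function name are placeholders):\n with open('function.zip', 'rb') as f:\n update = client.update_function_code(FunctionName='MyFunction', ZipFile=f.read())\n client.publish_version(FunctionName='MyFunction', CodeSha256=update['CodeSha256'])\n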
For information about the versioning feature, see AWS Lambda Function Versioning and Aliases .\n This operation requires permission for the lambda:UpdateFunctionCode action.\n See also: AWS API Documentation\n \n Examples\n This operation updates a Lambda function's code\n Expected Output:\n \n :example: response = client.update_function_code(\n FunctionName='string',\n ZipFile=b'bytes',\n S3Bucket='string',\n S3Key='string',\n S3ObjectVersion='string',\n Publish=True|False,\n DryRun=True|False,\n RevisionId='string'\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type ZipFile: bytes\n :param ZipFile: The contents of your zip file containing your deployment package. If you are using the web API directly, the contents of the zip file must be base64-encoded. If you are using the AWS SDKs or the AWS CLI, the SDKs or CLI will do the encoding for you. For more information about creating a .zip file, see Execution Permissions .\n This value will be base64 encoded automatically. Do not base64 encode this value prior to performing the operation.\n \n\n :type S3Bucket: string\n :param S3Bucket: Amazon S3 bucket name where the .zip file containing your deployment package is stored. This bucket must reside in the same AWS Region where you are creating the Lambda function.\n\n :type S3Key: string\n :param S3Key: The Amazon S3 object (the deployment package) key name you want to upload.\n\n :type S3ObjectVersion: string\n :param S3ObjectVersion: The Amazon S3 object (the deployment package) version you want to upload.\n\n :type Publish: boolean\n :param Publish: This boolean parameter can be used to request AWS Lambda to update the Lambda function and publish a version as an atomic operation.\n\n :type DryRun: boolean\n :param DryRun: This boolean parameter can be used to test your request to AWS Lambda to update the Lambda function and publish a version as an atomic operation. It will do all necessary computation and validation of your code but will not upload it or publish a version. Each time this operation is invoked, the CodeSha256 hash value of the provided code will also be computed and returned in the response.\n\n :type RevisionId: string\n :param RevisionId: An optional value you can use to ensure you are updating the latest update of the function version or alias.
If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias .\n\n :rtype: dict\n :return: {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_function_configuration(FunctionName=None, Role=None, Handler=None, Description=None, Timeout=None, MemorySize=None, VpcConfig=None, Environment=None, Runtime=None, DeadLetterConfig=None, KMSKeyArn=None, TracingConfig=None, RevisionId=None, Layers=None):\n \"\"\"\n Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.\n If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases .\n This operation requires permission for the lambda:UpdateFunctionConfiguration action.\n See also: AWS API Documentation\n \n Examples\n This operation updates a Lambda function's configuration\n Expected Output:\n \n :example: response = client.update_function_configuration(\n FunctionName='string',\n Role='string',\n Handler='string',\n Description='string',\n Timeout=123,\n MemorySize=123,\n VpcConfig={\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ]\n },\n Environment={\n 'Variables': {\n 'string': 'string'\n }\n },\n Runtime='nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n DeadLetterConfig={\n 'TargetArn': 'string'\n },\n KMSKeyArn='string',\n TracingConfig={\n 'Mode': 'Active'|'PassThrough'\n },\n RevisionId='string',\n Layers=[\n 'string',\n ]\n )\n \n \n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the Lambda function.\n Name formats\n Function name - MyFunction .\n Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .\n Partial ARN - 123456789012:function:MyFunction .\n The length constraint applies only to the full ARN.
If you specify only the function name, it is limited to 64 characters in length.\n \n\n :type Role: string\n :param Role: The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when it executes your function.\n\n :type Handler: string\n :param Handler: The function that Lambda calls to begin executing your function. For Node.js, it is the module-name.export value in your function.\n\n :type Description: string\n :param Description: A short user-defined function description. AWS Lambda does not use this value. Assign a meaningful description as you see fit.\n\n :type Timeout: integer\n :param Timeout: The amount of time that Lambda allows a function to run before terminating it. The default is 3 seconds. The maximum allowed value is 900 seconds.\n\n :type MemorySize: integer\n :param MemorySize: The amount of memory, in MB, your Lambda function is given. AWS Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.\n\n :type VpcConfig: dict\n :param VpcConfig: Specify security groups and subnets in a VPC to which your Lambda function needs access.\n SubnetIds (list) --A list of VPC subnet IDs.\n (string) --\n SecurityGroupIds (list) --A list of VPC security group IDs.\n (string) --\n \n\n :type Environment: dict\n :param Environment: The parent object that contains your environment's configuration settings.\n Variables (dict) --Environment variable key-value pairs.\n (string) --\n (string) --\n \n \n\n :type Runtime: string\n :param Runtime: The runtime version for the function.\n\n :type DeadLetterConfig: dict\n :param DeadLetterConfig: A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues .\n TargetArn (string) --The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.\n \n\n :type KMSKeyArn: string\n :param KMSKeyArn: The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's environment variables. If you elect to use the AWS Lambda default service key, pass in an empty string ('') for this parameter.\n\n :type TracingConfig: dict\n :param TracingConfig: Set Mode to Active to sample and trace a subset of incoming requests with AWS X-Ray.\n Mode (string) --The tracing mode.\n \n\n :type RevisionId: string\n :param RevisionId: An optional value you can use to ensure you are updating the latest update of the function version or alias.
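A configuration-update sketch that uses the revision guard (all values are placeholders):\n cfg = client.get_function_configuration(FunctionName='MyFunction')\n client.update_function_configuration(\n FunctionName='MyFunction',\n MemorySize=256,\n Environment={'Variables': {'LOG_LEVEL': 'INFO'}},\n RevisionId=cfg['RevisionId']\n )\n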
If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias .\n\n :type Layers: list\n :param Layers: A list of function layers to add to the function's execution environment.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'FunctionName': 'string',\n 'FunctionArn': 'string',\n 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',\n 'Role': 'string',\n 'Handler': 'string',\n 'CodeSize': 123,\n 'Description': 'string',\n 'Timeout': 123,\n 'MemorySize': 123,\n 'LastModified': 'string',\n 'CodeSha256': 'string',\n 'Version': 'string',\n 'VpcConfig': {\n 'SubnetIds': [\n 'string',\n ],\n 'SecurityGroupIds': [\n 'string',\n ],\n 'VpcId': 'string'\n },\n 'DeadLetterConfig': {\n 'TargetArn': 'string'\n },\n 'Environment': {\n 'Variables': {\n 'string': 'string'\n },\n 'Error': {\n 'ErrorCode': 'string',\n 'Message': 'string'\n }\n },\n 'KMSKeyArn': 'string',\n 'TracingConfig': {\n 'Mode': 'Active'|'PassThrough'\n },\n 'MasterArn': 'string',\n 'RevisionId': 'string',\n 'Layers': [\n {\n 'Arn': 'string',\n 'CodeSize': 123\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.4528529942035675, "alphanum_fraction": 0.4679400324821472, "avg_line_length": 31.311717987060547, "blob_id": "9ebe532d40309e85becb1bac516603ebee95746e", "content_id": "63cac37b99b38b12742a8e685dfc271edc2ccba3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41360, "license_type": "permissive", "max_line_length": 513, "num_lines": 1280, "path": "/pyboto3/xray.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_get_traces(TraceIds=None, NextToken=None):\n \"\"\"\n Retrieves a list of traces specified by ID. Each trace is a collection of segment documents that originates from a single request. 
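A typical retrieval sketch pairs this call with GetTraceSummaries (the time window is illustrative, and the trace IDs are batched conservatively since BatchGetTraces caps the number of IDs per call):\n import boto3\n from datetime import datetime, timedelta\n xray = boto3.client('xray')\n end = datetime.utcnow()\n summaries = xray.get_trace_summaries(StartTime=end - timedelta(minutes=10), EndTime=end)\n ids = [t['Id'] for t in summaries['TraceSummaries']]\n traces = xray.batch_get_traces(TraceIds=ids[:5])\n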
Use GetTraceSummaries to get a list of trace IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_get_traces(\n TraceIds=[\n 'string',\n ],\n NextToken='string'\n )\n \n \n :type TraceIds: list\n :param TraceIds: [REQUIRED]\n Specify the trace IDs of requests for which to retrieve segments.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: Pagination token. Not used.\n\n :rtype: dict\n :return: {\n 'Traces': [\n {\n 'Id': 'string',\n 'Duration': 123.0,\n 'Segments': [\n {\n 'Id': 'string',\n 'Document': 'string'\n },\n ]\n },\n ],\n 'UnprocessedTraceIds': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_group(GroupName=None, FilterExpression=None):\n \"\"\"\n Creates a group resource with a name and a filter expression.\n See also: AWS API Documentation\n \n \n :example: response = client.create_group(\n GroupName='string',\n FilterExpression='string'\n )\n \n \n :type GroupName: string\n :param GroupName: [REQUIRED]\n The case-sensitive name of the new group. Default is a reserved name and names must be unique.\n \n\n :type FilterExpression: string\n :param FilterExpression: The filter expression defining criteria by which to group traces.\n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupName': 'string',\n 'GroupARN': 'string',\n 'FilterExpression': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_sampling_rule(SamplingRule=None):\n \"\"\"\n Creates a rule to control sampling behavior for instrumented applications. Services retrieve rules with GetSamplingRules , and evaluate each rule in ascending order of priority for each request. If a rule matches, the service records a trace, borrowing it from the reservoir size. After 10 seconds, the service reports back to X-Ray with GetSamplingTargets to get updated versions of each in-use rule. The updated rule contains a trace quota that the service can use instead of borrowing from the reservoir.\n See also: AWS API Documentation\n \n \n :example: response = client.create_sampling_rule(\n SamplingRule={\n 'RuleName': 'string',\n 'RuleARN': 'string',\n 'ResourceARN': 'string',\n 'Priority': 123,\n 'FixedRate': 123.0,\n 'ReservoirSize': 123,\n 'ServiceName': 'string',\n 'ServiceType': 'string',\n 'Host': 'string',\n 'HTTPMethod': 'string',\n 'URLPath': 'string',\n 'Version': 123,\n 'Attributes': {\n 'string': 'string'\n }\n }\n )\n \n \n :type SamplingRule: dict\n :param SamplingRule: [REQUIRED]\n The rule definition.\n RuleName (string) --The name of the sampling rule. Specify a rule by either name or ARN, but not both.\n RuleARN (string) --The ARN of the sampling rule. 
Specify a rule by either name or ARN, but not both.\n ResourceARN (string) -- [REQUIRED]Matches the ARN of the AWS resource on which the service runs.\n Priority (integer) -- [REQUIRED]The priority of the sampling rule.\n FixedRate (float) -- [REQUIRED]The percentage of matching requests to instrument, after the reservoir is exhausted.\n ReservoirSize (integer) -- [REQUIRED]A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.\n ServiceName (string) -- [REQUIRED]Matches the name that the service uses to identify itself in segments.\n ServiceType (string) -- [REQUIRED]Matches the origin that the service uses to identify its type in segments.\n Host (string) -- [REQUIRED]Matches the hostname from a request URL.\n HTTPMethod (string) -- [REQUIRED]Matches the HTTP method of a request.\n URLPath (string) -- [REQUIRED]Matches the path from a request URL.\n Version (integer) -- [REQUIRED]The version of the sampling rule format (1 ).\n Attributes (dict) --Matches attributes derived from the request.\n (string) --\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'SamplingRuleRecord': {\n 'SamplingRule': {\n 'RuleName': 'string',\n 'RuleARN': 'string',\n 'ResourceARN': 'string',\n 'Priority': 123,\n 'FixedRate': 123.0,\n 'ReservoirSize': 123,\n 'ServiceName': 'string',\n 'ServiceType': 'string',\n 'Host': 'string',\n 'HTTPMethod': 'string',\n 'URLPath': 'string',\n 'Version': 123,\n 'Attributes': {\n 'string': 'string'\n }\n },\n 'CreatedAt': datetime(2015, 1, 1),\n 'ModifiedAt': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef delete_group(GroupName=None, GroupARN=None):\n \"\"\"\n Deletes a group resource.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_group(\n GroupName='string',\n GroupARN='string'\n )\n \n \n :type GroupName: string\n :param GroupName: The case-sensitive name of the group.\n\n :type GroupARN: string\n :param GroupARN: The ARN of the group that was generated on creation.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_sampling_rule(RuleName=None, RuleARN=None):\n \"\"\"\n Deletes a sampling rule.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_sampling_rule(\n RuleName='string',\n RuleARN='string'\n )\n \n \n :type RuleName: string\n :param RuleName: The name of the sampling rule. Specify a rule by either name or ARN, but not both.\n\n :type RuleARN: string\n :param RuleARN: The ARN of the sampling rule. 
Specify a rule by either name or ARN, but not both.\n\n :rtype: dict\n :return: {\n 'SamplingRuleRecord': {\n 'SamplingRule': {\n 'RuleName': 'string',\n 'RuleARN': 'string',\n 'ResourceARN': 'string',\n 'Priority': 123,\n 'FixedRate': 123.0,\n 'ReservoirSize': 123,\n 'ServiceName': 'string',\n 'ServiceType': 'string',\n 'Host': 'string',\n 'HTTPMethod': 'string',\n 'URLPath': 'string',\n 'Version': 123,\n 'Attributes': {\n 'string': 'string'\n }\n },\n 'CreatedAt': datetime(2015, 1, 1),\n 'ModifiedAt': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_encryption_config():\n \"\"\"\n Retrieves the current encryption configuration for X-Ray data.\n See also: AWS API Documentation\n \n \n :example: response = client.get_encryption_config()\n \n \n :rtype: dict\n :return: {\n 'EncryptionConfig': {\n 'KeyId': 'string',\n 'Status': 'UPDATING'|'ACTIVE',\n 'Type': 'NONE'|'KMS'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_group(GroupName=None, GroupARN=None):\n \"\"\"\n Retrieves group resource details.\n See also: AWS API Documentation\n \n \n :example: response = client.get_group(\n GroupName='string',\n GroupARN='string'\n )\n \n \n :type GroupName: string\n :param GroupName: The case-sensitive name of the group.\n\n :type GroupARN: string\n :param GroupARN: The ARN of the group that was generated on creation.\n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupName': 'string',\n 'GroupARN': 'string',\n 'FilterExpression': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_groups(NextToken=None):\n \"\"\"\n Retrieves all active group details.\n See also: AWS API Documentation\n \n \n :example: response = client.get_groups(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: Pagination token. Not used.\n\n :rtype: dict\n :return: {\n 'Groups': [\n {\n 'GroupName': 'string',\n 'GroupARN': 'string',\n 'FilterExpression': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_sampling_rules(NextToken=None):\n \"\"\"\n Retrieves all sampling rules.\n See also: AWS API Documentation\n \n \n :example: response = client.get_sampling_rules(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: Pagination token. 
Not used.\n\n :rtype: dict\n :return: {\n 'SamplingRuleRecords': [\n {\n 'SamplingRule': {\n 'RuleName': 'string',\n 'RuleARN': 'string',\n 'ResourceARN': 'string',\n 'Priority': 123,\n 'FixedRate': 123.0,\n 'ReservoirSize': 123,\n 'ServiceName': 'string',\n 'ServiceType': 'string',\n 'Host': 'string',\n 'HTTPMethod': 'string',\n 'URLPath': 'string',\n 'Version': 123,\n 'Attributes': {\n 'string': 'string'\n }\n },\n 'CreatedAt': datetime(2015, 1, 1),\n 'ModifiedAt': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_sampling_statistic_summaries(NextToken=None):\n \"\"\"\n Retrieves information about recent sampling results for all sampling rules.\n See also: AWS API Documentation\n \n \n :example: response = client.get_sampling_statistic_summaries(\n NextToken='string'\n )\n \n \n :type NextToken: string\n :param NextToken: Pagination token. Not used.\n\n :rtype: dict\n :return: {\n 'SamplingStatisticSummaries': [\n {\n 'RuleName': 'string',\n 'Timestamp': datetime(2015, 1, 1),\n 'RequestCount': 123,\n 'BorrowCount': 123,\n 'SampledCount': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_sampling_targets(SamplingStatisticsDocuments=None):\n \"\"\"\n Requests a sampling quota for rules that the service is using to sample requests.\n See also: AWS API Documentation\n \n \n :example: response = client.get_sampling_targets(\n SamplingStatisticsDocuments=[\n {\n 'RuleName': 'string',\n 'ClientID': 'string',\n 'Timestamp': datetime(2015, 1, 1),\n 'RequestCount': 123,\n 'SampledCount': 123,\n 'BorrowCount': 123\n },\n ]\n )\n \n \n :type SamplingStatisticsDocuments: list\n :param SamplingStatisticsDocuments: [REQUIRED]\n Information about rules that the service is using to sample requests.\n (dict) --Request sampling results for a single rule from a service. Results are for the last 10 seconds unless the service has been assigned a longer reporting interval after a previous call to GetSamplingTargets .\n RuleName (string) -- [REQUIRED]The name of the sampling rule.\n ClientID (string) -- [REQUIRED]A unique identifier for the service in hexadecimal.\n Timestamp (datetime) -- [REQUIRED]The current time.\n RequestCount (integer) -- [REQUIRED]The number of requests that matched the rule.\n SampledCount (integer) -- [REQUIRED]The number of requests recorded.\n BorrowCount (integer) --The number of requests recorded with borrowed reservoir quota.\n \n \n\n :rtype: dict\n :return: {\n 'SamplingTargetDocuments': [\n {\n 'RuleName': 'string',\n 'FixedRate': 123.0,\n 'ReservoirQuota': 123,\n 'ReservoirQuotaTTL': datetime(2015, 1, 1),\n 'Interval': 123\n },\n ],\n 'LastRuleModification': datetime(2015, 1, 1),\n 'UnprocessedStatistics': [\n {\n 'RuleName': 'string',\n 'ErrorCode': 'string',\n 'Message': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_service_graph(StartTime=None, EndTime=None, GroupName=None, GroupARN=None, NextToken=None):\n \"\"\"\n Retrieves a document that describes services that process incoming requests, and downstream services that they call as a result. Root services process incoming requests and make calls to downstream services. Root services are applications that use the AWS X-Ray SDK. 
Downstream services can be other applications, AWS resources, HTTP web APIs, or SQL databases.\n See also: AWS API Documentation\n \n \n :example: response = client.get_service_graph(\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n GroupName='string',\n GroupARN='string',\n NextToken='string'\n )\n \n \n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The start of the time frame for which to generate a graph.\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The end of the timeframe for which to generate a graph.\n \n\n :type GroupName: string\n :param GroupName: The name of a group to generate a graph based on.\n\n :type GroupARN: string\n :param GroupARN: The ARN of a group to generate a graph based on.\n\n :type NextToken: string\n :param NextToken: Pagination token. Not used.\n\n :rtype: dict\n :return: {\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'Services': [\n {\n 'ReferenceId': 123,\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Root': True|False,\n 'AccountId': 'string',\n 'Type': 'string',\n 'State': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'Edges': [\n {\n 'ReferenceId': 123,\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'SummaryStatistics': {\n 'OkCount': 123,\n 'ErrorStatistics': {\n 'ThrottleCount': 123,\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'FaultStatistics': {\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'TotalCount': 123,\n 'TotalResponseTime': 123.0\n },\n 'ResponseTimeHistogram': [\n {\n 'Value': 123.0,\n 'Count': 123\n },\n ],\n 'Aliases': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Type': 'string'\n },\n ]\n },\n ],\n 'SummaryStatistics': {\n 'OkCount': 123,\n 'ErrorStatistics': {\n 'ThrottleCount': 123,\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'FaultStatistics': {\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'TotalCount': 123,\n 'TotalResponseTime': 123.0\n },\n 'DurationHistogram': [\n {\n 'Value': 123.0,\n 'Count': 123\n },\n ],\n 'ResponseTimeHistogram': [\n {\n 'Value': 123.0,\n 'Count': 123\n },\n ]\n },\n ],\n 'ContainsOldGroupVersions': True|False,\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_trace_graph(TraceIds=None, NextToken=None):\n \"\"\"\n Retrieves a service graph for one or more specific trace IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_trace_graph(\n TraceIds=[\n 'string',\n ],\n NextToken='string'\n )\n \n \n :type TraceIds: list\n :param TraceIds: [REQUIRED]\n Trace IDs of requests for which to generate a service graph.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: Pagination token. 
Not used.\n\n :rtype: dict\n :return: {\n 'Services': [\n {\n 'ReferenceId': 123,\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Root': True|False,\n 'AccountId': 'string',\n 'Type': 'string',\n 'State': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'Edges': [\n {\n 'ReferenceId': 123,\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'SummaryStatistics': {\n 'OkCount': 123,\n 'ErrorStatistics': {\n 'ThrottleCount': 123,\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'FaultStatistics': {\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'TotalCount': 123,\n 'TotalResponseTime': 123.0\n },\n 'ResponseTimeHistogram': [\n {\n 'Value': 123.0,\n 'Count': 123\n },\n ],\n 'Aliases': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Type': 'string'\n },\n ]\n },\n ],\n 'SummaryStatistics': {\n 'OkCount': 123,\n 'ErrorStatistics': {\n 'ThrottleCount': 123,\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'FaultStatistics': {\n 'OtherCount': 123,\n 'TotalCount': 123\n },\n 'TotalCount': 123,\n 'TotalResponseTime': 123.0\n },\n 'DurationHistogram': [\n {\n 'Value': 123.0,\n 'Count': 123\n },\n ],\n 'ResponseTimeHistogram': [\n {\n 'Value': 123.0,\n 'Count': 123\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_trace_summaries(StartTime=None, EndTime=None, Sampling=None, FilterExpression=None, NextToken=None):\n \"\"\"\n Retrieves IDs and metadata for traces available for a specified time frame using an optional filter. To get the full traces, pass the trace IDs to BatchGetTraces .\n A filter expression can target traced requests that hit specific service nodes or edges, have errors, or come from a known user. For example, the following filter expression targets traces that pass through api.example.com :\n This filter expression finds traces that have an annotation named account with the value 12345 :\n For a full list of indexed fields and keywords that you can use in filter expressions, see Using Filter Expressions in the AWS X-Ray Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.get_trace_summaries(\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n Sampling=True|False,\n FilterExpression='string',\n NextToken='string'\n )\n \n \n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The start of the time frame for which to retrieve traces.\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The end of the time frame for which to retrieve traces.\n \n\n :type Sampling: boolean\n :param Sampling: Set to true to get summaries for only a subset of available traces.\n\n :type FilterExpression: string\n :param FilterExpression: Specify a filter expression to retrieve trace summaries for services or requests that meet certain requirements.\n\n :type NextToken: string\n :param NextToken: Specify the pagination token returned by a previous request to retrieve the next page of results.\n\n :rtype: dict\n :return: {\n 'TraceSummaries': [\n {\n 'Id': 'string',\n 'Duration': 123.0,\n 'ResponseTime': 123.0,\n 'HasFault': True|False,\n 'HasError': True|False,\n 'HasThrottle': True|False,\n 'IsPartial': True|False,\n 'Http': {\n 'HttpURL': 'string',\n 'HttpStatus': 123,\n 'HttpMethod': 'string',\n 'UserAgent': 'string',\n 'ClientIp': 'string'\n },\n 'Annotations': {\n 'string': [\n {\n 'AnnotationValue': {\n 'NumberValue': 123.0,\n 'BooleanValue': True|False,\n 'StringValue': 'string'\n },\n 'ServiceIds': [\n 
{\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'AccountId': 'string',\n 'Type': 'string'\n },\n ]\n },\n ]\n },\n 'Users': [\n {\n 'UserName': 'string',\n 'ServiceIds': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'AccountId': 'string',\n 'Type': 'string'\n },\n ]\n },\n ],\n 'ServiceIds': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'AccountId': 'string',\n 'Type': 'string'\n },\n ],\n 'ResourceARNs': [\n {\n 'ARN': 'string'\n },\n ],\n 'InstanceIds': [\n {\n 'Id': 'string'\n },\n ],\n 'AvailabilityZones': [\n {\n 'Name': 'string'\n },\n ],\n 'EntryPoint': {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'AccountId': 'string',\n 'Type': 'string'\n },\n 'FaultRootCauses': [\n {\n 'Services': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Type': 'string',\n 'AccountId': 'string',\n 'EntityPath': [\n {\n 'Name': 'string',\n 'Exceptions': [\n {\n 'Name': 'string',\n 'Message': 'string'\n },\n ],\n 'Remote': True|False\n },\n ],\n 'Inferred': True|False\n },\n ]\n },\n ],\n 'ErrorRootCauses': [\n {\n 'Services': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Type': 'string',\n 'AccountId': 'string',\n 'EntityPath': [\n {\n 'Name': 'string',\n 'Exceptions': [\n {\n 'Name': 'string',\n 'Message': 'string'\n },\n ],\n 'Remote': True|False\n },\n ],\n 'Inferred': True|False\n },\n ]\n },\n ],\n 'ResponseTimeRootCauses': [\n {\n 'Services': [\n {\n 'Name': 'string',\n 'Names': [\n 'string',\n ],\n 'Type': 'string',\n 'AccountId': 'string',\n 'EntityPath': [\n {\n 'Name': 'string',\n 'Coverage': 123.0,\n 'Remote': True|False\n },\n ],\n 'Inferred': True|False\n },\n ]\n },\n ],\n 'Revision': 123\n },\n ],\n 'ApproximateTime': datetime(2015, 1, 1),\n 'TracesProcessedCount': 123,\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) --\n Name (string) --\n Names (list) --\n (string) --\n \n \n AccountId (string) --\n Type (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef put_encryption_config(KeyId=None, Type=None):\n \"\"\"\n Updates the encryption configuration for X-Ray data.\n See also: AWS API Documentation\n \n \n :example: response = client.put_encryption_config(\n KeyId='string',\n Type='NONE'|'KMS'\n )\n \n \n :type KeyId: string\n :param KeyId: An AWS KMS customer master key (CMK) in one of the following formats:\n Alias - The name of the key. For example, alias/MyKey .\n Key ID - The KMS key ID of the key. For example, ae4aa6d49-a4d8-9df9-a475-4ff6d7898456 .\n ARN - The full Amazon Resource Name of the key ID or alias. For example, arn:aws:kms:us-east-2:123456789012:key/ae4aa6d49-a4d8-9df9-a475-4ff6d7898456 . Use this format to specify a key in a different account.\n Omit this key if you set Type to NONE .\n \n\n :type Type: string\n :param Type: [REQUIRED]\n The type of encryption. Set to KMS to use your own key for encryption. 
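For instance (the key alias is a placeholder):\n response = client.put_encryption_config(Type='KMS', KeyId='alias/MyKey')\n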
Set to NONE for default encryption.\n \n\n :rtype: dict\n :return: {\n 'EncryptionConfig': {\n 'KeyId': 'string',\n 'Status': 'UPDATING'|'ACTIVE',\n 'Type': 'NONE'|'KMS'\n }\n }\n \n \n \"\"\"\n pass\n\ndef put_telemetry_records(TelemetryRecords=None, EC2InstanceId=None, Hostname=None, ResourceARN=None):\n \"\"\"\n Used by the AWS X-Ray daemon to upload telemetry.\n See also: AWS API Documentation\n \n \n :example: response = client.put_telemetry_records(\n TelemetryRecords=[\n {\n 'Timestamp': datetime(2015, 1, 1),\n 'SegmentsReceivedCount': 123,\n 'SegmentsSentCount': 123,\n 'SegmentsSpilloverCount': 123,\n 'SegmentsRejectedCount': 123,\n 'BackendConnectionErrors': {\n 'TimeoutCount': 123,\n 'ConnectionRefusedCount': 123,\n 'HTTPCode4XXCount': 123,\n 'HTTPCode5XXCount': 123,\n 'UnknownHostCount': 123,\n 'OtherCount': 123\n }\n },\n ],\n EC2InstanceId='string',\n Hostname='string',\n ResourceARN='string'\n )\n \n \n :type TelemetryRecords: list\n :param TelemetryRecords: [REQUIRED]\n (dict) --\n Timestamp (datetime) -- [REQUIRED]\n SegmentsReceivedCount (integer) --\n SegmentsSentCount (integer) --\n SegmentsSpilloverCount (integer) --\n SegmentsRejectedCount (integer) --\n BackendConnectionErrors (dict) --\n TimeoutCount (integer) --\n ConnectionRefusedCount (integer) --\n HTTPCode4XXCount (integer) --\n HTTPCode5XXCount (integer) --\n UnknownHostCount (integer) --\n OtherCount (integer) --\n \n \n\n :type EC2InstanceId: string\n :param EC2InstanceId: \n\n :type Hostname: string\n :param Hostname: \n\n :type ResourceARN: string\n :param ResourceARN: \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_trace_segments(TraceSegmentDocuments=None):\n \"\"\"\n Uploads segment documents to AWS X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.\n Segments must include the following fields. For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide .\n A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:\n See also: AWS API Documentation\n \n \n :example: response = client.put_trace_segments(\n TraceSegmentDocuments=[\n 'string',\n ]\n )\n \n \n :type TraceSegmentDocuments: list\n :param TraceSegmentDocuments: [REQUIRED]\n A string containing a JSON document defining one or more segments or subsegments.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedTraceSegments': [\n {\n 'Id': 'string',\n 'ErrorCode': 'string',\n 'Message': 'string'\n },\n ]\n }\n \n \n :returns: \n The version number, i.e. 1 .\n The time of the original request, in Unix epoch time, in 8 hexadecimal digits. 
For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal.\n A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.\n \n \"\"\"\n pass\n\ndef update_group(GroupName=None, GroupARN=None, FilterExpression=None):\n \"\"\"\n Updates a group resource.\n See also: AWS API Documentation\n \n \n :example: response = client.update_group(\n GroupName='string',\n GroupARN='string',\n FilterExpression='string'\n )\n \n \n :type GroupName: string\n :param GroupName: The case-sensitive name of the group.\n\n :type GroupARN: string\n :param GroupARN: The ARN that was generated upon creation.\n\n :type FilterExpression: string\n :param FilterExpression: The updated filter expression defining criteria by which to group traces.\n\n :rtype: dict\n :return: {\n 'Group': {\n 'GroupName': 'string',\n 'GroupARN': 'string',\n 'FilterExpression': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_sampling_rule(SamplingRuleUpdate=None):\n \"\"\"\n Modifies a sampling rule's configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.update_sampling_rule(\n SamplingRuleUpdate={\n 'RuleName': 'string',\n 'RuleARN': 'string',\n 'ResourceARN': 'string',\n 'Priority': 123,\n 'FixedRate': 123.0,\n 'ReservoirSize': 123,\n 'Host': 'string',\n 'ServiceName': 'string',\n 'ServiceType': 'string',\n 'HTTPMethod': 'string',\n 'URLPath': 'string',\n 'Attributes': {\n 'string': 'string'\n }\n }\n )\n \n \n :type SamplingRuleUpdate: dict\n :param SamplingRuleUpdate: [REQUIRED]\n The rule and fields to change.\n RuleName (string) --The name of the sampling rule. Specify a rule by either name or ARN, but not both.\n RuleARN (string) --The ARN of the sampling rule. Specify a rule by either name or ARN, but not both.\n ResourceARN (string) --Matches the ARN of the AWS resource on which the service runs.\n Priority (integer) --The priority of the sampling rule.\n FixedRate (float) --The percentage of matching requests to instrument, after the reservoir is exhausted.\n ReservoirSize (integer) --A fixed number of matching requests to instrument per second, prior to applying the fixed rate. 
The reservoir is not used directly by services, but applies to all services using the rule collectively.\n Host (string) --Matches the hostname from a request URL.\n ServiceName (string) --Matches the name that the service uses to identify itself in segments.\n ServiceType (string) --Matches the origin that the service uses to identify its type in segments.\n HTTPMethod (string) --Matches the HTTP method of a request.\n URLPath (string) --Matches the path from a request URL.\n Attributes (dict) --Matches attributes derived from the request.\n (string) --\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'SamplingRuleRecord': {\n 'SamplingRule': {\n 'RuleName': 'string',\n 'RuleARN': 'string',\n 'ResourceARN': 'string',\n 'Priority': 123,\n 'FixedRate': 123.0,\n 'ReservoirSize': 123,\n 'ServiceName': 'string',\n 'ServiceType': 'string',\n 'Host': 'string',\n 'HTTPMethod': 'string',\n 'URLPath': 'string',\n 'Version': 123,\n 'Attributes': {\n 'string': 'string'\n }\n },\n 'CreatedAt': datetime(2015, 1, 1),\n 'ModifiedAt': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.7166471481323242, "alphanum_fraction": 0.7182884216308594, "avg_line_length": 49.170589447021484, "blob_id": "15eca6c314554056b734a2e0926815a6017dcf2c", "content_id": "7c3c2202adb52ff2e6c42d8e70f644fd728e566f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8530, "license_type": "permissive", "max_line_length": 289, "num_lines": 170, "path": "/pyboto3/kinesisvideomedia.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_media(StreamName=None, StreamARN=None, StartSelector=None):\n \"\"\"\n Use this API to retrieve media content from a Kinesis video stream. In the request, you identify stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.\n When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a \"chunk.\" For more information, see . The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.\n The following limits apply when using the GetMedia API:\n See also: AWS API Documentation\n \n \n :example: response = client.get_media(\n StreamName='string',\n StreamARN='string',\n StartSelector={\n 'StartSelectorType': 'FRAGMENT_NUMBER'|'SERVER_TIMESTAMP'|'PRODUCER_TIMESTAMP'|'NOW'|'EARLIEST'|'CONTINUATION_TOKEN',\n 'AfterFragmentNumber': 'string',\n 'StartTimestamp': datetime(2015, 1, 1),\n 'ContinuationToken': 'string'\n }\n )\n \n \n :type StreamName: string\n :param StreamName: The Kinesis video stream name from where you want to get the media content. If you don't specify the streamName , you must specify the streamARN .\n\n :type StreamARN: string\n :param StreamARN: The ARN of the stream from where you want to get the media content. If you don't specify the streamARN , you must specify the streamName .\n\n :type StartSelector: dict\n :param StartSelector: [REQUIRED]\n Identifies the starting chunk to get from the specified stream.\n StartSelectorType (string) -- [REQUIRED]Identifies the fragment on the Kinesis video stream where you want to start getting the data from.\n NOW - Start with the latest chunk on the stream.\n EARLIEST - Start with earliest available chunk on the stream.\n FRAGMENT_NUMBER - Start with the chunk containing the specific fragment. You must also specify the StartFragmentNumber .\n PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified producer or server time stamp. You specify the time stamp by adding StartTimestamp .\n CONTINUATION_TOKEN - Read using the specified continuation token.\n Note\n If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType , you don't provide any additional information in the startSelector .\n AfterFragmentNumber (string) --Specifies the fragment number from where you want the GetMedia API to start returning the fragments.\n StartTimestamp (datetime) --A time stamp value. 
This value is required if you choose the PRODUCER_TIMESTAMP or the SERVER_TIMESTAMP as the startSelectorType . The GetMedia API then starts with the chunk containing the fragment that has the specified time stamp.\n ContinuationToken (string) --Continuation token that Kinesis Video Streams returned in the previous GetMedia response. The GetMedia API then starts with the chunk identified by the continuation token.\n \n\n :rtype: dict\n :return: {\n 'ContentType': 'string',\n 'Payload': StreamingBody()\n }\n \n \n :returns: \n StreamName (string) -- The Kinesis video stream name from where you want to get the media content. If you don't specify the streamName , you must specify the streamARN .\n StreamARN (string) -- The ARN of the stream from where you want to get the media content. If you don't specify the streamARN , you must specify the streamName .\n StartSelector (dict) -- [REQUIRED]\n Identifies the starting chunk to get from the specified stream.\n \n StartSelectorType (string) -- [REQUIRED]Identifies the fragment on the Kinesis video stream where you want to start getting the data from.\n \n NOW - Start with the latest chunk on the stream.\n EARLIEST - Start with earliest available chunk on the stream.\n FRAGMENT_NUMBER - Start with the chunk containing the specific fragment. You must also specify the StartFragmentNumber .\n PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified producer or server time stamp. You specify the time stamp by adding StartTimestamp .\n CONTINUATION_TOKEN - Read using the specified continuation token.\n \n \n Note\n If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType , you don't provide any additional information in the startSelector .\n \n \n AfterFragmentNumber (string) --Specifies the fragment number from where you want the GetMedia API to start returning the fragments.\n \n StartTimestamp (datetime) --A time stamp value. This value is required if you choose the PRODUCER_TIMESTAMP or the SERVER_TIMESTAMP as the startSelectorType . The GetMedia API then starts with the chunk containing the fragment that has the specified time stamp.\n \n ContinuationToken (string) --Continuation token that Kinesis Video Streams returned in the previous GetMedia response. The GetMedia API then starts with the chunk identified by the continuation token.\n \n \n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
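# A minimal consumption sketch for get_media. Note that GetMedia must be called against the
# stream's data endpoint, obtained from the kinesisvideo control plane via get_data_endpoint;
# the stream name 'my-stream' is a placeholder.
import boto3

kv = boto3.client('kinesisvideo')
endpoint = kv.get_data_endpoint(StreamName='my-stream', APIName='GET_MEDIA')['DataEndpoint']

media = boto3.client('kinesis-video-media', endpoint_url=endpoint)
response = media.get_media(
    StreamName='my-stream',
    StartSelector={'StartSelectorType': 'NOW'}  # start with the latest chunk
)

# Payload is a botocore StreamingBody; read the returned chunks incrementally.
stream = response['Payload']
chunk = stream.read(8192)
while chunk:
    # ... process the chunk ...
    chunk = stream.read(8192)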
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6956341862678528, "alphanum_fraction": 0.6975623369216919, "avg_line_length": 39.558658599853516, "blob_id": "b7ada1eba2d30f0876d1ebc7b3185b5bb30a1fd2", "content_id": "df93eedc29f8c5da23610f7271e91b618454aa92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7261, "license_type": "permissive", "max_line_length": 270, "num_lines": 179, "path": "/pyboto3/s3control.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_public_access_block(AccountId=None):\n \"\"\"\n Removes the Public Access Block configuration for an Amazon Web Services account.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_public_access_block(\n AccountId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Account ID for the Amazon Web Services account whose Public Access Block configuration you want to remove.\n \n\n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_public_access_block(AccountId=None):\n \"\"\"\n Retrieves the Public Access Block configuration for an Amazon Web Services account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_public_access_block(\n AccountId='string'\n )\n \n \n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Account ID for the Amazon Web Services account whose Public Access Block configuration you want to retrieve.\n \n\n :rtype: dict\n :return: {\n 'PublicAccessBlockConfiguration': {\n 'BlockPublicAcls': True|False,\n 'IgnorePublicAcls': True|False,\n 'BlockPublicPolicy': True|False,\n 'RestrictPublicBuckets': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef put_public_access_block(PublicAccessBlockConfiguration=None, AccountId=None):\n \"\"\"\n Creates or modifies the Public Access Block configuration for an Amazon Web Services account.\n See also: AWS API Documentation\n \n \n :example: response = client.put_public_access_block(\n PublicAccessBlockConfiguration={\n 'BlockPublicAcls': True|False,\n 'IgnorePublicAcls': True|False,\n 'BlockPublicPolicy': True|False,\n 'RestrictPublicBuckets': True|False\n },\n AccountId='string'\n )\n \n \n :type PublicAccessBlockConfiguration: dict\n :param PublicAccessBlockConfiguration: [REQUIRED]\n The Public Access Block configuration that you want to apply to this Amazon Web Services account.\n BlockPublicAcls (boolean) --Specifies whether Amazon S3 should block public ACLs for buckets in this account. Setting this element to TRUE causes the following behavior:\n PUT Bucket acl and PUT Object acl calls will fail if the specified ACL allows public access.\n PUT Object calls will fail if the request includes an object ACL.\n Note that enabling this setting doesn't affect existing policies or ACLs.\n IgnorePublicAcls (boolean) --Specifies whether Amazon S3 should ignore public ACLs for buckets in this account. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on buckets in this account and any objects that they contain.\n Note that enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.\n BlockPublicPolicy (boolean) --Specifies whether Amazon S3 should block public bucket policies for buckets in this account. 
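# A minimal audit sketch using get_public_access_block, assuming the caller is allowed the
# s3:GetAccountPublicAccessBlock action; the account ID is a placeholder. If no configuration
# has ever been set for the account, the call raises NoSuchPublicAccessBlockConfiguration.
import boto3

s3control = boto3.client('s3control')
config = s3control.get_public_access_block(
    AccountId='111122223333'  # placeholder account ID
)['PublicAccessBlockConfiguration']

if not all(config.values()):
    print('Some account-level public-access protections are disabled:', config)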
Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\n Note that enabling this setting doesn't affect existing bucket policies.\n RestrictPublicBuckets (boolean) --Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. If this element is set to TRUE , then only the bucket owner and AWS Services can access buckets with public policies.\n Note that enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n \n\n :type AccountId: string\n :param AccountId: [REQUIRED]\n The Account ID for the Amazon Web Services account whose Public Access Block configuration you want to set.\n \n\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5279941558837891, "alphanum_fraction": 0.5333478450775146, "avg_line_length": 32.95240783691406, "blob_id": "6c41499aa0466b1c50fc13f371aa33747f0ce072", "content_id": "686d7b7b7ce8286f400390f9c30b817811de8d4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193326, "license_type": "permissive", "max_line_length": 443, "num_lines": 5694, "path": "/pyboto3/glue.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInputList=None):\n    \"\"\"\n    Creates one or more partitions in a batch operation.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.batch_create_partition(\n        CatalogId='string',\n        DatabaseName='string',\n        TableName='string',\n        PartitionInputList=[\n            {\n                'Values': [\n                    'string',\n                ],\n                'LastAccessTime': datetime(2015, 1, 1),\n                'StorageDescriptor': {\n                    'Columns': [\n                        {\n                            'Name': 'string',\n                            'Type': 'string',\n                            'Comment': 'string'\n                        },\n                    ],\n                    'Location': 'string',\n                    'InputFormat': 'string',\n                    'OutputFormat': 'string',\n                    'Compressed': True|False,\n                    'NumberOfBuckets': 123,\n                    'SerdeInfo': {\n                        'Name': 'string',\n                        'SerializationLibrary': 'string',\n                        'Parameters': {\n                            'string': 'string'\n                        }\n                    },\n                    'BucketColumns': [\n                        'string',\n                    ],\n                    'SortColumns': [\n                        {\n                            'Column': 'string',\n                            'SortOrder': 123\n                        },\n                    ],\n                    'Parameters': {\n                        'string': 'string'\n                    },\n                    'SkewedInfo': {\n                        'SkewedColumnNames': [\n                            'string',\n                        ],\n                        'SkewedColumnValues': [\n                            'string',\n                        ],\n                        'SkewedColumnValueLocationMaps': {\n                            'string': 'string'\n                        }\n                    },\n                    'StoredAsSubDirectories': True|False\n                },\n                'Parameters': {\n                    'string': 'string'\n                },\n                'LastAnalyzedTime': datetime(2015, 1, 1)\n            },\n        ]\n    )\n    \n    \n    :type CatalogId: string\n    :param CatalogId: The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.\n\n    :type DatabaseName: string\n    :param DatabaseName: [REQUIRED]\n    The name of the metadata database in which the partition is to be created.\n    \n\n    :type TableName: string\n    :param TableName: [REQUIRED]\n    The name of the metadata table in which the partition is to be created.\n    \n\n    :type PartitionInputList: list\n    :param PartitionInputList: [REQUIRED]\n    A list of PartitionInput structures that define the partitions to be created.\n    (dict) --The structure used to create and update a partition.\n    Values (list) --The values of the partition.\n    (string) --\n    LastAccessTime (datetime) --The last time at which the partition was accessed.\n    StorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n    Columns (list) --A list of the Columns in the table.\n    (dict) --A column in a Table .\n    Name (string) -- [REQUIRED]The name of the Column .\n    Type (string) --The datatype of data in the Column .\n    Comment (string) --Free-form text comment.\n    \n    Location (string) --The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n    InputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n    OutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n    Compressed (boolean) --True if the data in the table is compressed, or False if not.\n    NumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n    SerdeInfo (dict) --Serialization/deserialization (SerDe) information.\n    Name (string) --Name of the SerDe.\n    SerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n Parameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n (string) --\n (string) --\n \n BucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n (string) --\n SortColumns (list) --A list specifying the sort order of each bucket in the table.\n (dict) --Specifies the sort order of a sorted column.\n Column (string) -- [REQUIRED]The name of the column.\n SortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n \n Parameters (dict) --User-supplied properties in key-value form.\n (string) --\n (string) --\n \n SkewedInfo (dict) --Information about values that appear very frequently in a column (skewed values).\n SkewedColumnNames (list) --A list of names of columns that contain skewed values.\n (string) --\n SkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n (string) --\n SkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n (string) --\n (string) --\n \n StoredAsSubDirectories (boolean) --True if the table data is stored in subdirectories, or False if not.\n Parameters (dict) --These key-value pairs define partition parameters.\n (string) --\n (string) --\n \n LastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n \n \n\n :rtype: dict\n :return: {\n 'Errors': [\n {\n 'PartitionValues': [\n 'string',\n ],\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_delete_connection(CatalogId=None, ConnectionNameList=None):\n \"\"\"\n Deletes a list of connection definitions from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_delete_connection(\n CatalogId='string',\n ConnectionNameList=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.\n\n :type ConnectionNameList: list\n :param ConnectionNameList: [REQUIRED]\n A list of names of the connections to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Succeeded': [\n 'string',\n ],\n 'Errors': {\n 'string': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToDelete=None):\n \"\"\"\n Deletes one or more partitions in a batch operation.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_delete_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionsToDelete=[\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. 
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database in which the table in question resides.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table where the partitions to be deleted is located.\n \n\n :type PartitionsToDelete: list\n :param PartitionsToDelete: [REQUIRED]\n A list of PartitionInput structures that define the partitions to be deleted.\n (dict) --Contains a list of values defining partitions.\n Values (list) -- [REQUIRED]The list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'Errors': [\n {\n 'PartitionValues': [\n 'string',\n ],\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_delete_table(CatalogId=None, DatabaseName=None, TablesToDelete=None):\n \"\"\"\n Deletes multiple tables at once.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_delete_table(\n CatalogId='string',\n DatabaseName='string',\n TablesToDelete=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the tables to delete reside. For Hive compatibility, this name is entirely lowercase.\n \n\n :type TablesToDelete: list\n :param TablesToDelete: [REQUIRED]\n A list of the table to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Errors': [\n {\n 'TableName': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionIds=None):\n \"\"\"\n Deletes a specified batch of versions of a table.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_delete_table_version(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n VersionIds=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table. For Hive compatibility, this name is entirely lowercase.\n \n\n :type VersionIds: list\n :param VersionIds: [REQUIRED]\n A list of the IDs of versions to be deleted. A VersionId is a string representation of an integer. 
Each version is incremented by 1.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Errors': [\n {\n 'TableName': 'string',\n 'VersionId': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToGet=None):\n \"\"\"\n Retrieves partitions in a batch request.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_get_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionsToGet=[\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the partitions reside.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the partitions' table.\n \n\n :type PartitionsToGet: list\n :param PartitionsToGet: [REQUIRED]\n A list of partition values identifying the partitions to retrieve.\n (dict) --Contains a list of values defining partitions.\n Values (list) -- [REQUIRED]The list of values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'Partitions': [\n {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ],\n 'UnprocessedKeys': [\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_stop_job_run(JobName=None, JobRunIds=None):\n \"\"\"\n Stops one or more job runs for a specified job definition.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_stop_job_run(\n JobName='string',\n JobRunIds=[\n 'string',\n ]\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n The name of the job definition for which to stop job runs.\n \n\n :type JobRunIds: list\n :param JobRunIds: [REQUIRED]\n A list of the JobRunIds that should be stopped for that job definition.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'SuccessfulSubmissions': [\n {\n 'JobName': 'string',\n 'JobRunId': 'string'\n },\n ],\n 'Errors': [\n {\n 'JobName': 'string',\n 'JobRunId': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. 
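# The batch operations above report per-item failures in an Errors list instead of raising.
# A minimal sketch of handling that pattern with batch_stop_job_run; the job name and run
# IDs below are placeholders.
import boto3

glue = boto3.client('glue')
response = glue.batch_stop_job_run(
    JobName='nightly-etl',                     # placeholder job name
    JobRunIds=['jr_0123abcd', 'jr_4567efgh']   # placeholder run IDs
)

for ok in response['SuccessfulSubmissions']:
    print('stopped', ok['JobRunId'])
for err in response['Errors']:
    print('failed', err['JobRunId'], err['ErrorDetail']['ErrorMessage'])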
This is the same name\n    as the method name on the client. For example, if the\n    method name is create_foo, and you'd normally invoke the\n    operation as client.create_foo(**kwargs), if the\n    create_foo operation can be paginated, you can use the\n    call client.get_paginator('create_foo').\n\n    \"\"\"\n    pass\n\ndef create_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None):\n    \"\"\"\n    Creates a classifier in the user's account. This may be a GrokClassifier , an XMLClassifier , or a JsonClassifier , depending on which field of the request is present.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_classifier(\n        GrokClassifier={\n            'Classification': 'string',\n            'Name': 'string',\n            'GrokPattern': 'string',\n            'CustomPatterns': 'string'\n        },\n        XMLClassifier={\n            'Classification': 'string',\n            'Name': 'string',\n            'RowTag': 'string'\n        },\n        JsonClassifier={\n            'Name': 'string',\n            'JsonPath': 'string'\n        }\n    )\n    \n    \n    :type GrokClassifier: dict\n    :param GrokClassifier: A GrokClassifier object specifying the classifier to create.\n    Classification (string) -- [REQUIRED]An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n    Name (string) -- [REQUIRED]The name of the new classifier.\n    GrokPattern (string) -- [REQUIRED]The grok pattern used by this classifier.\n    CustomPatterns (string) --Optional custom grok patterns used by this classifier.\n    \n\n    :type XMLClassifier: dict\n    :param XMLClassifier: An XMLClassifier object specifying the classifier to create.\n    Classification (string) -- [REQUIRED]An identifier of the data format that the classifier matches.\n    Name (string) -- [REQUIRED]The name of the classifier.\n    RowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n    \n\n    :type JsonClassifier: dict\n    :param JsonClassifier: A JsonClassifier object specifying the classifier to create.\n    Name (string) -- [REQUIRED]The name of the classifier.\n    JsonPath (string) -- [REQUIRED]A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers .\n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef create_connection(CatalogId=None, ConnectionInput=None):\n    \"\"\"\n    Creates a connection definition in the Data Catalog.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_connection(\n        CatalogId='string',\n        ConnectionInput={\n            'Name': 'string',\n            'Description': 'string',\n            'ConnectionType': 'JDBC'|'SFTP',\n            'MatchCriteria': [\n                'string',\n            ],\n            'ConnectionProperties': {\n                'string': 'string'\n            },\n            'PhysicalConnectionRequirements': {\n                'SubnetId': 'string',\n                'SecurityGroupIdList': [\n                    'string',\n                ],\n                'AvailabilityZone': 'string'\n            }\n        }\n    )\n    \n    \n    :type CatalogId: string\n    :param CatalogId: The ID of the Data Catalog in which to create the connection. 
If none is supplied, the AWS account ID is used by default.\n\n :type ConnectionInput: dict\n :param ConnectionInput: [REQUIRED]\n A ConnectionInput object defining the connection to create.\n Name (string) -- [REQUIRED]The name of the connection.\n Description (string) --Description of the connection.\n ConnectionType (string) -- [REQUIRED]The type of the connection. Currently, only JDBC is supported; SFTP is not supported.\n MatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n (string) --\n ConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n (string) --\n (string) --\n \n PhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.\n SubnetId (string) --The subnet ID used by the connection.\n SecurityGroupIdList (list) --The security group ID list used by the connection.\n (string) --\n AvailabilityZone (string) --The connection's availability zone. This field is redundant, since the specified subnet implies the availability zone to be used. The field must be populated now, but will be deprecated in the future.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None):\n \"\"\"\n Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in the s3Targets field, the jdbcTargets field, or the DynamoDBTargets field.\n See also: AWS API Documentation\n \n \n :example: response = client.create_crawler(\n Name='string',\n Role='string',\n DatabaseName='string',\n Description='string',\n Targets={\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ]\n },\n Schedule='string',\n Classifiers=[\n 'string',\n ],\n TablePrefix='string',\n SchemaChangePolicy={\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n Configuration='string',\n CrawlerSecurityConfiguration='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the new crawler.\n \n\n :type Role: string\n :param Role: [REQUIRED]\n The IAM role (or ARN of an IAM role) used by the new crawler to access customer resources.\n \n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/* .\n \n\n :type Description: string\n :param Description: A description of the new crawler.\n\n :type Targets: dict\n :param Targets: [REQUIRED]\n A list of collection of targets to crawl.\n S3Targets (list) --Specifies Amazon S3 targets.\n (dict) --Specifies a data store in Amazon S3.\n Path (string) --The path to the Amazon S3 target.\n Exclusions (list) --A list of glob patterns used to exclude from the crawl. 
For more information, see Catalog Tables with a Crawler .\n (string) --\n \n JdbcTargets (list) --Specifies JDBC targets.\n (dict) --Specifies a JDBC data store to crawl.\n ConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n Path (string) --The path of the JDBC target.\n Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n (string) --\n \n DynamoDBTargets (list) --Specifies DynamoDB targets.\n (dict) --Specifies a DynamoDB table to crawl.\n Path (string) --The name of the DynamoDB table to crawl.\n \n \n\n :type Schedule: string\n :param Schedule: A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\n :type Classifiers: list\n :param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n (string) --\n \n\n :type TablePrefix: string\n :param TablePrefix: The table prefix used for catalog tables that are created.\n\n :type SchemaChangePolicy: dict\n :param SchemaChangePolicy: Policy for the crawler's update and deletion behavior.\n UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n \n\n :type Configuration: string\n :param Configuration: Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler .\n\n :type CrawlerSecurityConfiguration: string\n :param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this Crawler.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_database(CatalogId=None, DatabaseInput=None):\n \"\"\"\n Creates a new database in a Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.create_database(\n CatalogId='string',\n DatabaseInput={\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the database. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseInput: dict\n :param DatabaseInput: [REQUIRED]\n A DatabaseInput object defining the metadata database to create in the catalog.\n Name (string) -- [REQUIRED]Name of the database. 
For Hive compatibility, this is folded to lowercase when it is stored.\n    Description (string) --Description of the database.\n    LocationUri (string) --The location of the database (for example, an HDFS path).\n    Parameters (dict) --These key-value pairs define parameters and properties of the database.\n    (string) --\n    (string) --\n    \n    \n\n    :rtype: dict\n    :return: {}\n    \n    \n    :returns: \n    (dict) --\n    \n    \"\"\"\n    pass\n\ndef create_dev_endpoint(EndpointName=None, RoleArn=None, SecurityGroupIds=None, SubnetId=None, PublicKey=None, PublicKeys=None, NumberOfNodes=None, ExtraPythonLibsS3Path=None, ExtraJarsS3Path=None, SecurityConfiguration=None):\n    \"\"\"\n    Creates a new DevEndpoint.\n    See also: AWS API Documentation\n    \n    \n    :example: response = client.create_dev_endpoint(\n        EndpointName='string',\n        RoleArn='string',\n        SecurityGroupIds=[\n            'string',\n        ],\n        SubnetId='string',\n        PublicKey='string',\n        PublicKeys=[\n            'string',\n        ],\n        NumberOfNodes=123,\n        ExtraPythonLibsS3Path='string',\n        ExtraJarsS3Path='string',\n        SecurityConfiguration='string'\n    )\n    \n    \n    :type EndpointName: string\n    :param EndpointName: [REQUIRED]\n    The name to be assigned to the new DevEndpoint.\n    \n\n    :type RoleArn: string\n    :param RoleArn: [REQUIRED]\n    The IAM role for the DevEndpoint.\n    \n\n    :type SecurityGroupIds: list\n    :param SecurityGroupIds: Security group IDs for the security groups to be used by the new DevEndpoint.\n    (string) --\n    \n\n    :type SubnetId: string\n    :param SubnetId: The subnet ID for the new DevEndpoint to use.\n\n    :type PublicKey: string\n    :param PublicKey: The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility, as the recommended attribute to use is public keys.\n\n    :type PublicKeys: list\n    :param PublicKeys: A list of public keys to be used by the DevEndpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n    Note\n    If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys: call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n    (string) --\n    \n\n    :type NumberOfNodes: integer\n    :param NumberOfNodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint.\n\n    :type ExtraPythonLibsS3Path: string\n    :param ExtraPythonLibsS3Path: Path(s) to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma.\n    Please note that only pure Python libraries can currently be used on a DevEndpoint. 
Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.\n \n\n :type ExtraJarsS3Path: string\n :param ExtraJarsS3Path: Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.\n\n :type SecurityConfiguration: string\n :param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this DevEndpoint.\n\n :rtype: dict\n :return: {\n 'EndpointName': 'string',\n 'Status': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'RoleArn': 'string',\n 'YarnEndpointAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'SecurityConfiguration': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_job(Name=None, Description=None, LogUri=None, Role=None, ExecutionProperty=None, Command=None, DefaultArguments=None, Connections=None, MaxRetries=None, AllocatedCapacity=None, Timeout=None, NotificationProperty=None, SecurityConfiguration=None):\n \"\"\"\n Creates a new job definition.\n See also: AWS API Documentation\n \n \n :example: response = client.create_job(\n Name='string',\n Description='string',\n LogUri='string',\n Role='string',\n ExecutionProperty={\n 'MaxConcurrentRuns': 123\n },\n Command={\n 'Name': 'string',\n 'ScriptLocation': 'string'\n },\n DefaultArguments={\n 'string': 'string'\n },\n Connections={\n 'Connections': [\n 'string',\n ]\n },\n MaxRetries=123,\n AllocatedCapacity=123,\n Timeout=123,\n NotificationProperty={\n 'NotifyDelayAfter': 123\n },\n SecurityConfiguration='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name you assign to this job definition. It must be unique in your account.\n \n\n :type Description: string\n :param Description: Description of the job being defined.\n\n :type LogUri: string\n :param LogUri: This field is reserved for future use.\n\n :type Role: string\n :param Role: [REQUIRED]\n The name or ARN of the IAM role associated with this job.\n \n\n :type ExecutionProperty: dict\n :param ExecutionProperty: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. 
The maximum value you can specify is controlled by a service limit.\n \n\n :type Command: dict\n :param Command: [REQUIRED]\n The JobCommand that executes this job.\n Name (string) --The name of the job command: this must be glueetl .\n ScriptLocation (string) --Specifies the S3 path to a script that executes a job (required).\n \n\n :type DefaultArguments: dict\n :param DefaultArguments: The default arguments for this job.\n You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\n For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\n For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n (string) --\n (string) --\n \n\n :type Connections: dict\n :param Connections: The connections used for this job.\n Connections (list) --A list of connections used by the job.\n (string) --\n \n\n :type MaxRetries: integer\n :param MaxRetries: The maximum number of times to retry this job if it fails.\n\n :type AllocatedCapacity: integer\n :param AllocatedCapacity: The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\n :type Timeout: integer\n :param Timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\n :type NotificationProperty: dict\n :param NotificationProperty: Specifies configuration properties of a job notification.\n NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n \n\n :type SecurityConfiguration: string\n :param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job.\n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInput=None):\n \"\"\"\n Creates a new partition.\n See also: AWS API Documentation\n \n \n :example: response = client.create_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionInput={\n 'Values': [\n 'string',\n ],\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the catalog in which the partion 
is to be created. Currently, this should be the AWS account ID.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the metadata database in which the partition is to be created.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the metadata table in which the partition is to be created.\n \n\n :type PartitionInput: dict\n :param PartitionInput: [REQUIRED]\n A PartitionInput structure defining the partition to be created.\n Values (list) --The values of the partition.\n (string) --\n LastAccessTime (datetime) --The last time at which the partition was accessed.\n StorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n Columns (list) --A list of the Columns in the table.\n (dict) --A column in a Table .\n Name (string) -- [REQUIRED]The name of the Column .\n Type (string) --The datatype of data in the Column .\n Comment (string) --Free-form text comment.\n \n Location (string) --The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n InputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n OutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n Compressed (boolean) --True if the data in the table is compressed, or False if not.\n NumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n SerdeInfo (dict) --Serialization/deserialization (SerDe) information.\n Name (string) --Name of the SerDe.\n SerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n Parameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n (string) --\n (string) --\n \n BucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n (string) --\n SortColumns (list) --A list specifying the sort order of each bucket in the table.\n (dict) --Specifies the sort order of a sorted column.\n Column (string) -- [REQUIRED]The name of the column.\n SortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n \n Parameters (dict) --User-supplied properties in key-value form.\n (string) --\n (string) --\n \n SkewedInfo (dict) --Information about values that appear very frequently in a column (skewed values).\n SkewedColumnNames (list) --A list of names of columns that contain skewed values.\n (string) --\n SkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n (string) --\n SkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n (string) --\n (string) --\n \n StoredAsSubDirectories (boolean) --True if the table data is stored in subdirectories, or False if not.\n Parameters (dict) --These key-value pairs define partition parameters.\n (string) --\n (string) --\n \n LastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_script(DagNodes=None, DagEdges=None, Language=None):\n \"\"\"\n Transforms a directed acyclic graph (DAG) into code.\n See also: AWS API Documentation\n \n \n :example: response = client.create_script(\n DagNodes=[\n {\n 'Id': 'string',\n 'NodeType': 'string',\n 'Args': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'LineNumber': 123\n },\n ],\n DagEdges=[\n {\n 'Source': 'string',\n 'Target': 'string',\n 'TargetParameter': 'string'\n },\n ],\n Language='PYTHON'|'SCALA'\n )\n \n \n :type DagNodes: list\n :param DagNodes: A list of the nodes in the DAG.\n (dict) --Represents a node in a directed acyclic graph (DAG)\n Id (string) -- [REQUIRED]A node identifier that is unique within the node's graph.\n NodeType (string) -- [REQUIRED]The type of node this is.\n Args (list) -- [REQUIRED]Properties of the node, in the form of name-value pairs.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used as a parameter.\n \n LineNumber (integer) --The line number of the node.\n \n \n\n :type DagEdges: list\n :param DagEdges: A list of the edges in the DAG.\n (dict) --Represents a directional edge in a directed acyclic graph (DAG).\n Source (string) -- [REQUIRED]The ID of the node at which the edge starts.\n Target (string) -- [REQUIRED]The ID of the node at which the edge ends.\n TargetParameter (string) --The target of the edge.\n \n \n\n :type Language: string\n :param Language: The programming language of the resulting code from the DAG.\n\n :rtype: dict\n :return: {\n 'PythonScript': 'string',\n 'ScalaCode': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_security_configuration(Name=None, EncryptionConfiguration=None):\n \"\"\"\n Creates a new security configuration.\n See also: AWS API Documentation\n \n \n 
:example: response = client.create_security_configuration(\n Name='string',\n EncryptionConfiguration={\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name for the new security configuration.\n \n\n :type EncryptionConfiguration: dict\n :param EncryptionConfiguration: [REQUIRED]\n The encryption configuration for the new security configuration.\n S3Encryption (list) --The encryption configuration for S3 data.\n (dict) --Specifies how S3 data should be encrypted.\n S3EncryptionMode (string) --The encryption mode to use for S3 data.\n KmsKeyArn (string) --The AWS ARN of the KMS key to be used to encrypt the data.\n \n CloudWatchEncryption (dict) --The encryption configuration for CloudWatch.\n CloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.\n KmsKeyArn (string) --The AWS ARN of the KMS key to be used to encrypt the data.\n JobBookmarksEncryption (dict) --The encryption configuration for Job Bookmarks.\n JobBookmarksEncryptionMode (string) --The encryption mode to use for Job bookmarks data.\n KmsKeyArn (string) --The AWS ARN of the KMS key to be used to encrypt the data.\n \n \n\n :rtype: dict\n :return: {\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef create_table(CatalogId=None, DatabaseName=None, TableInput=None):\n \"\"\"\n Creates a new table definition in the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.create_table(\n CatalogId='string',\n DatabaseName='string',\n TableInput={\n 'Name': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the Table . If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The catalog database in which to create the new table. 
For Hive compatibility, this name is entirely lowercase.\n \n\n :type TableInput: dict\n :param TableInput: [REQUIRED]\n The TableInput object that defines the metadata table to create in the catalog.\n Name (string) -- [REQUIRED]Name of the table. For Hive compatibility, this is folded to lowercase when it is stored.\n Description (string) --Description of the table.\n Owner (string) --Owner of the table.\n LastAccessTime (datetime) --Last time the table was accessed.\n LastAnalyzedTime (datetime) --Last time column statistics were computed for this table.\n Retention (integer) --Retention time for this table.\n StorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n Columns (list) --A list of the Columns in the table.\n (dict) --A column in a Table .\n Name (string) -- [REQUIRED]The name of the Column .\n Type (string) --The datatype of data in the Column .\n Comment (string) --Free-form text comment.\n \n Location (string) --The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n InputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n OutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n Compressed (boolean) --True if the data in the table is compressed, or False if not.\n NumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n SerdeInfo (dict) --Serialization/deserialization (SerDe) information.\n Name (string) --Name of the SerDe.\n SerializationLibrary (string) --Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n Parameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n (string) --\n (string) --\n \n BucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n (string) --\n SortColumns (list) --A list specifying the sort order of each bucket in the table.\n (dict) --Specifies the sort order of a sorted column.\n Column (string) -- [REQUIRED]The name of the column.\n SortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n \n Parameters (dict) --User-supplied properties in key-value form.\n (string) --\n (string) --\n \n SkewedInfo (dict) --Information about values that appear very frequently in a column (skewed values).\n SkewedColumnNames (list) --A list of names of columns that contain skewed values.\n (string) --\n SkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n (string) --\n SkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n (string) --\n (string) --\n \n StoredAsSubDirectories (boolean) --True if the table data is stored in subdirectories, or False if not.\n PartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\n When creating a table used by Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n 'PartitionKeys': []\n (dict) --A column in a Table .\n Name (string) -- [REQUIRED]The name of the Column .\n Type (string) --The datatype of data in the Column .\n Comment (string) --Free-form text comment.\n \n ViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n ViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n TableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n Parameters (dict) --These key-value pairs define properties associated with the table.\n (string) --\n (string) --\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_trigger(Name=None, Type=None, Schedule=None, Predicate=None, Actions=None, Description=None, StartOnCreation=None):\n \"\"\"\n Creates a new trigger.\n See also: AWS API Documentation\n \n \n :example: response = client.create_trigger(\n Name='string',\n Type='SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n Schedule='string',\n Predicate={\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n },\n ]\n },\n Actions=[\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n },\n ],\n Description='string',\n StartOnCreation=True|False\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the trigger.\n \n\n :type Type: string\n :param Type: [REQUIRED]\n The type of the new trigger.\n \n\n :type Schedule: string\n :param Schedule: A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n This field is required when the trigger type is SCHEDULED.\n \n\n :type Predicate: dict\n :param Predicate: A predicate to specify when the new trigger should fire.\n This field is required when the trigger type is CONDITIONAL.\n Logical (string) --Optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n Conditions (list) --A list of the conditions that determine when the trigger will fire.\n (dict) --Defines a condition under which a trigger fires.\n LogicalOperator (string) --A logical operator.\n JobName (string) --The name of the Job to whose JobRuns this condition applies and on which this trigger waits.\n State (string) --The condition state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT and FAILED.\n \n \n\n :type Actions: list\n :param Actions: [REQUIRED]\n The actions initiated by this trigger when it fires.\n (dict) --Defines an action to be initiated by a trigger.\n JobName (string) --The name of a job to be executed.\n Arguments (dict) --Arguments to be passed to the job run.\n You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\n For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\n For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n (string) --\n (string) --\n \n Timeout (integer) --The JobRun timeout in minutes. 
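A minimal hedged sketch of a scheduled trigger (my_trigger and my_job are hypothetical names):\n client.create_trigger(\n Name='my_trigger', # hypothetical trigger name\n Type='SCHEDULED',\n Schedule='cron(15 12 * * ? *)', # daily at 12:15 UTC, per the Schedule docs above\n Actions=[{'JobName': 'my_job'}] # hypothetical job name\n )\n Each action may also carry its own Timeout value. 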
This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n NotificationProperty (dict) --Specifies configuration properties of a job run notification.\n NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n \n \n\n :type Description: string\n :param Description: A description of the new trigger.\n\n :type StartOnCreation: boolean\n :param StartOnCreation: Set to true to start SCHEDULED and CONDITIONAL triggers when created. True not supported for ON_DEMAND triggers.\n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_user_defined_function(CatalogId=None, DatabaseName=None, FunctionInput=None):\n \"\"\"\n Creates a new function definition in the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.create_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionInput={\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the function. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database in which to create the function.\n \n\n :type FunctionInput: dict\n :param FunctionInput: [REQUIRED]\n A FunctionInput object that defines the function to create in the Data Catalog.\n FunctionName (string) --The name of the function.\n ClassName (string) --The Java class that contains the function code.\n OwnerName (string) --The owner of the function.\n OwnerType (string) --The owner type.\n ResourceUris (list) --The resource URIs for the function.\n (dict) --URIs for function resources.\n ResourceType (string) --The type of the resource.\n Uri (string) --The URI for accessing the resource.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_classifier(Name=None):\n \"\"\"\n Removes a classifier from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_classifier(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the classifier to remove.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_connection(CatalogId=None, ConnectionName=None):\n \"\"\"\n Deletes a connection from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_connection(\n CatalogId='string',\n ConnectionName='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connection resides. 
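A minimal hedged sketch (my_connection is a hypothetical name):\n client.delete_connection(\n ConnectionName='my_connection' # hypothetical connection name\n )\n The CatalogId argument can simply be omitted. 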
If none is supplied, the AWS account ID is used by default.\n\n :type ConnectionName: string\n :param ConnectionName: [REQUIRED]\n The name of the connection to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_crawler(Name=None):\n \"\"\"\n Removes a specified crawler from the Data Catalog, unless the crawler state is RUNNING .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the crawler to remove.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_database(CatalogId=None, Name=None):\n \"\"\"\n Removes a specified Database from a Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_database(\n CatalogId='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the Database to delete. For Hive compatibility, this must be all lowercase.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_dev_endpoint(EndpointName=None):\n \"\"\"\n Deletes a specified DevEndpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_dev_endpoint(\n EndpointName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the DevEndpoint.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_job(JobName=None):\n \"\"\"\n Deletes a specified job definition. If the job definition is not found, no exception is thrown.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_job(\n JobName='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n The name of the job definition to delete.\n \n\n :rtype: dict\n :return: {\n 'JobName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):\n \"\"\"\n Deletes a specified partition.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionValues=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. 
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database in which the table in question resides.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table where the partition to be deleted is located.\n \n\n :type PartitionValues: list\n :param PartitionValues: [REQUIRED]\n The values that define the partition.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_resource_policy(PolicyHashCondition=None):\n \"\"\"\n Deletes a specified policy.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_resource_policy(\n PolicyHashCondition='string'\n )\n \n \n :type PolicyHashCondition: string\n :param PolicyHashCondition: The hash value returned when this policy was set.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_security_configuration(Name=None):\n \"\"\"\n Deletes a specified security configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_security_configuration(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the security configuration to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_table(CatalogId=None, DatabaseName=None, Name=None):\n \"\"\"\n Removes a table definition from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_table(\n CatalogId='string',\n DatabaseName='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the table to be deleted. For Hive compatibility, this name is entirely lowercase.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):\n \"\"\"\n Deletes a specified version of a table.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_table_version(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n VersionId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table. For Hive compatibility, this name is entirely lowercase.\n \n\n :type VersionId: string\n :param VersionId: [REQUIRED]\n The ID of the table version to be deleted. A VersionID is a string representation of an integer. Each version is incremented by 1.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_trigger(Name=None):\n \"\"\"\n Deletes a specified trigger. 
If the trigger is not found, no exception is thrown.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the trigger to delete.\n \n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):\n \"\"\"\n Deletes an existing function definition from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionName='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the function is located.\n \n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the function definition to be deleted.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_catalog_import_status(CatalogId=None):\n \"\"\"\n Retrieves the status of a migration operation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_catalog_import_status(\n CatalogId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the catalog to migrate. 
Currently, this should be the AWS account ID.\n\n :rtype: dict\n :return: {\n 'ImportStatus': {\n 'ImportCompleted': True|False,\n 'ImportTime': datetime(2015, 1, 1),\n 'ImportedBy': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_classifier(Name=None):\n \"\"\"\n Retrieve a classifier by name.\n See also: AWS API Documentation\n \n \n :example: response = client.get_classifier(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the classifier to retrieve.\n \n\n :rtype: dict\n :return: {\n 'Classifier': {\n 'GrokClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n 'XMLClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'RowTag': 'string'\n },\n 'JsonClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'JsonPath': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_classifiers(MaxResults=None, NextToken=None):\n \"\"\"\n Lists all classifier objects in the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_classifiers(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: Size of the list to return (optional).\n\n :type NextToken: string\n :param NextToken: An optional continuation token.\n\n :rtype: dict\n :return: {\n 'Classifiers': [\n {\n 'GrokClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n 'XMLClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'RowTag': 'string'\n },\n 'JsonClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'JsonPath': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_connection(CatalogId=None, Name=None, HidePassword=None):\n \"\"\"\n Retrieves a connection definition from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_connection(\n CatalogId='string',\n Name='string',\n HidePassword=True|False\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the connection definition to retrieve.\n \n\n :type HidePassword: boolean\n :param HidePassword: Allow you to retrieve the connection metadata without displaying the password. For instance, the AWS Glue console uses this flag to retrieve connections, since the console does not display passwords. 
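A minimal hedged sketch (my_connection is a hypothetical name):\n response = client.get_connection(\n Name='my_connection', # hypothetical connection name\n HidePassword=True\n )\n props = response['Connection']['ConnectionProperties']\n Passing HidePassword=True is also a reasonable choice when KMS access is restricted. 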
Set this parameter where the caller may not have permission to use the KMS key to decrypt the password, but does have permission to access the rest of the connection metadata (that is, the other connection properties).\n\n :rtype: dict\n :return: {\n 'Connection': {\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'LastUpdatedBy': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_connections(CatalogId=None, Filter=None, HidePassword=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves a list of connection definitions from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_connections(\n CatalogId='string',\n Filter={\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionType': 'JDBC'|'SFTP'\n },\n HidePassword=True|False,\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.\n\n :type Filter: dict\n :param Filter: A filter that controls which connections will be returned.\n MatchCriteria (list) --A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.\n (string) --\n ConnectionType (string) --The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.\n \n\n :type HidePassword: boolean\n :param HidePassword: Allow you to retrieve the connection metadata without displaying the password. For instance, the AWS Glue console uses this flag to retrieve connections, since the console does not display passwords. 
Set this parameter where the caller may not have permission to use the KMS key to decrypt the password, but does have permission to access the rest of the connection metadata (that is, the other connection properties).\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of connections to return in one response.\n\n :rtype: dict\n :return: {\n 'ConnectionList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'LastUpdatedBy': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_crawler(Name=None):\n \"\"\"\n Retrieves metadata for a specified crawler.\n See also: AWS API Documentation\n \n \n :example: response = client.get_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the crawler to retrieve metadata for.\n \n\n :rtype: dict\n :return: {\n 'Crawler': {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_crawler_metrics(CrawlerNameList=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves metrics about specified crawlers.\n See also: AWS API Documentation\n \n \n :example: response = client.get_crawler_metrics(\n CrawlerNameList=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type CrawlerNameList: list\n :param CrawlerNameList: A list of the names of crawlers about which to retrieve metrics.\n (string) --\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :rtype: dict\n :return: {\n 'CrawlerMetricsList': [\n {\n 'CrawlerName': 'string',\n 'TimeLeftSeconds': 123.0,\n 'StillEstimating': True|False,\n 'LastRuntimeSeconds': 123.0,\n 'MedianRuntimeSeconds': 123.0,\n 'TablesCreated': 123,\n 'TablesUpdated': 123,\n 'TablesDeleted': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef 
get_crawlers(MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves metadata for all crawlers defined in the customer account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_crawlers(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The number of crawlers to return on each call.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :rtype: dict\n :return: {\n 'Crawlers': [\n {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_data_catalog_encryption_settings(CatalogId=None):\n \"\"\"\n Retrieves the security configuration for a specified catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_data_catalog_encryption_settings(\n CatalogId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog for which to retrieve the security configuration. If none is supplied, the AWS account ID is used by default.\n\n :rtype: dict\n :return: {\n 'DataCatalogEncryptionSettings': {\n 'EncryptionAtRest': {\n 'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'SseAwsKmsKeyId': 'string'\n },\n 'ConnectionPasswordEncryption': {\n 'ReturnConnectionPasswordEncrypted': True|False,\n 'AwsKmsKeyId': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_database(CatalogId=None, Name=None):\n \"\"\"\n Retrieves the definition of a specified database.\n See also: AWS API Documentation\n \n \n :example: response = client.get_database(\n CatalogId='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the database to retrieve. 
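A minimal hedged sketch (my_db is a hypothetical name):\n response = client.get_database(\n Name='my_db' # hypothetical database name\n )\n print(response['Database']['Name'])\n The Name must match the stored database name. 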
For Hive compatibility, this should be all lowercase.\n \n\n :rtype: dict\n :return: {\n 'Database': {\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_databases(CatalogId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves all Databases defined in a given Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_databases(\n CatalogId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog from which to retrieve Databases . If none is supplied, the AWS account ID is used by default.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of databases to return in one response.\n\n :rtype: dict\n :return: {\n 'DatabaseList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_dataflow_graph(PythonScript=None):\n \"\"\"\n Transforms a Python script into a directed acyclic graph (DAG).\n See also: AWS API Documentation\n \n \n :example: response = client.get_dataflow_graph(\n PythonScript='string'\n )\n \n \n :type PythonScript: string\n :param PythonScript: The Python script to transform.\n\n :rtype: dict\n :return: {\n 'DagNodes': [\n {\n 'Id': 'string',\n 'NodeType': 'string',\n 'Args': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'LineNumber': 123\n },\n ],\n 'DagEdges': [\n {\n 'Source': 'string',\n 'Target': 'string',\n 'TargetParameter': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_dev_endpoint(EndpointName=None):\n \"\"\"\n Retrieves information about a specified DevEndpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dev_endpoint(\n EndpointName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n Name of the DevEndpoint for which to retrieve information.\n \n\n :rtype: dict\n :return: {\n 'DevEndpoint': {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_dev_endpoints(MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves all the DevEndpoints in this AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_dev_endpoints(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum size of information to return.\n\n :type NextToken: string\n :param NextToken: A continuation 
token, if this is a continuation call.\n\n :rtype: dict\n :return: {\n 'DevEndpoints': [\n {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_job(JobName=None):\n \"\"\"\n Retrieves an existing job definition.\n See also: AWS API Documentation\n \n \n :example: response = client.get_job(\n JobName='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n The name of the job definition to retrieve.\n \n\n :rtype: dict\n :return: {\n 'Job': {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_job_run(JobName=None, RunId=None, PredecessorsIncluded=None):\n \"\"\"\n Retrieves the metadata for a given job run.\n See also: AWS API Documentation\n \n \n :example: response = client.get_job_run(\n JobName='string',\n RunId='string',\n PredecessorsIncluded=True|False\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n Name of the job definition being run.\n \n\n :type RunId: string\n :param RunId: [REQUIRED]\n The ID of the job run.\n \n\n :type PredecessorsIncluded: boolean\n :param PredecessorsIncluded: True if a list of predecessor runs should be returned.\n\n :rtype: dict\n :return: {\n 'JobRun': {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_job_runs(JobName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves metadata for all runs of a given job definition.\n See also: AWS API Documentation\n \n \n :example: response = client.get_job_runs(\n JobName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n The name of 
the job definition for which to retrieve all job runs.\n \n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of the response.\n\n :rtype: dict\n :return: {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_jobs(NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves all current job definitions.\n See also: AWS API Documentation\n \n \n :example: response = client.get_jobs(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of the response.\n\n :rtype: dict\n :return: {\n 'Jobs': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_mapping(Source=None, Sinks=None, Location=None):\n \"\"\"\n Creates mappings.\n See also: AWS API Documentation\n \n \n :example: response = client.get_mapping(\n Source={\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n Sinks=[\n {\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n ],\n Location={\n 'Jdbc': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'S3': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'DynamoDB': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ]\n }\n )\n \n \n :type Source: dict\n :param Source: [REQUIRED]\n Specifies the source table.\n DatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n TableName (string) -- [REQUIRED]The name of the table in question.\n \n\n :type Sinks: list\n :param Sinks: A list of target tables.\n (dict) --Specifies a table definition in the Data Catalog.\n DatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n TableName (string) -- [REQUIRED]The name of the table in question.\n \n \n\n :type Location: dict\n :param Location: Parameters for the mapping.\n Jdbc (list) --A JDBC location.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or 
property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used as a parameter.\n \n S3 (list) --An Amazon S3 location.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used as a parameter.\n \n DynamoDB (list) --A DynamoDB Table location.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used as a parameter.\n \n \n\n :rtype: dict\n :return: {\n 'Mapping': [\n {\n 'SourceTable': 'string',\n 'SourcePath': 'string',\n 'SourceType': 'string',\n 'TargetTable': 'string',\n 'TargetPath': 'string',\n 'TargetType': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):\n \"\"\"\n Retrieves information about a specified partition.\n See also: AWS API Documentation\n \n \n :example: response = client.get_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionValues=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition in question resides. 
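A minimal hedged sketch (all names are hypothetical; PartitionValues must be given in the same order as the table's partition keys):\n response = client.get_partition(\n DatabaseName='my_db', # hypothetical database name\n TableName='my_table', # hypothetical table name\n PartitionValues=['2017', '04'] # hypothetical partition key values\n )\n location = response['Partition']['StorageDescriptor']['Location']\n The CatalogId argument can again be omitted. 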
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the partition resides.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the partition's table.\n \n\n :type PartitionValues: list\n :param PartitionValues: [REQUIRED]\n The values that define the partition.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Partition': {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_partitions(CatalogId=None, DatabaseName=None, TableName=None, Expression=None, NextToken=None, Segment=None, MaxResults=None):\n \"\"\"\n Retrieves information about the partitions in a table.\n See also: AWS API Documentation\n \n \n :example: response = client.get_partitions(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n Expression='string',\n NextToken='string',\n Segment={\n 'SegmentNumber': 123,\n 'TotalSegments': 123\n },\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the partitions reside.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the partitions' table.\n \n\n :type Expression: string\n :param Expression: An expression filtering the partitions to be returned.\n The expression uses SQL syntax similar to the SQL WHERE filter clause. 
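A hedged sketch of a filtered call (names and partition keys are hypothetical):\n response = client.get_partitions(\n DatabaseName='my_db', # hypothetical database name\n TableName='my_table', # hypothetical table name\n Expression="year='2017' AND month='04'" # hypothetical partition keys\n )\n The expression itself is evaluated on the service side. 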
The SQL statement parser JSQLParser parses the expression.\n Operators : The following are the operators that you can use in the Expression API call:\n =\n Checks if the values of the two operands are equal or not; if yes, then the condition becomes true.\n Example: Assume 'variable a' holds 10 and 'variable b' holds 20.\n (a = b) is not true.\n < >\n Checks if the values of two operands are equal or not; if the values are not equal, then the condition becomes true.\n Example: (a < > b) is true.\n >\n Checks if the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.\n Example: (a > b) is not true.\n <\n Checks if the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.\n Example: (a < b) is true.\n >=\n Checks if the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.\n Example: (a >= b) is not true.\n <=\n Checks if the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.\n Example: (a <= b) is true.\n AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL\n Logical operators.\n Supported Partition Key Types : The following are the supported partition keys.\n string\n date\n timestamp\n int\n bigint\n long\n tinyint\n smallint\n decimal\n If an invalid type is encountered, an exception is thrown.\n The following list shows the valid operators on each type. When you define a crawler, the partitionKey type is created as a STRING , to be compatible with the catalog partitions.\n Sample API Call :\n \n\n :type NextToken: string\n :param NextToken: A continuation token, if this is not the first call to retrieve these partitions.\n\n :type Segment: dict\n :param Segment: The segment of the table's partitions to scan in this request.\n SegmentNumber (integer) -- [REQUIRED]The zero-based index number of this segment. 
For example, if the total number of segments is 4, SegmentNumber values will range from zero through three.\n TotalSegments (integer) -- [REQUIRED]The total number of segments.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of partitions to return in a single response.\n\n :rtype: dict\n :return: {\n 'Partitions': [\n {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_plan(Mapping=None, Source=None, Sinks=None, Location=None, Language=None):\n \"\"\"\n Gets code to perform a specified mapping.\n See also: AWS API Documentation\n \n \n :example: response = client.get_plan(\n Mapping=[\n {\n 'SourceTable': 'string',\n 'SourcePath': 'string',\n 'SourceType': 'string',\n 'TargetTable': 'string',\n 'TargetPath': 'string',\n 'TargetType': 'string'\n },\n ],\n Source={\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n Sinks=[\n {\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n ],\n Location={\n 'Jdbc': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'S3': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'DynamoDB': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ]\n },\n Language='PYTHON'|'SCALA'\n )\n \n \n :type Mapping: list\n :param Mapping: [REQUIRED]\n The list of mappings from a source table to target tables.\n (dict) --Defines a mapping.\n SourceTable (string) --The name of the source table.\n SourcePath (string) --The source path.\n SourceType (string) --The source type.\n TargetTable (string) --The target table.\n TargetPath (string) --The target path.\n TargetType (string) --The target type.\n \n \n\n :type Source: dict\n :param Source: [REQUIRED]\n The source table.\n DatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n TableName (string) -- [REQUIRED]The name of the table in question.\n \n\n :type Sinks: list\n :param Sinks: The target tables.\n (dict) --Specifies a table definition in the Data Catalog.\n DatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n TableName (string) -- [REQUIRED]The name of the table in question.\n \n \n\n :type Location: dict\n :param Location: Parameters for the mapping.\n Jdbc (list) --A JDBC location.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used 
as a parameter.\n \n S3 (list) --An Amazon S3 location.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used as a parameter.\n \n DynamoDB (list) --A DynamoDB Table location.\n (dict) --An argument or property of a node.\n Name (string) -- [REQUIRED]The name of the argument or property.\n Value (string) -- [REQUIRED]The value of the argument or property.\n Param (boolean) --True if the value is used as a parameter.\n \n \n\n :type Language: string\n :param Language: The programming language of the code to perform the mapping.\n\n :rtype: dict\n :return: {\n 'PythonScript': 'string',\n 'ScalaCode': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_resource_policy():\n \"\"\"\n Retrieves a specified resource policy.\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_policy()\n \n \n :rtype: dict\n :return: {\n 'PolicyInJson': 'string',\n 'PolicyHash': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef get_security_configuration(Name=None):\n \"\"\"\n Retrieves a specified security configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.get_security_configuration(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the security configuration to retrieve.\n \n\n :rtype: dict\n :return: {\n 'SecurityConfiguration': {\n 'Name': 'string',\n 'CreatedTimeStamp': datetime(2015, 1, 1),\n 'EncryptionConfiguration': {\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_security_configurations(MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves a list of all security configurations.\n See also: AWS API Documentation\n \n \n :example: response = client.get_security_configurations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :rtype: dict\n :return: {\n 'SecurityConfigurations': [\n {\n 'Name': 'string',\n 'CreatedTimeStamp': datetime(2015, 1, 1),\n 'EncryptionConfiguration': {\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_table(CatalogId=None, DatabaseName=None, Name=None):\n \"\"\"\n Retrieves the Table definition in a Data Catalog for a specified table.\n See also: AWS API Documentation\n \n \n :example: response = client.get_table(\n CatalogId='string',\n DatabaseName='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. 
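A minimal hedged sketch (names are hypothetical):\n response = client.get_table(\n DatabaseName='my_db', # hypothetical database name\n Name='my_table' # hypothetical table name\n )\n columns = response['Table']['StorageDescriptor']['Columns']\n As with the other catalog calls, CatalogId is optional. 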
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase.\n \n\n :rtype: dict\n :return: {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):\n \"\"\"\n Retrieves a specified version of a table.\n See also: AWS API Documentation\n \n \n :example: response = client.get_table_version(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n VersionId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table. For Hive compatibility, this name is entirely lowercase.\n \n\n :type VersionId: string\n :param VersionId: The ID value of the table version to be retrieved. A VersionID is a string representation of an integer. 
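A minimal hedged sketch (names are hypothetical):\n response = client.get_table_version(\n DatabaseName='my_db', # hypothetical database name\n TableName='my_table', # hypothetical table name\n VersionId='2' # version IDs are stringified integers\n )\n Valid IDs therefore look like '1', '2', and so on. 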
Each version is incremented by 1.\n\n :rtype: dict\n :return: {\n 'TableVersion': {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string'\n },\n 'VersionId': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_table_versions(CatalogId=None, DatabaseName=None, TableName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves a list of strings that identify available versions of a specified table.\n See also: AWS API Documentation\n \n \n :example: response = client.get_table_versions(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table. 
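A hedged sketch that walks every version (names are hypothetical, and it assumes a paginator is available for this operation, as it is for most Glue list calls):\n paginator = client.get_paginator('get_table_versions')\n for page in paginator.paginate(DatabaseName='my_db', TableName='my_table'): # hypothetical names\n for tv in page['TableVersions']:\n print(tv['VersionId'])\n The TableName is matched as given. 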
For Hive compatibility, this name is entirely lowercase.\n \n\n :type NextToken: string\n :param NextToken: A continuation token, if this is not the first call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of table versions to return in one response.\n\n :rtype: dict\n :return: {\n 'TableVersions': [\n {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string'\n },\n 'VersionId': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_tables(CatalogId=None, DatabaseName=None, Expression=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves the definitions of some or all of the tables in a given Database .\n See also: AWS API Documentation\n \n \n :example: response = client.get_tables(\n CatalogId='string',\n DatabaseName='string',\n Expression='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase.\n \n\n :type Expression: string\n :param Expression: A regular expression pattern. 
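A hedged sketch (my_db is a hypothetical name; note that the pattern is a regular expression, not a glob):\n response = client.get_tables(\n DatabaseName='my_db', # hypothetical database name\n Expression='sales_.*' # hypothetical table-name pattern\n )\n names = [t['Name'] for t in response['TableList']]\n The Expression argument is optional. 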
If present, only those tables whose names match the pattern are returned.\n\n :type NextToken: string\n :param NextToken: A continuation token, included if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of tables to return in a single response.\n\n :rtype: dict\n :return: {\n 'TableList': [\n {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_trigger(Name=None):\n \"\"\"\n Retrieves the definition of a trigger.\n See also: AWS API Documentation\n \n \n :example: response = client.get_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the trigger to retrieve.\n \n\n :rtype: dict\n :return: {\n 'Trigger': {\n 'Name': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n },\n ]\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_triggers(NextToken=None, DependentJobName=None, MaxResults=None):\n \"\"\"\n Gets all the triggers associated with a job.\n See also: AWS API Documentation\n \n \n :example: response = client.get_triggers(\n NextToken='string',\n DependentJobName='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type DependentJobName: string\n :param DependentJobName: The name of the job for which to retrieve triggers. 
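As a sketch, listing the triggers associated with one job via DependentJobName (the job name is a placeholder):

    # Assumption: client = boto3.client('glue').
    response = client.get_triggers(DependentJobName='example-job')
    for trigger in response['Triggers']:
        print(trigger['Name'], trigger['Type'], trigger['State'])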
The trigger that can start this job will be returned, and if there is no such trigger, all triggers will be returned.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of the response.\n\n :rtype: dict\n :return: {\n 'Triggers': [\n {\n 'Name': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):\n \"\"\"\n Retrieves a specified function definition from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionName='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the function to be retrieved is located. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the function is located.\n \n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the function.\n \n\n :rtype: dict\n :return: {\n 'UserDefinedFunction': {\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'CreateTime': datetime(2015, 1, 1),\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_user_defined_functions(CatalogId=None, DatabaseName=None, Pattern=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves a multiple function definitions from the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.get_user_defined_functions(\n CatalogId='string',\n DatabaseName='string',\n Pattern='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the functions to be retrieved are located. 
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the functions are located.\n \n\n :type Pattern: string\n :param Pattern: [REQUIRED]\n An optional function-name pattern string that filters the function definitions returned.\n \n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of functions to return in one response.\n\n :rtype: dict\n :return: {\n 'UserDefinedFunctions': [\n {\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'CreateTime': datetime(2015, 1, 1),\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef import_catalog_to_glue(CatalogId=None):\n \"\"\"\n Imports an existing Athena Data Catalog to AWS Glue\n See also: AWS API Documentation\n \n \n :example: response = client.import_catalog_to_glue(\n CatalogId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the catalog to import. Currently, this should be the AWS account ID.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef put_data_catalog_encryption_settings(CatalogId=None, DataCatalogEncryptionSettings=None):\n \"\"\"\n Sets the security configuration for a specified catalog. Once the configuration has been set, the specified encryption is applied to every catalog write thereafter.\n See also: AWS API Documentation\n \n \n :example: response = client.put_data_catalog_encryption_settings(\n CatalogId='string',\n DataCatalogEncryptionSettings={\n 'EncryptionAtRest': {\n 'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'SseAwsKmsKeyId': 'string'\n },\n 'ConnectionPasswordEncryption': {\n 'ReturnConnectionPasswordEncrypted': True|False,\n 'AwsKmsKeyId': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog for which to set the security configuration. If none is supplied, the AWS account ID is used by default.\n\n :type DataCatalogEncryptionSettings: dict\n :param DataCatalogEncryptionSettings: [REQUIRED]\n The security configuration to set.\n EncryptionAtRest (dict) --Specifies encryption-at-rest configuration for the Data Catalog.\n CatalogEncryptionMode (string) -- [REQUIRED]The encryption-at-rest mode for encrypting Data Catalog data.\n SseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.\n ConnectionPasswordEncryption (dict) --When password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.\n ReturnConnectionPasswordEncrypted (boolean) -- [REQUIRED]When the ReturnConnectionPasswordEncrypted flag is set to 'true', passwords remain encrypted in the responses of GetConnection and GetConnections . 
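A minimal sketch that sets both encryption at rest and connection password encryption in one call; the KMS key aliases are placeholders:

    # Assumption: client = boto3.client('glue'); key aliases are placeholders.
    client.put_data_catalog_encryption_settings(
        DataCatalogEncryptionSettings={
            'EncryptionAtRest': {
                'CatalogEncryptionMode': 'SSE-KMS',
                'SseAwsKmsKeyId': 'alias/example-catalog-key'
            },
            'ConnectionPasswordEncryption': {
                'ReturnConnectionPasswordEncrypted': True,
                'AwsKmsKeyId': 'alias/example-password-key'
            }
        }
    )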
This encryption takes effect independently from catalog encryption.\n AwsKmsKeyId (string) --A KMS key used to protect access to the JDBC source.\n All users in your account should be granted the kms:encrypt permission to encrypt passwords before storing them in the Data Catalog (through the AWS Glue CreateConnection operation).\n The decrypt permission should be granted only to KMS key admins and IAM roles designated for AWS Glue crawlers.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_resource_policy(PolicyInJson=None, PolicyHashCondition=None, PolicyExistsCondition=None):\n \"\"\"\n Sets the Data Catalog resource policy for access control.\n See also: AWS API Documentation\n \n \n :example: response = client.put_resource_policy(\n PolicyInJson='string',\n PolicyHashCondition='string',\n PolicyExistsCondition='MUST_EXIST'|'NOT_EXIST'|'NONE'\n )\n \n \n :type PolicyInJson: string\n :param PolicyInJson: [REQUIRED]\n Contains the policy document to set, in JSON format.\n \n\n :type PolicyHashCondition: string\n :param PolicyHashCondition: This is the hash value returned when the previous policy was set using PutResourcePolicy. Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.\n\n :type PolicyExistsCondition: string\n :param PolicyExistsCondition: A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used to create a new policy. If a value of NONE or a null value is used, the call will not depend on the existence of a policy.\n\n :rtype: dict\n :return: {\n 'PolicyHash': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef reset_job_bookmark(JobName=None):\n \"\"\"\n Resets a bookmark entry.\n See also: AWS API Documentation\n \n \n :example: response = client.reset_job_bookmark(\n JobName='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n The name of the job in question.\n \n\n :rtype: dict\n :return: {\n 'JobBookmarkEntry': {\n 'JobName': 'string',\n 'Version': 123,\n 'Run': 123,\n 'Attempt': 123,\n 'JobBookmark': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef start_crawler(Name=None):\n \"\"\"\n Starts a crawl using the specified crawler, regardless of what is scheduled. 
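A sketch of starting a crawler while tolerating the already-running case; reaching the modeled error through client.exceptions is an assumption about the boto3 client in use:

    # Assumption: client = boto3.client('glue'); the crawler name is a placeholder.
    try:
        client.start_crawler(Name='example-crawler')
    except client.exceptions.CrawlerRunningException:
        pass  # the crawler was already running, as described below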
If the crawler is already running, returns a CrawlerRunningException .\n See also: AWS API Documentation\n \n \n :example: response = client.start_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the crawler to start.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef start_crawler_schedule(CrawlerName=None):\n \"\"\"\n Changes the schedule state of the specified crawler to SCHEDULED , unless the crawler is already running or the schedule state is already SCHEDULED .\n See also: AWS API Documentation\n \n \n :example: response = client.start_crawler_schedule(\n CrawlerName='string'\n )\n \n \n :type CrawlerName: string\n :param CrawlerName: [REQUIRED]\n Name of the crawler to schedule.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef start_job_run(JobName=None, JobRunId=None, Arguments=None, AllocatedCapacity=None, Timeout=None, NotificationProperty=None, SecurityConfiguration=None):\n \"\"\"\n Starts a job run using a job definition.\n See also: AWS API Documentation\n \n \n :example: response = client.start_job_run(\n JobName='string',\n JobRunId='string',\n Arguments={\n 'string': 'string'\n },\n AllocatedCapacity=123,\n Timeout=123,\n NotificationProperty={\n 'NotifyDelayAfter': 123\n },\n SecurityConfiguration='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n The name of the job definition to use.\n \n\n :type JobRunId: string\n :param JobRunId: The ID of a previous JobRun to retry.\n\n :type Arguments: dict\n :param Arguments: The job arguments specifically for this run. They override the equivalent default arguments set for in the job definition itself.\n You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\n For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\n For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n (string) --\n (string) --\n \n\n :type AllocatedCapacity: integer\n :param AllocatedCapacity: The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\n :type Timeout: integer\n :param Timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\n :type NotificationProperty: dict\n :param NotificationProperty: Specifies configuration properties of a job run notification.\n NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n \n\n :type SecurityConfiguration: string\n :param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job run.\n\n :rtype: dict\n :return: {\n 'JobRunId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_trigger(Name=None):\n \"\"\"\n Starts an existing trigger. 
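For the start_job_run operation documented above, a hedged sketch with run-specific arguments and a timeout override. The job name, argument key, and S3 path are placeholders; the '--'-prefixed key follows the script-argument convention referenced in the developer guide:

    # Assumption: client = boto3.client('glue').
    response = client.start_job_run(
        JobName='example-job',
        Arguments={'--input_path': 's3://example-bucket/input/'},
        Timeout=60   # minutes; overrides the parent job's timeout
    )
    print(response['JobRunId'])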
See Triggering Jobs for information about how different types of trigger are started.\n See also: AWS API Documentation\n \n \n :example: response = client.start_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the trigger to start.\n \n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef stop_crawler(Name=None):\n \"\"\"\n If the specified crawler is running, stops the crawl.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the crawler to stop.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef stop_crawler_schedule(CrawlerName=None):\n \"\"\"\n Sets the schedule state of the specified crawler to NOT_SCHEDULED , but does not stop the crawler if it is already running.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_crawler_schedule(\n CrawlerName='string'\n )\n \n \n :type CrawlerName: string\n :param CrawlerName: [REQUIRED]\n Name of the crawler whose schedule state to set.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef stop_trigger(Name=None):\n \"\"\"\n Stops a specified trigger.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the trigger to stop.\n \n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None):\n \"\"\"\n Modifies an existing classifier (a GrokClassifier , XMLClassifier , or JsonClassifier , depending on which field is present).\n See also: AWS API Documentation\n \n \n :example: response = client.update_classifier(\n GrokClassifier={\n 'Name': 'string',\n 'Classification': 'string',\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n XMLClassifier={\n 'Name': 'string',\n 'Classification': 'string',\n 'RowTag': 'string'\n },\n JsonClassifier={\n 'Name': 'string',\n 'JsonPath': 'string'\n }\n )\n \n \n :type GrokClassifier: dict\n :param GrokClassifier: A GrokClassifier object with updated fields.\n Name (string) -- [REQUIRED]The name of the GrokClassifier .\n Classification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n GrokPattern (string) --The grok pattern used by this classifier.\n CustomPatterns (string) --Optional custom grok patterns used by this classifier.\n \n\n :type XMLClassifier: dict\n :param XMLClassifier: An XMLClassifier object with updated fields.\n Name (string) -- [REQUIRED]The name of the classifier.\n Classification (string) --An identifier of the data format that the classifier matches.\n RowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n \n\n :type JsonClassifier: dict\n :param JsonClassifier: A JsonClassifier object with updated fields.\n Name (string) -- [REQUIRED]The name of the classifier.\n JsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. 
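A small sketch of updating a JSON classifier in place; the classifier name and JsonPath are placeholders:

    # Assumption: client = boto3.client('glue').
    client.update_classifier(
        JsonClassifier={
            'Name': 'example-json-classifier',
            'JsonPath': '$.records[*]'
        }
    )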
AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers .\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_connection(CatalogId=None, Name=None, ConnectionInput=None):\n \"\"\"\n Updates a connection definition in the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.update_connection(\n CatalogId='string',\n Name='string',\n ConnectionInput={\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the connection definition to update.\n \n\n :type ConnectionInput: dict\n :param ConnectionInput: [REQUIRED]\n A ConnectionInput object that redefines the connection in question.\n Name (string) -- [REQUIRED]The name of the connection.\n Description (string) --Description of the connection.\n ConnectionType (string) -- [REQUIRED]The type of the connection. Currently, only JDBC is supported; SFTP is not supported.\n MatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n (string) --\n ConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n (string) --\n (string) --\n \n PhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.\n SubnetId (string) --The subnet ID used by the connection.\n SecurityGroupIdList (list) --The security group ID list used by the connection.\n (string) --\n AvailabilityZone (string) --The connection's availability zone. This field is redundant, since the specified subnet implies the availability zone to be used. The field must be populated now, but will be deprecated in the future.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None):\n \"\"\"\n Updates a crawler. 
If a crawler is running, you must stop it using StopCrawler before updating it.\n See also: AWS API Documentation\n \n \n :example: response = client.update_crawler(\n Name='string',\n Role='string',\n DatabaseName='string',\n Description='string',\n Targets={\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ]\n },\n Schedule='string',\n Classifiers=[\n 'string',\n ],\n TablePrefix='string',\n SchemaChangePolicy={\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n Configuration='string',\n CrawlerSecurityConfiguration='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the new crawler.\n \n\n :type Role: string\n :param Role: The IAM role (or ARN of an IAM role) used by the new crawler to access customer resources.\n\n :type DatabaseName: string\n :param DatabaseName: The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/* .\n\n :type Description: string\n :param Description: A description of the new crawler.\n\n :type Targets: dict\n :param Targets: A list of targets to crawl.\n S3Targets (list) --Specifies Amazon S3 targets.\n (dict) --Specifies a data store in Amazon S3.\n Path (string) --The path to the Amazon S3 target.\n Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n (string) --\n \n JdbcTargets (list) --Specifies JDBC targets.\n (dict) --Specifies a JDBC data store to crawl.\n ConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n Path (string) --The path of the JDBC target.\n Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n (string) --\n \n DynamoDBTargets (list) --Specifies DynamoDB targets.\n (dict) --Specifies a DynamoDB table to crawl.\n Path (string) --The name of the DynamoDB table to crawl.\n \n \n\n :type Schedule: string\n :param Schedule: A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\n :type Classifiers: list\n :param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n (string) --\n \n\n :type TablePrefix: string\n :param TablePrefix: The table prefix used for catalog tables that are created.\n\n :type SchemaChangePolicy: dict\n :param SchemaChangePolicy: Policy for the crawler's update and deletion behavior.\n UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n \n\n :type Configuration: string\n :param Configuration: Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. 
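Tying the parameters above together, a hedged sketch that reschedules a crawler and adjusts its schema-change policy. The crawler name is a placeholder; the cron expression follows the format shown above:

    # Assumption: client = boto3.client('glue').
    client.update_crawler(
        Name='example-crawler',
        Schedule='cron(15 12 * * ? *)',   # every day at 12:15 UTC
        SchemaChangePolicy={
            'UpdateBehavior': 'UPDATE_IN_DATABASE',
            'DeleteBehavior': 'LOG'
        }
    )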
For more information, see Configuring a Crawler .\n\n :type CrawlerSecurityConfiguration: string\n :param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this Crawler.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_crawler_schedule(CrawlerName=None, Schedule=None):\n \"\"\"\n Updates the schedule of a crawler using a cron expression.\n See also: AWS API Documentation\n \n \n :example: response = client.update_crawler_schedule(\n CrawlerName='string',\n Schedule='string'\n )\n \n \n :type CrawlerName: string\n :param CrawlerName: [REQUIRED]\n Name of the crawler whose schedule to update.\n \n\n :type Schedule: string\n :param Schedule: The updated cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_database(CatalogId=None, Name=None, DatabaseInput=None):\n \"\"\"\n Updates an existing database definition in a Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.update_database(\n CatalogId='string',\n Name='string',\n DatabaseInput={\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the metadata database resides. If none is supplied, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the database to update in the catalog. For Hive compatibility, this is folded to lowercase.\n \n\n :type DatabaseInput: dict\n :param DatabaseInput: [REQUIRED]\n A DatabaseInput object specifying the new definition of the metadata database in the catalog.\n Name (string) -- [REQUIRED]Name of the database. 
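For the update_database operation documented here, a brief sketch (the database name and description are placeholders):

    # Assumption: client = boto3.client('glue').
    client.update_database(
        Name='example_db',
        DatabaseInput={
            'Name': 'example_db',
            'Description': 'Curated example datasets.'
        }
    )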
For Hive compatibility, this is folded to lowercase when it is stored.\n Description (string) --Description of the database\n LocationUri (string) --The location of the database (for example, an HDFS path).\n Parameters (dict) --Thes key-value pairs define parameters and properties of the database.\n (string) --\n (string) --\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_dev_endpoint(EndpointName=None, PublicKey=None, AddPublicKeys=None, DeletePublicKeys=None, CustomLibraries=None, UpdateEtlLibraries=None):\n \"\"\"\n Updates a specified DevEndpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.update_dev_endpoint(\n EndpointName='string',\n PublicKey='string',\n AddPublicKeys=[\n 'string',\n ],\n DeletePublicKeys=[\n 'string',\n ],\n CustomLibraries={\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string'\n },\n UpdateEtlLibraries=True|False\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the DevEndpoint to be updated.\n \n\n :type PublicKey: string\n :param PublicKey: The public key for the DevEndpoint to use.\n\n :type AddPublicKeys: list\n :param AddPublicKeys: The list of public keys for the DevEndpoint to use.\n (string) --\n \n\n :type DeletePublicKeys: list\n :param DeletePublicKeys: The list of public keys to be deleted from the DevEndpoint.\n (string) --\n \n\n :type CustomLibraries: dict\n :param CustomLibraries: Custom Python or Java libraries to be loaded in the DevEndpoint.\n ExtraPythonLibsS3Path (string) --Path(s) to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint. Multiple values must be complete paths separated by a comma.\n Please note that only pure Python libraries can currently be used on a DevEndpoint. 
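A sketch of pointing a development endpoint at a refreshed pure-Python library bundle (the endpoint name and S3 path are placeholders):

    # Assumption: client = boto3.client('glue').
    client.update_dev_endpoint(
        EndpointName='example-endpoint',
        CustomLibraries={
            'ExtraPythonLibsS3Path': 's3://example-bucket/libs/mylib.zip'
        },
        UpdateEtlLibraries=True   # reload the libraries on the endpoint
    )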
Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.\n ExtraJarsS3Path (string) --Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.\n Please note that only pure Java/Scala libraries can currently be used on a DevEndpoint.\n \n\n :type UpdateEtlLibraries: boolean\n :param UpdateEtlLibraries: True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_job(JobName=None, JobUpdate=None):\n \"\"\"\n Updates an existing job definition.\n See also: AWS API Documentation\n \n \n :example: response = client.update_job(\n JobName='string',\n JobUpdate={\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n }\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\n Name of the job definition to update.\n \n\n :type JobUpdate: dict\n :param JobUpdate: [REQUIRED]\n Specifies the values with which to update the job definition.\n Description (string) --Description of the job being defined.\n LogUri (string) --This field is reserved for future use.\n Role (string) --The name or ARN of the IAM role associated with this job (required).\n ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n Command (dict) --The JobCommand that executes this job (required).\n Name (string) --The name of the job command: this must be glueetl .\n ScriptLocation (string) --Specifies the S3 path to a script that executes a job (required).\n DefaultArguments (dict) --The default arguments for this job.\n You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\n For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\n For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n (string) --\n (string) --\n \n Connections (dict) --The connections used for this job.\n Connections (list) --A list of connections used by the job.\n (string) --\n \n MaxRetries (integer) --The maximum number of times to retry this job if it fails.\n AllocatedCapacity (integer) --The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. 
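A hedged sketch of update_job; because JobUpdate redefines the job, the required Role and Command are supplied alongside the changed fields. All names, the ARN, and the script path are placeholders:

    # Assumption: client = boto3.client('glue').
    client.update_job(
        JobName='example-job',
        JobUpdate={
            'Role': 'arn:aws:iam::123456789012:role/ExampleGlueRole',
            'Command': {
                'Name': 'glueetl',
                'ScriptLocation': 's3://example-bucket/scripts/job.py'
            },
            'MaxRetries': 2,
            'Timeout': 120   # minutes
        }
    )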
The default is 2,880 minutes (48 hours).\n NotificationProperty (dict) --Specifies configuration properties of a job notification.\n NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\n \n\n :rtype: dict\n :return: {\n 'JobName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValueList=None, PartitionInput=None):\n \"\"\"\n Updates a partition.\n See also: AWS API Documentation\n \n \n :example: response = client.update_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionValueList=[\n 'string',\n ],\n PartitionInput={\n 'Values': [\n 'string',\n ],\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition to be updated resides. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database in which the table in question resides.\n \n\n :type TableName: string\n :param TableName: [REQUIRED]\n The name of the table where the partition to be updated is located.\n \n\n :type PartitionValueList: list\n :param PartitionValueList: [REQUIRED]\n A list of the values defining the partition.\n (string) --\n \n\n :type PartitionInput: dict\n :param PartitionInput: [REQUIRED]\n The new partition object to which to update the partition.\n Values (list) --The values of the partition.\n (string) --\n LastAccessTime (datetime) --The last time at which the partition was accessed.\n StorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n Columns (list) --A list of the Columns in the table.\n (dict) --A column in a Table .\n Name (string) -- [REQUIRED]The name of the Column .\n Type (string) --The datatype of data in the Column .\n Comment (string) --Free-form text comment.\n \n Location (string) --The physical location of the table. 
By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n InputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n OutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n Compressed (boolean) --True if the data in the table is compressed, or False if not.\n NumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n SerdeInfo (dict) --Serialization/deserialization (SerDe) information.\n Name (string) --Name of the SerDe.\n SerializationLibrary (string) --Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n Parameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n (string) --\n (string) --\n \n BucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n (string) --\n SortColumns (list) --A list specifying the sort order of each bucket in the table.\n (dict) --Specifies the sort order of a sorted column.\n Column (string) -- [REQUIRED]The name of the column.\n SortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n \n Parameters (dict) --User-supplied properties in key-value form.\n (string) --\n (string) --\n \n SkewedInfo (dict) --Information about values that appear very frequently in a column (skewed values).\n SkewedColumnNames (list) --A list of names of columns that contain skewed values.\n (string) --\n SkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n (string) --\n SkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n (string) --\n (string) --\n \n StoredAsSubDirectories (boolean) --True if the table data is stored in subdirectories, or False if not.\n Parameters (dict) --These key-value pairs define partition parameters.\n (string) --\n (string) --\n \n LastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_table(CatalogId=None, DatabaseName=None, TableInput=None, SkipArchive=None):\n \"\"\"\n Updates a metadata table in the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.update_table(\n CatalogId='string',\n DatabaseName='string',\n TableInput={\n 'Name': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': 
True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string'\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n SkipArchive=True|False\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n \n\n :type TableInput: dict\n :param TableInput: [REQUIRED]\n An updated TableInput object to define the metadata table in the catalog.\n Name (string) -- [REQUIRED]Name of the table. For Hive compatibility, this is folded to lowercase when it is stored.\n Description (string) --Description of the table.\n Owner (string) --Owner of the table.\n LastAccessTime (datetime) --Last time the table was accessed.\n LastAnalyzedTime (datetime) --Last time column statistics were computed for this table.\n Retention (integer) --Retention time for this table.\n StorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n Columns (list) --A list of the Columns in the table.\n (dict) --A column in a Table .\n Name (string) -- [REQUIRED]The name of the Column .\n Type (string) --The datatype of data in the Column .\n Comment (string) --Free-form text comment.\n \n Location (string) --The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n InputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n OutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n Compressed (boolean) --True if the data in the table is compressed, or False if not.\n NumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n SerdeInfo (dict) --Serialization/deserialization (SerDe) information.\n Name (string) --Name of the SerDe.\n SerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n Parameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n (string) --\n (string) --\n \n BucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n (string) --\n SortColumns (list) --A list specifying the sort order of each bucket in the table.\n (dict) --Specifies the sort order of a sorted column.\n Column (string) -- [REQUIRED]The name of the column.\n SortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n \n Parameters (dict) --User-supplied properties in key-value form.\n (string) --\n (string) --\n \n SkewedInfo (dict) --Information about values that appear very frequently in a column (skewed values).\n SkewedColumnNames (list) --A list of names of columns that contain skewed values.\n (string) --\n SkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n (string) --\n SkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n (string) --\n (string) --\n \n StoredAsSubDirectories (boolean) --True if the table data is stored in subdirectories, or False if not.\n PartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\n When creating a table used by Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:\n 'PartitionKeys': []\n (dict) --A column in a Table .\n Name (string) -- [REQUIRED]The name of the Column .\n Type (string) --The datatype of data in the Column .\n Comment (string) --Free-form text comment.\n \n ViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n ViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n TableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n Parameters (dict) --These key-value pairs define properties associated with the table.\n (string) --\n (string) --\n \n \n\n :type SkipArchive: boolean\n :param SkipArchive: By default, UpdateTable always creates an archived version of the table before updating it. 
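One way to use update_table is a read-modify-write, sketched below. Stripping the server-managed fields before resubmitting is an assumption based on the TableInput shape above; all names are placeholders:

    # Assumption: client = boto3.client('glue'); the stripped keys are those
    # returned by get_table but not accepted in TableInput.
    table = client.get_table(DatabaseName='example_db', Name='example_table')['Table']
    table_input = {k: v for k, v in table.items()
                   if k not in ('DatabaseName', 'CreateTime', 'UpdateTime', 'CreatedBy')}
    table_input.setdefault('Parameters', {})['classification'] = 'parquet'
    client.update_table(
        DatabaseName='example_db',
        TableInput=table_input,
        SkipArchive=True   # see the note on skipArchive below
    )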
If skipArchive is set to true, however, UpdateTable does not create the archived version.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_trigger(Name=None, TriggerUpdate=None):\n \"\"\"\n Updates a trigger definition.\n See also: AWS API Documentation\n \n \n :example: response = client.update_trigger(\n Name='string',\n TriggerUpdate={\n 'Name': 'string',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n },\n ]\n }\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The name of the trigger to update.\n \n\n :type TriggerUpdate: dict\n :param TriggerUpdate: [REQUIRED]\n The new values with which to update the trigger.\n Name (string) --Reserved for future use.\n Description (string) --A description of this trigger.\n Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n Actions (list) --The actions initiated by this trigger.\n (dict) --Defines an action to be initiated by a trigger.\n JobName (string) --The name of a job to be executed.\n Arguments (dict) --Arguments to be passed to the job run.\n You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\n For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\n For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n (string) --\n (string) --\n \n Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n NotificationProperty (dict) --Specifies configuration properties of a job run notification.\n NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n \n Predicate (dict) --The predicate of this trigger, which defines when it will fire.\n Logical (string) --Optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n Conditions (list) --A list of the conditions that determine when the trigger will fire.\n (dict) --Defines a condition under which a trigger fires.\n LogicalOperator (string) --A logical operator.\n JobName (string) --The name of the Job to whose JobRuns this condition applies and on which this trigger waits.\n State (string) --The condition state. 
Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT and FAILED.\n \n \n \n\n :rtype: dict\n :return: {\n 'Trigger': {\n 'Name': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'SecurityConfiguration': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n },\n ]\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef update_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None, FunctionInput=None):\n \"\"\"\n Updates an existing function definition in the Data Catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.update_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionName='string',\n FunctionInput={\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the function to be updated is located. If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\n The name of the catalog database where the function to be updated is located.\n \n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\n The name of the function.\n \n\n :type FunctionInput: dict\n :param FunctionInput: [REQUIRED]\n A FunctionInput object that re-defines the function in the Data Catalog.\n FunctionName (string) --The name of the function.\n ClassName (string) --The Java class that contains the function code.\n OwnerName (string) --The owner of the function.\n OwnerType (string) --The owner type.\n ResourceUris (list) --The resource URIs for the function.\n (dict) --URIs for function resources.\n ResourceType (string) --The type of the resource.\n Uri (string) --The URI for accessing the resource.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.592022716999054, "alphanum_fraction": 0.5968436598777771, "avg_line_length": 32.9686393737793, "blob_id": "3be168776e1be5a6e65ee94572fd33640a2a1e0f", "content_id": "d1dfe60d0427540b43847cb0ab922e290a059d0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68244, "license_type": "permissive", "max_line_length": 471, "num_lines": 2009, "path": "/pyboto3/codecommit.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the 
following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_get_repositories(repositoryNames=None):\n \"\"\"\n Returns information about one or more repositories.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_get_repositories(\n repositoryNames=[\n 'string',\n ]\n )\n \n \n :type repositoryNames: list\n :param repositoryNames: [REQUIRED]\n The names of the repositories to get information about.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'repositories': [\n {\n 'accountId': 'string',\n 'repositoryId': 'string',\n 'repositoryName': 'string',\n 'repositoryDescription': 'string',\n 'defaultBranch': 'string',\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'cloneUrlHttp': 'string',\n 'cloneUrlSsh': 'string',\n 'Arn': 'string'\n },\n ],\n 'repositoriesNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_branch(repositoryName=None, branchName=None, commitId=None):\n \"\"\"\n Creates a new branch in a repository and points the branch to a commit.\n See also: AWS API Documentation\n \n \n :example: response = client.create_branch(\n repositoryName='string',\n branchName='string',\n commitId='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository in which you want to create the new branch.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n The name of the new branch to create.\n \n\n :type commitId: string\n :param commitId: [REQUIRED]\n The ID of the commit to point the new branch to.\n \n\n \"\"\"\n pass\n\ndef create_pull_request(title=None, description=None, targets=None, clientRequestToken=None):\n \"\"\"\n Creates a pull request in the specified repository.\n See also: AWS API Documentation\n \n \n :example: response = client.create_pull_request(\n title='string',\n description='string',\n targets=[\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string'\n },\n ],\n clientRequestToken='string'\n )\n \n \n :type title: string\n :param title: [REQUIRED]\n The title of the pull request. 
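A minimal sketch of opening a pull request (the repository, branch names, and text are placeholders):

    # Assumption: client = boto3.client('codecommit').
    response = client.create_pull_request(
        title='Add input validation',
        description='Validates request payloads before processing.',
        targets=[
            {
                'repositoryName': 'example-repo',
                'sourceReference': 'feature/validation',
                'destinationReference': 'main'
            }
        ]
    )
    print(response['pullRequest']['pullRequestId'])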
This title will be used to identify the pull request to other users in the repository.\n \n\n :type description: string\n :param description: A description of the pull request.\n\n :type targets: list\n :param targets: [REQUIRED]\n The targets for the pull request, including the source of the code to be reviewed (the source branch), and the destination where the creator of the pull request intends the code to be merged after the pull request is closed (the destination branch).\n (dict) --Returns information about a target for a pull request.\n repositoryName (string) -- [REQUIRED]The name of the repository that contains the pull request.\n sourceReference (string) -- [REQUIRED]The branch of the repository that contains the changes for the pull request. Also known as the source branch.\n destinationReference (string) --The branch of the repository where the pull request changes will be merged into. Also known as the destination branch.\n \n \n\n :type clientRequestToken: string\n :param clientRequestToken: A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.\n Note\n The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you do not have to generate an idempotency token, as this will be done for you.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'pullRequest': {\n 'pullRequestId': 'string',\n 'title': 'string',\n 'description': 'string',\n 'lastActivityDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'pullRequestStatus': 'OPEN'|'CLOSED',\n 'authorArn': 'string',\n 'pullRequestTargets': [\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string',\n 'destinationCommit': 'string',\n 'sourceCommit': 'string',\n 'mergeBase': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n },\n ],\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef create_repository(repositoryName=None, repositoryDescription=None):\n \"\"\"\n Creates a new, empty repository.\n See also: AWS API Documentation\n \n \n :example: response = client.create_repository(\n repositoryName='string',\n repositoryDescription='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the new repository to be created.\n Note\n The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix '.git' is prohibited.\n \n\n :type repositoryDescription: string\n :param repositoryDescription: A comment or description about the new repository.\n Note\n The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. 
Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.\n \n\n :rtype: dict\n :return: {\n 'repositoryMetadata': {\n 'accountId': 'string',\n 'repositoryId': 'string',\n 'repositoryName': 'string',\n 'repositoryDescription': 'string',\n 'defaultBranch': 'string',\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'cloneUrlHttp': 'string',\n 'cloneUrlSsh': 'string',\n 'Arn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_branch(repositoryName=None, branchName=None):\n \"\"\"\n Deletes a branch from a repository, unless that branch is the default branch for the repository.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_branch(\n repositoryName='string',\n branchName='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository that contains the branch to be deleted.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n The name of the branch to delete.\n \n\n :rtype: dict\n :return: {\n 'deletedBranch': {\n 'branchName': 'string',\n 'commitId': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_comment_content(commentId=None):\n \"\"\"\n Deletes the content of a comment made on a change, file, or commit in a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_comment_content(\n commentId='string'\n )\n \n \n :type commentId: string\n :param commentId: [REQUIRED]\n The unique, system-generated ID of the comment. To get this ID, use GetCommentsForComparedCommit or GetCommentsForPullRequest .\n \n\n :rtype: dict\n :return: {\n 'comment': {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_file(repositoryName=None, branchName=None, filePath=None, parentCommitId=None, keepEmptyFolders=None, commitMessage=None, name=None, email=None):\n \"\"\"\n Deletes a specified file from a specified branch. A commit is created on the branch that contains the revision. The file will still exist in the commits prior to the commit that contains the deletion.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_file(\n repositoryName='string',\n branchName='string',\n filePath='string',\n parentCommitId='string',\n keepEmptyFolders=True|False,\n commitMessage='string',\n name='string',\n email='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository that contains the file to delete.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n The name of the branch where the commit will be made deleting the file.\n \n\n :type filePath: string\n :param filePath: [REQUIRED]\n The fully-qualified path to the file that will be deleted, including the full name and extension of that file. For example, /examples/file.md is a fully qualified path to a file named file.md in a folder named examples.\n \n\n :type parentCommitId: string\n :param parentCommitId: [REQUIRED]\n The ID of the commit that is the tip of the branch where you want to create the commit that will delete the file. This must be the HEAD commit for the branch. 
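A sketch that resolves the branch tip with get_branch and passes it as parentCommitId (the repository, branch, and file path are placeholders):

    # Assumption: client = boto3.client('codecommit').
    branch = client.get_branch(repositoryName='example-repo', branchName='main')
    response = client.delete_file(
        repositoryName='example-repo',
        branchName='main',
        filePath='examples/file.md',
        parentCommitId=branch['branch']['commitId'],
        commitMessage='Remove obsolete example file.'
    )
    print(response['commitId'])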
The commit that deletes the file will be created from this commit ID.\n \n\n :type keepEmptyFolders: boolean\n :param keepEmptyFolders: Specifies whether to delete the folder or directory that contains the file you want to delete if that file is the only object in the folder or directory. By default, empty folders will be deleted. This includes empty folders that are part of the directory structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file in dir4 will also delete the empty folders dir4, dir3, and dir2.\n\n :type commitMessage: string\n :param commitMessage: The commit message you want to include as part of deleting the file. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.\n\n :type name: string\n :param name: The name of the author of the commit that deletes the file. If no name is specified, the user's ARN will be used as the author name and committer name.\n\n :type email: string\n :param email: The email address for the commit that deletes the file. If no email address is specified, the email address will be left blank.\n\n :rtype: dict\n :return: {\n 'commitId': 'string',\n 'blobId': 'string',\n 'treeId': 'string',\n 'filePath': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_repository(repositoryName=None):\n \"\"\"\n Deletes a repository. If a specified repository was already deleted, a null repository ID will be returned.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_repository(\n repositoryName='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository to delete.\n \n\n :rtype: dict\n :return: {\n 'repositoryId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_pull_request_events(pullRequestId=None, pullRequestEventType=None, actorArn=None, nextToken=None, maxResults=None):\n \"\"\"\n Returns information about one or more pull request events.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_pull_request_events(\n pullRequestId='string',\n pullRequestEventType='PULL_REQUEST_CREATED'|'PULL_REQUEST_STATUS_CHANGED'|'PULL_REQUEST_SOURCE_REFERENCE_UPDATED'|'PULL_REQUEST_MERGE_STATE_CHANGED',\n actorArn='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. To get this ID, use ListPullRequests .\n \n\n :type pullRequestEventType: string\n :param pullRequestEventType: Optional. The pull request event type about which you want to return information.\n\n :type actorArn: string\n :param actorArn: The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.\n\n :type nextToken: string\n :param nextToken: An enumeration token that when provided in a request, returns the next batch of the results.\n\n :type maxResults: integer\n :param maxResults: A non-negative integer used to limit the number of returned results. 
The default is 100 events, which is also the maximum number of events that can be returned in a result.\n\n :rtype: dict\n :return: {\n 'pullRequestEvents': [\n {\n 'pullRequestId': 'string',\n 'eventDate': datetime(2015, 1, 1),\n 'pullRequestEventType': 'PULL_REQUEST_CREATED'|'PULL_REQUEST_STATUS_CHANGED'|'PULL_REQUEST_SOURCE_REFERENCE_UPDATED'|'PULL_REQUEST_MERGE_STATE_CHANGED',\n 'actorArn': 'string',\n 'pullRequestCreatedEventMetadata': {\n 'repositoryName': 'string',\n 'sourceCommitId': 'string',\n 'destinationCommitId': 'string',\n 'mergeBase': 'string'\n },\n 'pullRequestStatusChangedEventMetadata': {\n 'pullRequestStatus': 'OPEN'|'CLOSED'\n },\n 'pullRequestSourceReferenceUpdatedEventMetadata': {\n 'repositoryName': 'string',\n 'beforeCommitId': 'string',\n 'afterCommitId': 'string',\n 'mergeBase': 'string'\n },\n 'pullRequestMergedStateChangedEventMetadata': {\n 'repositoryName': 'string',\n 'destinationReference': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_blob(repositoryName=None, blobId=None):\n \"\"\"\n Returns the base-64 encoded content of an individual blob within a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_blob(\n repositoryName='string',\n blobId='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository that contains the blob.\n \n\n :type blobId: string\n :param blobId: [REQUIRED]\n The ID of the blob, which is its SHA-1 pointer.\n \n\n :rtype: dict\n :return: {\n 'content': b'bytes'\n }\n \n \n \"\"\"\n pass\n\ndef get_branch(repositoryName=None, branchName=None):\n \"\"\"\n Returns information about a repository branch, including its name and the last commit ID.\n See also: AWS API Documentation\n \n \n :example: response = client.get_branch(\n repositoryName='string',\n branchName='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: The name of the repository that contains the branch for which you want to retrieve information.\n\n :type branchName: string\n :param branchName: The name of the branch for which you want to retrieve information.\n\n :rtype: dict\n :return: {\n 'branch': {\n 'branchName': 'string',\n 'commitId': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_comment(commentId=None):\n \"\"\"\n Returns the content of a comment made on a change, file, or commit in a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_comment(\n commentId='string'\n )\n \n \n :type commentId: string\n :param commentId: [REQUIRED]\n The unique, system-generated ID of the comment. 
To get this ID, use GetCommentsForComparedCommit or GetCommentsForPullRequest .\n \n\n :rtype: dict\n :return: {\n 'comment': {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_comments_for_compared_commit(repositoryName=None, beforeCommitId=None, afterCommitId=None, nextToken=None, maxResults=None):\n \"\"\"\n Returns information about comments made on the comparison between two commits.\n See also: AWS API Documentation\n \n \n :example: response = client.get_comments_for_compared_commit(\n repositoryName='string',\n beforeCommitId='string',\n afterCommitId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where you want to compare commits.\n \n\n :type beforeCommitId: string\n :param beforeCommitId: To establish the directionality of the comparison, the full commit ID of the 'before' commit.\n\n :type afterCommitId: string\n :param afterCommitId: [REQUIRED]\n To establish the directionality of the comparison, the full commit ID of the 'after' commit.\n \n\n :type nextToken: string\n :param nextToken: An enumeration token that when provided in a request, returns the next batch of the results.\n\n :type maxResults: integer\n :param maxResults: A non-negative integer used to limit the number of returned results. The default is 100 comments, and is configurable up to 500.\n\n :rtype: dict\n :return: {\n 'commentsForComparedCommitData': [\n {\n 'repositoryName': 'string',\n 'beforeCommitId': 'string',\n 'afterCommitId': 'string',\n 'beforeBlobId': 'string',\n 'afterBlobId': 'string',\n 'location': {\n 'filePath': 'string',\n 'filePosition': 123,\n 'relativeFileVersion': 'BEFORE'|'AFTER'\n },\n 'comments': [\n {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_comments_for_pull_request(pullRequestId=None, repositoryName=None, beforeCommitId=None, afterCommitId=None, nextToken=None, maxResults=None):\n \"\"\"\n Returns comments made on a pull request.\n See also: AWS API Documentation\n \n \n :example: response = client.get_comments_for_pull_request(\n pullRequestId='string',\n repositoryName='string',\n beforeCommitId='string',\n afterCommitId='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. 
To get this ID, use ListPullRequests .\n \n\n :type repositoryName: string\n :param repositoryName: The name of the repository that contains the pull request.\n\n :type beforeCommitId: string\n :param beforeCommitId: The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.\n\n :type afterCommitId: string\n :param afterCommitId: The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made.\n\n :type nextToken: string\n :param nextToken: An enumeration token that when provided in a request, returns the next batch of the results.\n\n :type maxResults: integer\n :param maxResults: A non-negative integer used to limit the number of returned results. The default is 100 comments. You can return up to 500 comments with a single request.\n\n :rtype: dict\n :return: {\n 'commentsForPullRequestData': [\n {\n 'pullRequestId': 'string',\n 'repositoryName': 'string',\n 'beforeCommitId': 'string',\n 'afterCommitId': 'string',\n 'beforeBlobId': 'string',\n 'afterBlobId': 'string',\n 'location': {\n 'filePath': 'string',\n 'filePosition': 123,\n 'relativeFileVersion': 'BEFORE'|'AFTER'\n },\n 'comments': [\n {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n },\n ]\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_commit(repositoryName=None, commitId=None):\n \"\"\"\n Returns information about a commit, including commit message and committer information.\n See also: AWS API Documentation\n \n \n :example: response = client.get_commit(\n repositoryName='string',\n commitId='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository to which the commit was made.\n \n\n :type commitId: string\n :param commitId: [REQUIRED]\n The commit ID. Commit IDs are the full SHA of the commit.\n \n\n :rtype: dict\n :return: {\n 'commit': {\n 'commitId': 'string',\n 'treeId': 'string',\n 'parents': [\n 'string',\n ],\n 'message': 'string',\n 'author': {\n 'name': 'string',\n 'email': 'string',\n 'date': 'string'\n },\n 'committer': {\n 'name': 'string',\n 'email': 'string',\n 'date': 'string'\n },\n 'additionalData': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_differences(repositoryName=None, beforeCommitSpecifier=None, afterCommitSpecifier=None, beforePath=None, afterPath=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference). Results can be limited to a specified path.\n See also: AWS API Documentation\n \n \n :example: response = client.get_differences(\n repositoryName='string',\n beforeCommitSpecifier='string',\n afterCommitSpecifier='string',\n beforePath='string',\n afterPath='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where you want to get differences.\n \n\n :type beforeCommitSpecifier: string\n :param beforeCommitSpecifier: The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, the full commit ID. Optional. If not specified, all changes prior to the afterCommitSpecifier value will be shown. 
If you do not use beforeCommitSpecifier in your request, consider limiting the results with maxResults .\n\n :type afterCommitSpecifier: string\n :param afterCommitSpecifier: [REQUIRED]\n The branch, tag, HEAD, or other fully qualified reference used to identify a commit.\n \n\n :type beforePath: string\n :param beforePath: The file path in which to check for differences. Limits the results to this path. Can also be used to specify the previous name of a directory or folder. If beforePath and afterPath are not specified, differences will be shown for all paths.\n\n :type afterPath: string\n :param afterPath: The file path in which to check differences. Limits the results to this path. Can also be used to specify the changed name of a directory or folder, if it has changed. If not specified, differences will be shown for all paths.\n\n :type MaxResults: integer\n :param MaxResults: A non-negative integer used to limit the number of returned results.\n\n :type NextToken: string\n :param NextToken: An enumeration token that when provided in a request, returns the next batch of the results.\n\n :rtype: dict\n :return: {\n 'differences': [\n {\n 'beforeBlob': {\n 'blobId': 'string',\n 'path': 'string',\n 'mode': 'string'\n },\n 'afterBlob': {\n 'blobId': 'string',\n 'path': 'string',\n 'mode': 'string'\n },\n 'changeType': 'A'|'M'|'D'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n 100644 indicates read/write\n 100755 indicates read/write/execute\n 160000 indicates a submodule\n 120000 indicates a symlink\n \n \"\"\"\n pass\n\ndef get_file(repositoryName=None, commitSpecifier=None, filePath=None):\n \"\"\"\n Returns the base-64 encoded contents of a specified file and its metadata.\n See also: AWS API Documentation\n \n \n :example: response = client.get_file(\n repositoryName='string',\n commitSpecifier='string',\n filePath='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository that contains the file.\n \n\n :type commitSpecifier: string\n :param commitSpecifier: The fully-qualified reference that identifies the commit that contains the file. For example, you could specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, then the head commit will be used.\n\n :type filePath: string\n :param filePath: [REQUIRED]\n The fully-qualified path to the file, including the full name and extension of the file. For example, /examples/file.md is the fully-qualified path to a file named file.md in a folder named examples.\n \n\n :rtype: dict\n :return: {\n 'commitId': 'string',\n 'blobId': 'string',\n 'filePath': 'string',\n 'fileMode': 'EXECUTABLE'|'NORMAL'|'SYMLINK',\n 'fileSize': 123,\n 'fileContent': b'bytes'\n }\n \n \n \"\"\"\n pass\n\ndef get_folder(repositoryName=None, commitSpecifier=None, folderPath=None):\n \"\"\"\n Returns the contents of a specified folder in a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_folder(\n repositoryName='string',\n commitSpecifier='string',\n folderPath='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository.\n \n\n :type commitSpecifier: string\n :param commitSpecifier: A fully-qualified reference used to identify a commit that contains the version of the folder's content to return. A fully-qualified reference can be a commit ID, branch name, tag, or reference such as HEAD. 
If no specifier is provided, the folder content will be returned as it exists in the HEAD commit.\n\n :type folderPath: string\n :param folderPath: [REQUIRED]\n The fully-qualified path to the folder whose contents will be returned, including the folder name. For example, /examples is a fully-qualified path to a folder named examples that was created off of the root directory (/) of a repository.\n \n\n :rtype: dict\n :return: {\n 'commitId': 'string',\n 'folderPath': 'string',\n 'treeId': 'string',\n 'subFolders': [\n {\n 'treeId': 'string',\n 'absolutePath': 'string',\n 'relativePath': 'string'\n },\n ],\n 'files': [\n {\n 'blobId': 'string',\n 'absolutePath': 'string',\n 'relativePath': 'string',\n 'fileMode': 'EXECUTABLE'|'NORMAL'|'SYMLINK'\n },\n ],\n 'symbolicLinks': [\n {\n 'blobId': 'string',\n 'absolutePath': 'string',\n 'relativePath': 'string',\n 'fileMode': 'EXECUTABLE'|'NORMAL'|'SYMLINK'\n },\n ],\n 'subModules': [\n {\n 'commitId': 'string',\n 'absolutePath': 'string',\n 'relativePath': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_merge_conflicts(repositoryName=None, destinationCommitSpecifier=None, sourceCommitSpecifier=None, mergeOption=None):\n \"\"\"\n Returns information about merge conflicts between the before and after commit IDs for a pull request in a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_merge_conflicts(\n repositoryName='string',\n destinationCommitSpecifier='string',\n sourceCommitSpecifier='string',\n mergeOption='FAST_FORWARD_MERGE'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where the pull request was created.\n \n\n :type destinationCommitSpecifier: string\n :param destinationCommitSpecifier: [REQUIRED]\n The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.\n \n\n :type sourceCommitSpecifier: string\n :param sourceCommitSpecifier: [REQUIRED]\n The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.\n \n\n :type mergeOption: string\n :param mergeOption: [REQUIRED]\n The merge option or strategy you want to use to merge the code. The only valid value is FAST_FORWARD_MERGE.\n \n\n :rtype: dict\n :return: {\n 'mergeable': True|False,\n 'destinationCommitId': 'string',\n 'sourceCommitId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_pull_request(pullRequestId=None):\n \"\"\"\n Gets information about a pull request in a specified repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_pull_request(\n pullRequestId='string'\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. 
To get this ID, use ListPullRequests .\n \n\n :rtype: dict\n :return: {\n 'pullRequest': {\n 'pullRequestId': 'string',\n 'title': 'string',\n 'description': 'string',\n 'lastActivityDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'pullRequestStatus': 'OPEN'|'CLOSED',\n 'authorArn': 'string',\n 'pullRequestTargets': [\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string',\n 'destinationCommit': 'string',\n 'sourceCommit': 'string',\n 'mergeBase': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n },\n ],\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_repository(repositoryName=None):\n \"\"\"\n Returns information about a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_repository(\n repositoryName='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository to get information about.\n \n\n :rtype: dict\n :return: {\n 'repositoryMetadata': {\n 'accountId': 'string',\n 'repositoryId': 'string',\n 'repositoryName': 'string',\n 'repositoryDescription': 'string',\n 'defaultBranch': 'string',\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'cloneUrlHttp': 'string',\n 'cloneUrlSsh': 'string',\n 'Arn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_repository_triggers(repositoryName=None):\n \"\"\"\n Gets information about triggers configured for a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.get_repository_triggers(\n repositoryName='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository for which the trigger is configured.\n \n\n :rtype: dict\n :return: {\n 'configurationId': 'string',\n 'triggers': [\n {\n 'name': 'string',\n 'destinationArn': 'string',\n 'customData': 'string',\n 'branches': [\n 'string',\n ],\n 'events': [\n 'all'|'updateReference'|'createReference'|'deleteReference',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_branches(repositoryName=None, nextToken=None):\n \"\"\"\n Gets information about one or more branches in a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.list_branches(\n repositoryName='string',\n nextToken='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository that contains the branches.\n \n\n :type nextToken: string\n :param nextToken: An enumeration token that allows the operation to batch the results.\n\n :rtype: dict\n :return: {\n 'branches': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_pull_requests(repositoryName=None, authorArn=None, pullRequestStatus=None, nextToken=None, maxResults=None):\n \"\"\"\n Returns a list of pull requests for a specified repository. 
The return list can be refined by pull request status or pull request author ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.list_pull_requests(\n repositoryName='string',\n authorArn='string',\n pullRequestStatus='OPEN'|'CLOSED',\n nextToken='string',\n maxResults=123\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository for which you want to list pull requests.\n \n\n :type authorArn: string\n :param authorArn: Optional. The Amazon Resource Name (ARN) of the user who created the pull request. If used, this filters the results to pull requests created by that user.\n\n :type pullRequestStatus: string\n :param pullRequestStatus: Optional. The status of the pull request. If used, this refines the results to the pull requests that match the specified status.\n\n :type nextToken: string\n :param nextToken: An enumeration token that when provided in a request, returns the next batch of the results.\n\n :type maxResults: integer\n :param maxResults: A non-negative integer used to limit the number of returned results.\n\n :rtype: dict\n :return: {\n 'pullRequestIds': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_repositories(nextToken=None, sortBy=None, order=None):\n \"\"\"\n Gets information about one or more repositories.\n See also: AWS API Documentation\n \n \n :example: response = client.list_repositories(\n nextToken='string',\n sortBy='repositoryName'|'lastModifiedDate',\n order='ascending'|'descending'\n )\n \n \n :type nextToken: string\n :param nextToken: An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.\n\n :type sortBy: string\n :param sortBy: The criteria used to sort the results of a list repositories operation.\n\n :type order: string\n :param order: The order in which to sort the results of a list repositories operation.\n\n :rtype: dict\n :return: {\n 'repositories': [\n {\n 'repositoryName': 'string',\n 'repositoryId': 'string'\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef merge_pull_request_by_fast_forward(pullRequestId=None, repositoryName=None, sourceCommitId=None):\n \"\"\"\n Closes a pull request and attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge option.\n See also: AWS API Documentation\n \n \n :example: response = client.merge_pull_request_by_fast_forward(\n pullRequestId='string',\n repositoryName='string',\n sourceCommitId='string'\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. To get this ID, use ListPullRequests .\n \n\n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where the pull request was created.\n \n\n :type sourceCommitId: string\n :param sourceCommitId: The full commit ID of the original or updated commit in the pull request source branch. 
Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.\n\n :rtype: dict\n :return: {\n 'pullRequest': {\n 'pullRequestId': 'string',\n 'title': 'string',\n 'description': 'string',\n 'lastActivityDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'pullRequestStatus': 'OPEN'|'CLOSED',\n 'authorArn': 'string',\n 'pullRequestTargets': [\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string',\n 'destinationCommit': 'string',\n 'sourceCommit': 'string',\n 'mergeBase': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n },\n ],\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef post_comment_for_compared_commit(repositoryName=None, beforeCommitId=None, afterCommitId=None, location=None, content=None, clientRequestToken=None):\n \"\"\"\n Posts a comment on the comparison between two commits.\n See also: AWS API Documentation\n \n \n :example: response = client.post_comment_for_compared_commit(\n repositoryName='string',\n beforeCommitId='string',\n afterCommitId='string',\n location={\n 'filePath': 'string',\n 'filePosition': 123,\n 'relativeFileVersion': 'BEFORE'|'AFTER'\n },\n content='string',\n clientRequestToken='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where you want to post a comment on the comparison between commits.\n \n\n :type beforeCommitId: string\n :param beforeCommitId: To establish the directionality of the comparison, the full commit ID of the 'before' commit.\n\n :type afterCommitId: string\n :param afterCommitId: [REQUIRED]\n To establish the directionality of the comparison, the full commit ID of the 'after' commit.\n \n\n :type location: dict\n :param location: The location of the comparison where you want to comment.\n filePath (string) --The name of the file being compared, including its extension and subdirectory, if any.\n filePosition (integer) --The position of a change within a compared file, in line number format.\n relativeFileVersion (string) --In a comparison of commits or a pull request, whether the change is in the 'before' or 'after' of that comparison.\n \n\n :type content: string\n :param content: [REQUIRED]\n The content of the comment you want to make.\n \n\n :type clientRequestToken: string\n :param clientRequestToken: A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. 
If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'repositoryName': 'string',\n 'beforeCommitId': 'string',\n 'afterCommitId': 'string',\n 'beforeBlobId': 'string',\n 'afterBlobId': 'string',\n 'location': {\n 'filePath': 'string',\n 'filePosition': 123,\n 'relativeFileVersion': 'BEFORE'|'AFTER'\n },\n 'comment': {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef post_comment_for_pull_request(pullRequestId=None, repositoryName=None, beforeCommitId=None, afterCommitId=None, location=None, content=None, clientRequestToken=None):\n \"\"\"\n Posts a comment on a pull request.\n See also: AWS API Documentation\n \n \n :example: response = client.post_comment_for_pull_request(\n pullRequestId='string',\n repositoryName='string',\n beforeCommitId='string',\n afterCommitId='string',\n location={\n 'filePath': 'string',\n 'filePosition': 123,\n 'relativeFileVersion': 'BEFORE'|'AFTER'\n },\n content='string',\n clientRequestToken='string'\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. To get this ID, use ListPullRequests .\n \n\n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where you want to post a comment on a pull request.\n \n\n :type beforeCommitId: string\n :param beforeCommitId: [REQUIRED]\n The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.\n \n\n :type afterCommitId: string\n :param afterCommitId: [REQUIRED]\n The full commit ID of the commit in the source branch that is the current tip of the branch for the pull request when you post the comment.\n \n\n :type location: dict\n :param location: The location of the change where you want to post your comment. If no location is provided, the comment will be posted as a general comment on the pull request difference between the before commit ID and the after commit ID.\n filePath (string) --The name of the file being compared, including its extension and subdirectory, if any.\n filePosition (integer) --The position of a change within a compared file, in line number format.\n relativeFileVersion (string) --In a comparison of commits or a pull request, whether the change is in the 'before' or 'after' of that comparison.\n \n\n :type content: string\n :param content: [REQUIRED]\n The content of your comment on the change.\n \n\n :type clientRequestToken: string\n :param clientRequestToken: A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. 
If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'repositoryName': 'string',\n 'pullRequestId': 'string',\n 'beforeCommitId': 'string',\n 'afterCommitId': 'string',\n 'beforeBlobId': 'string',\n 'afterBlobId': 'string',\n 'location': {\n 'filePath': 'string',\n 'filePosition': 123,\n 'relativeFileVersion': 'BEFORE'|'AFTER'\n },\n 'comment': {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef post_comment_reply(inReplyTo=None, clientRequestToken=None, content=None):\n \"\"\"\n Posts a comment in reply to an existing comment on a comparison between commits or a pull request.\n See also: AWS API Documentation\n \n \n :example: response = client.post_comment_reply(\n inReplyTo='string',\n clientRequestToken='string',\n content='string'\n )\n \n \n :type inReplyTo: string\n :param inReplyTo: [REQUIRED]\n The system-generated ID of the comment to which you want to reply. To get this ID, use GetCommentsForComparedCommit or GetCommentsForPullRequest .\n \n\n :type clientRequestToken: string\n :param clientRequestToken: A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.\n This field is autopopulated if not provided.\n \n\n :type content: string\n :param content: [REQUIRED]\n The contents of your reply to a comment.\n \n\n :rtype: dict\n :return: {\n 'comment': {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef put_file(repositoryName=None, branchName=None, fileContent=None, filePath=None, fileMode=None, parentCommitId=None, commitMessage=None, name=None, email=None):\n \"\"\"\n Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.\n See also: AWS API Documentation\n \n \n :example: response = client.put_file(\n repositoryName='string',\n branchName='string',\n fileContent=b'bytes',\n filePath='string',\n fileMode='EXECUTABLE'|'NORMAL'|'SYMLINK',\n parentCommitId='string',\n commitMessage='string',\n name='string',\n email='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where you want to add or update the file.\n \n\n :type branchName: string\n :param branchName: [REQUIRED]\n The name of the branch where you want to add or update the file. 
If this is an empty repository, this branch will be created.\n \n\n :type fileContent: bytes\n :param fileContent: [REQUIRED]\n The content of the file, in binary object format.\n \n\n :type filePath: string\n :param filePath: [REQUIRED]\n The name of the file you want to add or update, including the relative path to the file in the repository.\n Note\n If the path does not currently exist in the repository, the path will be created as part of adding the file.\n \n\n :type fileMode: string\n :param fileMode: The file mode permissions of the blob. Valid file mode permissions are listed below.\n\n :type parentCommitId: string\n :param parentCommitId: The full commit ID of the head commit in the branch where you want to add or update the file. If this is an empty repository, no commit ID is required. If this is not an empty repository, a commit ID is required.\n The commit ID must match the ID of the head commit at the time of the operation, or an error will occur, and the file will not be added or updated.\n \n\n :type commitMessage: string\n :param commitMessage: A message about why this file was added or updated. While optional, adding a message is strongly encouraged in order to provide a more useful commit history for your repository.\n\n :type name: string\n :param name: The name of the person adding or updating the file. While optional, adding a name is strongly encouraged in order to provide a more useful commit history for your repository.\n\n :type email: string\n :param email: An email address for the person adding or updating the file.\n\n :rtype: dict\n :return: {\n 'commitId': 'string',\n 'blobId': 'string',\n 'treeId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef put_repository_triggers(repositoryName=None, triggers=None):\n \"\"\"\n Replaces all triggers for a repository. This can be used to create or delete triggers.\n See also: AWS API Documentation\n \n \n :example: response = client.put_repository_triggers(\n repositoryName='string',\n triggers=[\n {\n 'name': 'string',\n 'destinationArn': 'string',\n 'customData': 'string',\n 'branches': [\n 'string',\n ],\n 'events': [\n 'all'|'updateReference'|'createReference'|'deleteReference',\n ]\n },\n ]\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository where you want to create or update the trigger.\n \n\n :type triggers: list\n :param triggers: [REQUIRED]\n The JSON block of configuration information for each trigger.\n (dict) --Information about a trigger for a repository.\n name (string) -- [REQUIRED]The name of the trigger.\n destinationArn (string) -- [REQUIRED]The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).\n customData (string) --Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.\n branches (list) --The branches that will be included in the trigger configuration. 
If you specify an empty array, the trigger will apply to all branches.\n Note\n While no content is required in the array, you must include the array itself.\n (string) --\n events (list) -- [REQUIRED]The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS).\n Note\n The valid value 'all' cannot be used with any other values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'configurationId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef test_repository_triggers(repositoryName=None, triggers=None):\n \"\"\"\n Tests the functionality of repository triggers by sending information to the trigger target. If real data is available in the repository, the test will send data from the last commit. If no data is available, sample data will be generated.\n See also: AWS API Documentation\n \n \n :example: response = client.test_repository_triggers(\n repositoryName='string',\n triggers=[\n {\n 'name': 'string',\n 'destinationArn': 'string',\n 'customData': 'string',\n 'branches': [\n 'string',\n ],\n 'events': [\n 'all'|'updateReference'|'createReference'|'deleteReference',\n ]\n },\n ]\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository in which to test the triggers.\n \n\n :type triggers: list\n :param triggers: [REQUIRED]\n The list of triggers to test.\n (dict) --Information about a trigger for a repository.\n name (string) -- [REQUIRED]The name of the trigger.\n destinationArn (string) -- [REQUIRED]The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).\n customData (string) --Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.\n branches (list) --The branches that will be included in the trigger configuration. If you specify an empty array, the trigger will apply to all branches.\n Note\n While no content is required in the array, you must include the array itself.\n (string) --\n events (list) -- [REQUIRED]The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS).\n Note\n The valid value 'all' cannot be used with any other values.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'successfulExecutions': [\n 'string',\n ],\n 'failedExecutions': [\n {\n 'trigger': 'string',\n 'failureMessage': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_comment(commentId=None, content=None):\n \"\"\"\n Replaces the contents of a comment.\n See also: AWS API Documentation\n \n \n :example: response = client.update_comment(\n commentId='string',\n content='string'\n )\n \n \n :type commentId: string\n :param commentId: [REQUIRED]\n The system-generated ID of the comment you want to update. 
To get this ID, use GetCommentsForComparedCommit or GetCommentsForPullRequest .\n \n\n :type content: string\n :param content: [REQUIRED]\n The updated content with which you want to replace the existing content of the comment.\n \n\n :rtype: dict\n :return: {\n 'comment': {\n 'commentId': 'string',\n 'content': 'string',\n 'inReplyTo': 'string',\n 'creationDate': datetime(2015, 1, 1),\n 'lastModifiedDate': datetime(2015, 1, 1),\n 'authorArn': 'string',\n 'deleted': True|False,\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_default_branch(repositoryName=None, defaultBranchName=None):\n \"\"\"\n Sets or changes the default branch name for the specified repository.\n See also: AWS API Documentation\n \n \n :example: response = client.update_default_branch(\n repositoryName='string',\n defaultBranchName='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository to set or change the default branch for.\n \n\n :type defaultBranchName: string\n :param defaultBranchName: [REQUIRED]\n The name of the branch to set as the default.\n \n\n \"\"\"\n pass\n\ndef update_pull_request_description(pullRequestId=None, description=None):\n \"\"\"\n Replaces the contents of the description of a pull request.\n See also: AWS API Documentation\n \n \n :example: response = client.update_pull_request_description(\n pullRequestId='string',\n description='string'\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. To get this ID, use ListPullRequests .\n \n\n :type description: string\n :param description: [REQUIRED]\n The updated content of the description for the pull request. This content will replace the existing description.\n \n\n :rtype: dict\n :return: {\n 'pullRequest': {\n 'pullRequestId': 'string',\n 'title': 'string',\n 'description': 'string',\n 'lastActivityDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'pullRequestStatus': 'OPEN'|'CLOSED',\n 'authorArn': 'string',\n 'pullRequestTargets': [\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string',\n 'destinationCommit': 'string',\n 'sourceCommit': 'string',\n 'mergeBase': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n },\n ],\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_pull_request_status(pullRequestId=None, pullRequestStatus=None):\n \"\"\"\n Updates the status of a pull request.\n See also: AWS API Documentation\n \n \n :example: response = client.update_pull_request_status(\n pullRequestId='string',\n pullRequestStatus='OPEN'|'CLOSED'\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. To get this ID, use ListPullRequests .\n \n\n :type pullRequestStatus: string\n :param pullRequestStatus: [REQUIRED]\n The status of the pull request. 
The only valid operations are to update the status from OPEN to OPEN, OPEN to CLOSED, or from CLOSED to CLOSED.\n \n\n :rtype: dict\n :return: {\n 'pullRequest': {\n 'pullRequestId': 'string',\n 'title': 'string',\n 'description': 'string',\n 'lastActivityDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'pullRequestStatus': 'OPEN'|'CLOSED',\n 'authorArn': 'string',\n 'pullRequestTargets': [\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string',\n 'destinationCommit': 'string',\n 'sourceCommit': 'string',\n 'mergeBase': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n },\n ],\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_pull_request_title(pullRequestId=None, title=None):\n \"\"\"\n Replaces the title of a pull request.\n See also: AWS API Documentation\n \n \n :example: response = client.update_pull_request_title(\n pullRequestId='string',\n title='string'\n )\n \n \n :type pullRequestId: string\n :param pullRequestId: [REQUIRED]\n The system-generated ID of the pull request. To get this ID, use ListPullRequests .\n \n\n :type title: string\n :param title: [REQUIRED]\n The updated title of the pull request. This will replace the existing title.\n \n\n :rtype: dict\n :return: {\n 'pullRequest': {\n 'pullRequestId': 'string',\n 'title': 'string',\n 'description': 'string',\n 'lastActivityDate': datetime(2015, 1, 1),\n 'creationDate': datetime(2015, 1, 1),\n 'pullRequestStatus': 'OPEN'|'CLOSED',\n 'authorArn': 'string',\n 'pullRequestTargets': [\n {\n 'repositoryName': 'string',\n 'sourceReference': 'string',\n 'destinationReference': 'string',\n 'destinationCommit': 'string',\n 'sourceCommit': 'string',\n 'mergeBase': 'string',\n 'mergeMetadata': {\n 'isMerged': True|False,\n 'mergedBy': 'string'\n }\n },\n ],\n 'clientRequestToken': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef update_repository_description(repositoryName=None, repositoryDescription=None):\n \"\"\"\n Sets or changes the comment or description for a repository.\n See also: AWS API Documentation\n \n \n :example: response = client.update_repository_description(\n repositoryName='string',\n repositoryDescription='string'\n )\n \n \n :type repositoryName: string\n :param repositoryName: [REQUIRED]\n The name of the repository to set or change the comment or description for.\n \n\n :type repositoryDescription: string\n :param repositoryDescription: The new comment or description for the specified repository. Repository descriptions are limited to 1,000 characters.\n\n \"\"\"\n pass\n\ndef update_repository_name(oldName=None, newName=None):\n \"\"\"\n Renames a repository. The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix \".git\" is prohibited. 
For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.update_repository_name(\n oldName='string',\n newName='string'\n )\n \n \n :type oldName: string\n :param oldName: [REQUIRED]\n The existing name of the repository.\n \n\n :type newName: string\n :param newName: [REQUIRED]\n The new name for the repository.\n \n\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6069018244743347, "alphanum_fraction": 0.6129980683326721, "avg_line_length": 39.39568328857422, "blob_id": "fb44ba76a1e4baa25972133bd20b261dd46d1024", "content_id": "5ee608c9e0045d94b260bc5edf0c639961135d45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218986, "license_type": "permissive", "max_line_length": 830, "num_lines": 5421, "path": "/pyboto3/s3.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef abort_multipart_upload(Bucket=None, Key=None, UploadId=None, RequestPayer=None):\n \"\"\"\n Aborts a multipart upload.\n To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.\n See also: AWS API Documentation\n \n \n :example: response = client.abort_multipart_upload(\n Bucket='string',\n Key='string',\n UploadId='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type UploadId: string\n :param UploadId: [REQUIRED]\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef complete_multipart_upload(Bucket=None, Key=None, MultipartUpload=None, UploadId=None, RequestPayer=None):\n \"\"\"\n Completes a multipart upload by assembling previously uploaded parts.\n See also: AWS API Documentation\n \n \n :example: response = client.complete_multipart_upload(\n Bucket='string',\n Key='string',\n MultipartUpload={\n 'Parts': [\n {\n 'ETag': 'string',\n 'PartNumber': 123\n },\n ]\n },\n UploadId='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type MultipartUpload: dict\n :param MultipartUpload: \n Parts (list) --\n (dict) --\n ETag (string) --Entity tag returned when the part was uploaded.\n PartNumber (integer) --Part number that identifies the part. This is a positive integer between 1 and 10,000.\n \n \n\n :type UploadId: string\n :param UploadId: [REQUIRED]\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'Location': 'string',\n 'Bucket': 'string',\n 'Key': 'string',\n 'Expiration': 'string',\n 'ETag': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'VersionId': 'string',\n 'SSEKMSKeyId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef copy(CopySource=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, SourceClient=None, Config=None):\n \"\"\"\n Copy an object from one S3 location to another.\n This is a managed transfer which will perform a multipart copy in\n multiple threads if necessary.\n :\n \n :example: import boto3\n s3 = boto3.resource('s3')\n copy_source = {\n 'Bucket': 'mybucket',\n 'Key': 'mykey'\n }\n s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')\n \n \n :type CopySource: dict\n :param CopySource: The name of the source bucket, key name of the\n source object, and optional version ID of the source object. The\n dictionary format is:\n {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}. Note\n that the VersionId key is optional and may be omitted.\n\n :type Bucket: str\n :param Bucket: The name of the bucket to copy to\n\n :type Key: str\n :param Key: The name of the key to copy to\n\n :type ExtraArgs: dict\n :param ExtraArgs: Extra arguments that may be passed to the\n client operation\n\n :type Callback: function\n :param Callback: A method which takes a number of bytes transferred to\n be periodically called during the copy.\n\n :type SourceClient: botocore or boto3 Client\n :param SourceClient: The client to be used for operation that\n may happen at the source object. 
For example, this client is\n used for the head_object that determines the size of the copy.\n If no client is provided, the current client is used as the client\n for the source object.\n\n :type Config: boto3.s3.transfer.TransferConfig\n :param Config: The transfer configuration to be used when performing the\n copy.\n\n \"\"\"\n pass\n\ndef copy_object(ACL=None, Bucket=None, CacheControl=None, ContentDisposition=None, ContentEncoding=None, ContentLanguage=None, ContentType=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, Expires=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWriteACP=None, Key=None, Metadata=None, MetadataDirective=None, TaggingDirective=None, ServerSideEncryption=None, StorageClass=None, WebsiteRedirectLocation=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, SSEKMSKeyId=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None, Tagging=None, ObjectLockMode=None, ObjectLockRetainUntilDate=None, ObjectLockLegalHoldStatus=None):\n \"\"\"\n Creates a copy of an object that is already stored in Amazon S3.\n See also: AWS API Documentation\n \n \n :example: response = client.copy_object(\n ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control',\n Bucket='string',\n CacheControl='string',\n ContentDisposition='string',\n ContentEncoding='string',\n ContentLanguage='string',\n ContentType='string',\n CopySource='string' or {'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'},\n CopySourceIfMatch='string',\n CopySourceIfModifiedSince=datetime(2015, 1, 1),\n CopySourceIfNoneMatch='string',\n CopySourceIfUnmodifiedSince=datetime(2015, 1, 1),\n Expires=datetime(2015, 1, 1),\n GrantFullControl='string',\n GrantRead='string',\n GrantReadACP='string',\n GrantWriteACP='string',\n Key='string',\n Metadata={\n 'string': 'string'\n },\n MetadataDirective='COPY'|'REPLACE',\n TaggingDirective='COPY'|'REPLACE',\n ServerSideEncryption='AES256'|'aws:kms',\n StorageClass='STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n WebsiteRedirectLocation='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n SSEKMSKeyId='string',\n CopySourceSSECustomerAlgorithm='string',\n CopySourceSSECustomerKey='string',\n RequestPayer='requester',\n Tagging='string',\n ObjectLockMode='GOVERNANCE'|'COMPLIANCE',\n ObjectLockRetainUntilDate=datetime(2015, 1, 1),\n ObjectLockLegalHoldStatus='ON'|'OFF'\n )\n \n \n :type ACL: string\n :param ACL: The canned ACL to apply to the object.\n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type CacheControl: string\n :param CacheControl: Specifies caching behavior along the request/reply chain.\n\n :type ContentDisposition: string\n :param ContentDisposition: Specifies presentational information for the object.\n\n :type ContentEncoding: string\n :param ContentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.\n\n :type ContentLanguage: string\n :param ContentLanguage: The language the content is in.\n\n :type ContentType: string\n :param ContentType: A standard MIME type describing the format of the object data.\n\n :type CopySource: str or dict\n :param CopySource: 
[REQUIRED] The name of the source bucket, key name of the source object, and optional version ID of the source object. You can provide this value either as a string or as a dictionary. The string form is {bucket}/{key}, or {bucket}/{key}?versionId={versionId} if you want to copy a specific version. The dictionary format is recommended over the string format because it is more explicit: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}. Note that the VersionId key is optional and may be omitted.

    :type CopySourceIfMatch: string
    :param CopySourceIfMatch: Copies the object if its entity tag (ETag) matches the specified tag.

    :type CopySourceIfModifiedSince: datetime
    :param CopySourceIfModifiedSince: Copies the object if it has been modified since the specified time.

    :type CopySourceIfNoneMatch: string
    :param CopySourceIfNoneMatch: Copies the object if its entity tag (ETag) is different than the specified ETag.

    :type CopySourceIfUnmodifiedSince: datetime
    :param CopySourceIfUnmodifiedSince: Copies the object if it hasn't been modified since the specified time.

    :type Expires: datetime
    :param Expires: The date and time at which the object is no longer cacheable.

    :type GrantFullControl: string
    :param GrantFullControl: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    :type GrantRead: string
    :param GrantRead: Allows grantee to read the object data and its metadata.

    :type GrantReadACP: string
    :param GrantReadACP: Allows grantee to read the object ACL.

    :type GrantWriteACP: string
    :param GrantWriteACP: Allows grantee to write the ACL for the applicable object.

    :type Key: string
    :param Key: [REQUIRED]

    :type Metadata: dict
    :param Metadata: A map of metadata to store with the object in S3.
        (string) --
            (string) --

    :type MetadataDirective: string
    :param MetadataDirective: Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.

    :type TaggingDirective: string
    :param TaggingDirective: Specifies whether the object tag-set is copied from the source object or replaced with the tag-set provided in the request.

    :type ServerSideEncryption: string
    :param ServerSideEncryption: The server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    :type StorageClass: string
    :param StorageClass: The type of storage to use for the object. Defaults to 'STANDARD'.

    :type WebsiteRedirectLocation: string
    :param WebsiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    :type SSECustomerAlgorithm: string
    :param SSECustomerAlgorithm: Specifies the algorithm to use when encrypting the object (e.g., AES256).

    :type SSECustomerKey: string
    :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    :type SSECustomerKeyMD5: string
    :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.
        Please note that this parameter is automatically populated if it is not provided; including it is not required.
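        As an illustrative SSE-C sketch (bucket and key names are
        assumptions), boto3 derives the MD5 header automatically when a raw
        key is supplied:

            import os
            import boto3
            s3 = boto3.client('s3')
            sse_key = os.urandom(32)  # 256-bit customer-provided key (illustrative)
            s3.copy_object(
                CopySource={'Bucket': 'src-bucket', 'Key': 'data.bin'},
                Bucket='dst-bucket', Key='data.bin',
                SSECustomerAlgorithm='AES256',
                SSECustomerKey=sse_key  # SSECustomerKeyMD5 is filled in for you
            )
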
    :type SSEKMSKeyId: string
    :param SSEKMSKeyId: Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    :type CopySourceSSECustomerAlgorithm: string
    :param CopySourceSSECustomerAlgorithm: Specifies the algorithm to use when decrypting the source object (e.g., AES256).

    :type CopySourceSSECustomerKey: string
    :param CopySourceSSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.

    :type CopySourceSSECustomerKeyMD5: string
    :param CopySourceSSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.
        Please note that this parameter is automatically populated if it is not provided; including it is not required.

    :type RequestPayer: string
    :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

    :type Tagging: string
    :param Tagging: The tag-set for the destination object; this value must be used in conjunction with the TaggingDirective. 
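        For example (illustrative names; note the URL-query encoding of the
        tag-set, which is spelled out next):

            s3.copy_object(
                CopySource={'Bucket': 'src-bucket', 'Key': 'report.csv'},
                Bucket='dst-bucket', Key='report.csv',
                TaggingDirective='REPLACE',
                Tagging='project=alpha&stage=raw'
            )
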
The tag-set must be encoded as URL Query parameters\n\n :type ObjectLockMode: string\n :param ObjectLockMode: The Object Lock mode that you want to apply to the copied object.\n\n :type ObjectLockRetainUntilDate: datetime\n :param ObjectLockRetainUntilDate: The date and time when you want the copied object's Object Lock to expire.\n\n :type ObjectLockLegalHoldStatus: string\n :param ObjectLockLegalHoldStatus: Specifies whether you want to apply a Legal Hold to the copied object.\n\n :rtype: dict\n :return: {\n 'CopyObjectResult': {\n 'ETag': 'string',\n 'LastModified': datetime(2015, 1, 1)\n },\n 'Expiration': 'string',\n 'CopySourceVersionId': 'string',\n 'VersionId': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n :returns: \n ETag (string) --\n LastModified (datetime) --\n \n \"\"\"\n pass\n\ndef create_bucket(ACL=None, Bucket=None, CreateBucketConfiguration=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWrite=None, GrantWriteACP=None, ObjectLockEnabledForBucket=None):\n \"\"\"\n Creates a new bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.create_bucket(\n ACL='private'|'public-read'|'public-read-write'|'authenticated-read',\n Bucket='string',\n CreateBucketConfiguration={\n 'LocationConstraint': 'EU'|'eu-west-1'|'us-west-1'|'us-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1'|'cn-north-1'|'eu-central-1'\n },\n GrantFullControl='string',\n GrantRead='string',\n GrantReadACP='string',\n GrantWrite='string',\n GrantWriteACP='string',\n ObjectLockEnabledForBucket=True|False\n )\n \n \n :type ACL: string\n :param ACL: The canned ACL to apply to the bucket.\n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type CreateBucketConfiguration: dict\n :param CreateBucketConfiguration: \n LocationConstraint (string) --Specifies the region where the bucket will be created. 
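        For instance (bucket name and region are illustrative):

            import boto3
            s3 = boto3.client('s3', region_name='eu-west-1')
            s3.create_bucket(
                Bucket='my-example-bucket',
                CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}
            )
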
If you don't specify a region, the bucket will be created in US Standard.\n \n\n :type GrantFullControl: string\n :param GrantFullControl: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.\n\n :type GrantRead: string\n :param GrantRead: Allows grantee to list the objects in the bucket.\n\n :type GrantReadACP: string\n :param GrantReadACP: Allows grantee to read the bucket ACL.\n\n :type GrantWrite: string\n :param GrantWrite: Allows grantee to create, overwrite, and delete any object in the bucket.\n\n :type GrantWriteACP: string\n :param GrantWriteACP: Allows grantee to write the ACL for the applicable bucket.\n\n :type ObjectLockEnabledForBucket: boolean\n :param ObjectLockEnabledForBucket: Specifies whether you want S3 Object Lock to be enabled for the new bucket.\n\n :rtype: dict\n :return: {\n 'Location': 'string'\n }\n \n \n :returns: \n (dict) --\n Location (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_multipart_upload(ACL=None, Bucket=None, CacheControl=None, ContentDisposition=None, ContentEncoding=None, ContentLanguage=None, ContentType=None, Expires=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWriteACP=None, Key=None, Metadata=None, ServerSideEncryption=None, StorageClass=None, WebsiteRedirectLocation=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, SSEKMSKeyId=None, RequestPayer=None, Tagging=None, ObjectLockMode=None, ObjectLockRetainUntilDate=None, ObjectLockLegalHoldStatus=None):\n \"\"\"\n Initiates a multipart upload and returns an upload ID.\n See also: AWS API Documentation\n \n \n :example: response = client.create_multipart_upload(\n ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control',\n Bucket='string',\n CacheControl='string',\n ContentDisposition='string',\n ContentEncoding='string',\n ContentLanguage='string',\n ContentType='string',\n Expires=datetime(2015, 1, 1),\n GrantFullControl='string',\n GrantRead='string',\n GrantReadACP='string',\n GrantWriteACP='string',\n Key='string',\n Metadata={\n 'string': 'string'\n },\n ServerSideEncryption='AES256'|'aws:kms',\n StorageClass='STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n WebsiteRedirectLocation='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n SSEKMSKeyId='string',\n RequestPayer='requester',\n Tagging='string',\n ObjectLockMode='GOVERNANCE'|'COMPLIANCE',\n ObjectLockRetainUntilDate=datetime(2015, 1, 1),\n ObjectLockLegalHoldStatus='ON'|'OFF'\n )\n \n \n :type ACL: string\n :param ACL: The canned ACL to apply to the object.\n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type CacheControl: string\n :param CacheControl: Specifies caching behavior along the request/reply chain.\n\n :type ContentDisposition: string\n :param ContentDisposition: Specifies presentational information for the object.\n\n :type ContentEncoding: string\n :param ContentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.\n\n :type ContentLanguage: string\n :param ContentLanguage: The language the content is in.\n\n :type ContentType: string\n :param ContentType: A standard MIME type describing the format of the object data.\n\n :type Expires: datetime\n :param Expires: The date and time at which the object is no longer cacheable.\n\n :type 
GrantFullControl: string
    :param GrantFullControl: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    :type GrantRead: string
    :param GrantRead: Allows grantee to read the object data and its metadata.

    :type GrantReadACP: string
    :param GrantReadACP: Allows grantee to read the object ACL.

    :type GrantWriteACP: string
    :param GrantWriteACP: Allows grantee to write the ACL for the applicable object.

    :type Key: string
    :param Key: [REQUIRED]

    :type Metadata: dict
    :param Metadata: A map of metadata to store with the object in S3.
        (string) --
            (string) --

    :type ServerSideEncryption: string
    :param ServerSideEncryption: The server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    :type StorageClass: string
    :param StorageClass: The type of storage to use for the object. Defaults to 'STANDARD'.

    :type WebsiteRedirectLocation: string
    :param WebsiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    :type SSECustomerAlgorithm: string
    :param SSECustomerAlgorithm: Specifies the algorithm to use when encrypting the object (e.g., AES256).

    :type SSECustomerKey: string
    :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    :type SSECustomerKeyMD5: string
    :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.
        Please note that this parameter is automatically populated if it is not provided; including it is not required.

    :type SSEKMSKeyId: string
    :param SSEKMSKeyId: Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    :type RequestPayer: string
    :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

    :type Tagging: string
    :param Tagging: The tag-set for the object. 
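        As a broader usage sketch for this operation (names are illustrative;
        the tag-set encoding is described next), the full multipart flow is:

            import boto3
            s3 = boto3.client('s3')
            mpu = s3.create_multipart_upload(Bucket='my-bucket', Key='big.bin')
            part = s3.upload_part(Bucket='my-bucket', Key='big.bin',
                                  PartNumber=1, UploadId=mpu['UploadId'],
                                  Body=b'part data; all parts but the last must be >= 5 MB')
            s3.complete_multipart_upload(
                Bucket='my-bucket', Key='big.bin', UploadId=mpu['UploadId'],
                MultipartUpload={'Parts': [{'ETag': part['ETag'], 'PartNumber': 1}]}
            )
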
The tag-set must be encoded as URL Query parameters\n\n :type ObjectLockMode: string\n :param ObjectLockMode: Specifies the Object Lock mode that you want to apply to the uploaded object.\n\n :type ObjectLockRetainUntilDate: datetime\n :param ObjectLockRetainUntilDate: Specifies the date and time when you want the Object Lock to expire.\n\n :type ObjectLockLegalHoldStatus: string\n :param ObjectLockLegalHoldStatus: Specifies whether you want to apply a Legal Hold to the uploaded object.\n\n :rtype: dict\n :return: {\n 'AbortDate': datetime(2015, 1, 1),\n 'AbortRuleId': 'string',\n 'Bucket': 'string',\n 'Key': 'string',\n 'UploadId': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef delete_bucket(Bucket=None):\n \"\"\"\n Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef delete_bucket_analytics_configuration(Bucket=None, Id=None):\n \"\"\"\n Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_analytics_configuration(\n Bucket='string',\n Id='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket from which an analytics configuration is deleted.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier used to represent an analytics configuration.\n \n\n \"\"\"\n pass\n\ndef delete_bucket_cors(Bucket=None):\n \"\"\"\n Deletes the CORS configuration information set for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_cors(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef delete_bucket_encryption(Bucket=None):\n \"\"\"\n Deletes the server-side encryption configuration from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_encryption(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the server-side encryption configuration to delete.\n \n\n \"\"\"\n pass\n\ndef delete_bucket_inventory_configuration(Bucket=None, Id=None):\n \"\"\"\n Deletes an inventory configuration (identified by the inventory ID) from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_inventory_configuration(\n Bucket='string',\n Id='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the inventory configuration to delete.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID used to identify the inventory configuration.\n \n\n \"\"\"\n pass\n\ndef delete_bucket_lifecycle(Bucket=None):\n \"\"\"\n Deletes the lifecycle configuration from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_lifecycle(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef delete_bucket_metrics_configuration(Bucket=None, Id=None):\n \"\"\"\n Deletes a metrics configuration (specified by the metrics configuration ID) from 
the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_metrics_configuration(\n Bucket='string',\n Id='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the metrics configuration to delete.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID used to identify the metrics configuration.\n \n\n \"\"\"\n pass\n\ndef delete_bucket_policy(Bucket=None):\n \"\"\"\n Deletes the policy from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_policy(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef delete_bucket_replication(Bucket=None):\n \"\"\"\n Deletes the replication configuration from the bucket. For information about replication configuration, see `Cross-Region Replication (CRR) < https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html>`__ in the Amazon S3 Developer Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_replication(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket name.\n Note\n It can take a while to propagate the deletion of a replication configuration to all Amazon S3 systems.\n \n\n \"\"\"\n pass\n\ndef delete_bucket_tagging(Bucket=None):\n \"\"\"\n Deletes the tags from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_tagging(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef delete_bucket_website(Bucket=None):\n \"\"\"\n This operation removes the website configuration from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_bucket_website(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef delete_object(Bucket=None, Key=None, MFA=None, VersionId=None, RequestPayer=None, BypassGovernanceRetention=None):\n \"\"\"\n Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_object(\n Bucket='string',\n Key='string',\n MFA='string',\n VersionId='string',\n RequestPayer='requester',\n BypassGovernanceRetention=True|False\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type MFA: string\n :param MFA: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.\n\n :type VersionId: string\n :param VersionId: VersionId used to reference a specific version of the object.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
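        As a usage sketch for this operation (bucket and key are illustrative):

            import boto3
            s3 = boto3.client('s3')
            # On a versioned bucket this inserts a delete marker instead of
            # removing data; the response reports what happened.
            resp = s3.delete_object(Bucket='my-bucket', Key='old-report.csv')
            print(resp.get('DeleteMarker'), resp.get('VersionId'))
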
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type BypassGovernanceRetention: boolean\n :param BypassGovernanceRetention: Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation.\n\n :rtype: dict\n :return: {\n 'DeleteMarker': True|False,\n 'VersionId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef delete_object_tagging(Bucket=None, Key=None, VersionId=None):\n \"\"\"\n Removes the tag-set from an existing object.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_object_tagging(\n Bucket='string',\n Key='string',\n VersionId='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type VersionId: string\n :param VersionId: The versionId of the object that the tag-set will be removed from.\n\n :rtype: dict\n :return: {\n 'VersionId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_objects(Bucket=None, Delete=None, MFA=None, RequestPayer=None, BypassGovernanceRetention=None):\n \"\"\"\n This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_objects(\n Bucket='string',\n Delete={\n 'Objects': [\n {\n 'Key': 'string',\n 'VersionId': 'string'\n },\n ],\n 'Quiet': True|False\n },\n MFA='string',\n RequestPayer='requester',\n BypassGovernanceRetention=True|False\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Delete: dict\n :param Delete: [REQUIRED]\n Objects (list) -- [REQUIRED]\n (dict) --\n Key (string) -- [REQUIRED]Key name of the object to delete.\n VersionId (string) --VersionId for the specific version of the object to delete.\n \n Quiet (boolean) --Element to enable quiet mode for the request. When you add this element, you must set its value to true.\n \n\n :type MFA: string\n :param MFA: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type BypassGovernanceRetention: boolean\n :param BypassGovernanceRetention: Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. 
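        As a usage sketch for this batch operation (names are illustrative):

            import boto3
            s3 = boto3.client('s3')
            resp = s3.delete_objects(
                Bucket='my-bucket',
                Delete={'Objects': [{'Key': 'a.txt'}, {'Key': 'b.txt'}],
                        'Quiet': True}
            )
            for err in resp.get('Errors', []):
                print(err['Key'], err['Code'], err['Message'])
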
You must have sufficient permissions to perform this operation.

    :rtype: dict
    :return: {
        'Deleted': [
            {
                'Key': 'string',
                'VersionId': 'string',
                'DeleteMarker': True|False,
                'DeleteMarkerVersionId': 'string'
            },
        ],
        'RequestCharged': 'requester',
        'Errors': [
            {
                'Key': 'string',
                'VersionId': 'string',
                'Code': 'string',
                'Message': 'string'
            },
        ]
    }

    :returns:
        (dict) --
            Key (string) --
            VersionId (string) --
            DeleteMarker (boolean) --
            DeleteMarkerVersionId (string) --

    """
    pass

def delete_public_access_block(Bucket=None):
    """
    Removes the PublicAccessBlock configuration from an Amazon S3 bucket.
    See also: AWS API Documentation

    :example: response = client.delete_public_access_block(
        Bucket='string'
    )

    :type Bucket: string
    :param Bucket: [REQUIRED]
        The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.

    """
    pass

def download_file(Bucket=None, Key=None, Filename=None, ExtraArgs=None, Callback=None, Config=None):
    """
    Download an S3 object to a file.
    Similar behavior to S3Transfer's download_file() method, except that
    parameters are capitalized. Detailed examples can be found in the
    S3Transfer usage documentation.

    :example: import boto3
    s3 = boto3.resource('s3')
    s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')

    :type Bucket: str
    :param Bucket: The name of the bucket to download from.

    :type Key: str
    :param Key: The name of the key to download from.

    :type Filename: str
    :param Filename: The path to the file to download to.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.

    :type Callback: function
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the download.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        transfer.

    """
    pass

def download_fileobj(Fileobj=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):
    """
    Download an object from S3 to a file-like object.
    The file-like object must be in binary mode.
    This is a managed transfer which will perform a multipart download in
    multiple threads if necessary.

    :example: import boto3
    s3 = boto3.client('s3')

    with open('filename', 'wb') as data:
        s3.download_fileobj('mybucket', 'mykey', data)

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into. At a minimum, it must
        implement the write method and must accept bytes.

    :type Bucket: str
    :param Bucket: The name of the bucket to download from.

    :type Key: str
    :param Key: The name of the key to download from.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.

    :type Callback: function
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the download.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        download.

    """
    pass

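"""
Usage note (an added sketch, not part of the generated stubs): the
generate_presigned_post function defined below returns a URL plus form
fields for browser-style uploads. The bucket name, key prefix, size limits
and the use of the third-party 'requests' package are assumptions for
illustration only.

    import boto3
    import requests

    s3 = boto3.client('s3')
    post = s3.generate_presigned_post(
        Bucket='my-example-bucket',
        Key='uploads/${filename}',
        Conditions=[['content-length-range', 1, 10485760]],
        ExpiresIn=3600
    )
    # POST the returned fields plus the file to the returned URL
    with open('report.csv', 'rb') as f:
        requests.post(post['url'], data=post['fields'], files={'file': f})
"""
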
def generate_presigned_post(Bucket=None, Key=None, Fields=None, Conditions=None, ExpiresIn=None):
    """
    Builds the URL and the form fields used for a presigned S3 POST.

    :type Bucket: string
    :param Bucket: The name of the bucket to presign the post to. Note that
        bucket related conditions should not be included in the
        conditions parameter.

    :type Key: string
    :param Key: Key name, optionally add ${filename} to the end to
        attach the submitted filename. Note that key related conditions and
        fields are filled out for you and should not be included in the
        Fields or Conditions parameter.

    :type Fields: dict
    :param Fields: A dictionary of prefilled form fields to build on top
        of. Elements that may be included are acl, Cache-Control,
        Content-Type, Content-Disposition, Content-Encoding, Expires,
        success_action_redirect, redirect, success_action_status,
        and x-amz-meta-.
        Note that if a particular element is included in the fields
        dictionary it will not be automatically added to the conditions
        list. You must specify a condition for the element as well.

    :type Conditions: list
    :param Conditions: A list of conditions to include in the policy. Each
        element can be either a list or a structure. For example:
        [
            {'acl': 'public-read'},
            ['content-length-range', 2, 5],
            ['starts-with', '$success_action_redirect', '']
        ]
        Conditions that are included may pertain to acl,
        content-length-range, Cache-Control, Content-Type,
        Content-Disposition, Content-Encoding, Expires,
        success_action_redirect, redirect, success_action_status,
        and/or x-amz-meta-.
        Note that if you include a condition, you must specify
        a valid value in the fields dictionary as well. A value will
        not be added automatically to the fields dictionary based on the
        conditions.

    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned post
        is valid for.

    :rtype: dict
    """
    pass

def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.

    :type Params: dict
    :param Params: The parameters normally passed to
        ClientMethod.

    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds).

    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. 
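        For instance (names are illustrative), a time-limited GET link:

            import boto3
            s3 = boto3.client('s3')
            url = s3.generate_presigned_url(
                'get_object',
                Params={'Bucket': 'my-bucket', 'Key': 'report.csv'},
                ExpiresIn=900  # 15 minutes
            )
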
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_bucket_accelerate_configuration(Bucket=None):\n \"\"\"\n Returns the accelerate configuration of a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_accelerate_configuration(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket for which the accelerate configuration is retrieved.\n \n\n :rtype: dict\n :return: {\n 'Status': 'Enabled'|'Suspended'\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_acl(Bucket=None):\n \"\"\"\n Gets the access control policy for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_acl(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n },\n 'Grants': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'WRITE'|'WRITE_ACP'|'READ'|'READ_ACP'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_analytics_configuration(Bucket=None, Id=None):\n \"\"\"\n Gets an analytics configuration for the bucket (specified by the analytics configuration ID).\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_analytics_configuration(\n Bucket='string',\n Id='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket from which an analytics configuration is retrieved.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier used to represent an analytics configuration.\n \n\n :rtype: dict\n :return: {\n 'AnalyticsConfiguration': {\n 'Id': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'StorageClassAnalysis': {\n 'DataExport': {\n 'OutputSchemaVersion': 'V_1',\n 'Destination': {\n 'S3BucketDestination': {\n 'Format': 'CSV',\n 'BucketAccountId': 'string',\n 'Bucket': 'string',\n 'Prefix': 'string'\n }\n }\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_cors(Bucket=None):\n \"\"\"\n Returns the CORS configuration for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_cors(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'CORSRules': [\n {\n 'AllowedHeaders': [\n 'string',\n ],\n 'AllowedMethods': [\n 'string',\n ],\n 'AllowedOrigins': [\n 'string',\n ],\n 'ExposeHeaders': [\n 'string',\n ],\n 'MaxAgeSeconds': 123\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_bucket_encryption(Bucket=None):\n \"\"\"\n Returns the server-side encryption configuration of a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_encryption(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket from which the server-side encryption configuration is retrieved.\n \n\n :rtype: dict\n :return: {\n 'ServerSideEncryptionConfiguration': {\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256'|'aws:kms',\n 'KMSMasterKeyID': 'string'\n }\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_inventory_configuration(Bucket=None, Id=None):\n 
\"\"\"\n Returns an inventory configuration (identified by the inventory ID) from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_inventory_configuration(\n Bucket='string',\n Id='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the inventory configuration to retrieve.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID used to identify the inventory configuration.\n \n\n :rtype: dict\n :return: {\n 'InventoryConfiguration': {\n 'Destination': {\n 'S3BucketDestination': {\n 'AccountId': 'string',\n 'Bucket': 'string',\n 'Format': 'CSV'|'ORC'|'Parquet',\n 'Prefix': 'string',\n 'Encryption': {\n 'SSES3': {},\n 'SSEKMS': {\n 'KeyId': 'string'\n }\n }\n }\n },\n 'IsEnabled': True|False,\n 'Filter': {\n 'Prefix': 'string'\n },\n 'Id': 'string',\n 'IncludedObjectVersions': 'All'|'Current',\n 'OptionalFields': [\n 'Size'|'LastModifiedDate'|'StorageClass'|'ETag'|'IsMultipartUploaded'|'ReplicationStatus'|'EncryptionStatus'|'ObjectLockRetainUntilDate'|'ObjectLockMode'|'ObjectLockLegalHoldStatus',\n ],\n 'Schedule': {\n 'Frequency': 'Daily'|'Weekly'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_bucket_lifecycle(Bucket=None):\n \"\"\"\n Deprecated, see the GetBucketLifecycleConfiguration operation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_lifecycle(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'Expiration': {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'ExpiredObjectDeleteMarker': True|False\n },\n 'ID': 'string',\n 'Prefix': 'string',\n 'Status': 'Enabled'|'Disabled',\n 'Transition': {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n 'NoncurrentVersionTransition': {\n 'NoncurrentDays': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 123\n },\n 'AbortIncompleteMultipartUpload': {\n 'DaysAfterInitiation': 123\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_lifecycle_configuration(Bucket=None):\n \"\"\"\n Returns the lifecycle configuration information set on the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_lifecycle_configuration(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'Rules': [\n {\n 'Expiration': {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'ExpiredObjectDeleteMarker': True|False\n },\n 'ID': 'string',\n 'Prefix': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'Status': 'Enabled'|'Disabled',\n 'Transitions': [\n {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n ],\n 'NoncurrentVersionTransitions': [\n {\n 'NoncurrentDays': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n ],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 123\n },\n 'AbortIncompleteMultipartUpload': {\n 'DaysAfterInitiation': 123\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_location(Bucket=None):\n \"\"\"\n Returns the region the bucket resides in.\n See also: AWS API 
Documentation\n \n \n :example: response = client.get_bucket_location(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'LocationConstraint': 'EU'|'eu-west-1'|'us-west-1'|'us-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1'|'cn-north-1'|'eu-central-1'\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_logging(Bucket=None):\n \"\"\"\n Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_logging(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'LoggingEnabled': {\n 'TargetBucket': 'string',\n 'TargetGrants': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'READ'|'WRITE'\n },\n ],\n 'TargetPrefix': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_metrics_configuration(Bucket=None, Id=None):\n \"\"\"\n Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_metrics_configuration(\n Bucket='string',\n Id='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the metrics configuration to retrieve.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID used to identify the metrics configuration.\n \n\n :rtype: dict\n :return: {\n 'MetricsConfiguration': {\n 'Id': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_notification(Bucket=None):\n \"\"\"\n Deprecated, see the GetBucketNotificationConfiguration operation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_notification(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket to get the notification configuration for.\n \n\n :rtype: dict\n :return: {\n 'TopicConfiguration': {\n 'Id': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Event': 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n 'Topic': 'string'\n },\n 'QueueConfiguration': {\n 'Id': 'string',\n 'Event': 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n 'Events': [\n 
's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Queue': 'string'\n },\n 'CloudFunctionConfiguration': {\n 'Id': 'string',\n 'Event': 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'CloudFunction': 'string',\n 'InvocationRole': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_notification_configuration(Bucket=None):\n \"\"\"\n Returns the notification configuration of a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_notification_configuration(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket to get the notification configuration for.\n \n\n :rtype: dict\n :return: {\n 'TopicConfigurations': [\n {\n 'Id': 'string',\n 'TopicArn': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Filter': {\n 'Key': {\n 'FilterRules': [\n {\n 'Name': 'prefix'|'suffix',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ],\n 'QueueConfigurations': [\n {\n 'Id': 'string',\n 'QueueArn': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Filter': {\n 'Key': {\n 'FilterRules': [\n {\n 'Name': 'prefix'|'suffix',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ],\n 'LambdaFunctionConfigurations': [\n {\n 'Id': 'string',\n 'LambdaFunctionArn': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Filter': {\n 'Key': {\n 'FilterRules': [\n {\n 'Name': 'prefix'|'suffix',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_policy(Bucket=None):\n \"\"\"\n Returns the policy of a specified bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_policy(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'Policy': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_policy_status(Bucket=None):\n \"\"\"\n 
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_policy_status(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the Amazon S3 bucket whose policy status you want to retrieve.\n \n\n :rtype: dict\n :return: {\n 'PolicyStatus': {\n 'IsPublic': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_replication(Bucket=None):\n \"\"\"\n Returns the replication configuration of a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_replication(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'ReplicationConfiguration': {\n 'Role': 'string',\n 'Rules': [\n {\n 'ID': 'string',\n 'Priority': 123,\n 'Prefix': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'Status': 'Enabled'|'Disabled',\n 'SourceSelectionCriteria': {\n 'SseKmsEncryptedObjects': {\n 'Status': 'Enabled'|'Disabled'\n }\n },\n 'Destination': {\n 'Bucket': 'string',\n 'Account': 'string',\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n 'AccessControlTranslation': {\n 'Owner': 'Destination'\n },\n 'EncryptionConfiguration': {\n 'ReplicaKmsKeyID': 'string'\n }\n },\n 'DeleteMarkerReplication': {\n 'Status': 'Enabled'|'Disabled'\n }\n },\n ]\n }\n }\n \n \n :returns: \n If you specify both a Prefix and a Tag filter, wrap these filters in an And tag.\n If you specify a filter based on multiple tags, wrap the Tag elements in an And tag.\n \n \"\"\"\n pass\n\ndef get_bucket_request_payment(Bucket=None):\n \"\"\"\n Returns the request payment configuration of a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_request_payment(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'Payer': 'Requester'|'BucketOwner'\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_tagging(Bucket=None):\n \"\"\"\n Returns the tag set associated with the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_tagging(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'TagSet': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_versioning(Bucket=None):\n \"\"\"\n Returns the versioning state of a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_versioning(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'Status': 'Enabled'|'Suspended',\n 'MFADelete': 'Enabled'|'Disabled'\n }\n \n \n \"\"\"\n pass\n\ndef get_bucket_website(Bucket=None):\n \"\"\"\n Returns the website configuration for a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_bucket_website(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'RedirectAllRequestsTo': {\n 'HostName': 'string',\n 'Protocol': 'http'|'https'\n },\n 'IndexDocument': {\n 'Suffix': 'string'\n },\n 'ErrorDocument': {\n 'Key': 'string'\n },\n 'RoutingRules': [\n {\n 'Condition': {\n 'HttpErrorCodeReturnedEquals': 'string',\n 
'KeyPrefixEquals': 'string'\n },\n 'Redirect': {\n 'HostName': 'string',\n 'HttpRedirectCode': 'string',\n 'Protocol': 'http'|'https',\n 'ReplaceKeyPrefixWith': 'string',\n 'ReplaceKeyWith': 'string'\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_object(Bucket=None, IfMatch=None, IfModifiedSince=None, IfNoneMatch=None, IfUnmodifiedSince=None, Key=None, Range=None, ResponseCacheControl=None, ResponseContentDisposition=None, ResponseContentEncoding=None, ResponseContentLanguage=None, ResponseContentType=None, ResponseExpires=None, VersionId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, RequestPayer=None, PartNumber=None):\n \"\"\"\n Retrieves objects from Amazon S3.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object(\n Bucket='string',\n IfMatch='string',\n IfModifiedSince=datetime(2015, 1, 1),\n IfNoneMatch='string',\n IfUnmodifiedSince=datetime(2015, 1, 1),\n Key='string',\n Range='string',\n ResponseCacheControl='string',\n ResponseContentDisposition='string',\n ResponseContentEncoding='string',\n ResponseContentLanguage='string',\n ResponseContentType='string',\n ResponseExpires=datetime(2015, 1, 1),\n VersionId='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n RequestPayer='requester',\n PartNumber=123\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type IfMatch: string\n :param IfMatch: Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).\n\n :type IfModifiedSince: datetime\n :param IfModifiedSince: Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).\n\n :type IfNoneMatch: string\n :param IfNoneMatch: Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).\n\n :type IfUnmodifiedSince: datetime\n :param IfUnmodifiedSince: Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type Range: string\n :param Range: Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.\n\n :type ResponseCacheControl: string\n :param ResponseCacheControl: Sets the Cache-Control header of the response.\n\n :type ResponseContentDisposition: string\n :param ResponseContentDisposition: Sets the Content-Disposition header of the response\n\n :type ResponseContentEncoding: string\n :param ResponseContentEncoding: Sets the Content-Encoding header of the response.\n\n :type ResponseContentLanguage: string\n :param ResponseContentLanguage: Sets the Content-Language header of the response.\n\n :type ResponseContentType: string\n :param ResponseContentType: Sets the Content-Type header of the response.\n\n :type ResponseExpires: datetime\n :param ResponseExpires: Sets the Expires header of the response.\n\n :type VersionId: string\n :param VersionId: VersionId used to reference a specific version of the object.\n\n :type SSECustomerAlgorithm: string\n :param SSECustomerAlgorithm: Specifies the algorithm to use to when encrypting the object (e.g., AES256).\n\n :type SSECustomerKey: string\n :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. 
This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side -encryption -customer-algorithm header.\n\n :type SSECustomerKeyMD5: string\n :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type PartNumber: integer\n :param PartNumber: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object.\n\n :rtype: dict\n :return: {\n 'Body': StreamingBody(),\n 'DeleteMarker': True|False,\n 'AcceptRanges': 'string',\n 'Expiration': 'string',\n 'Restore': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'ContentLength': 123,\n 'ETag': 'string',\n 'MissingMeta': 123,\n 'VersionId': 'string',\n 'CacheControl': 'string',\n 'ContentDisposition': 'string',\n 'ContentEncoding': 'string',\n 'ContentLanguage': 'string',\n 'ContentRange': 'string',\n 'ContentType': 'string',\n 'Expires': datetime(2015, 1, 1),\n 'WebsiteRedirectLocation': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'Metadata': {\n 'string': 'string'\n },\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n 'RequestCharged': 'requester',\n 'ReplicationStatus': 'COMPLETE'|'PENDING'|'FAILED'|'REPLICA',\n 'PartsCount': 123,\n 'TagCount': 123,\n 'ObjectLockMode': 'GOVERNANCE'|'COMPLIANCE',\n 'ObjectLockRetainUntilDate': datetime(2015, 1, 1),\n 'ObjectLockLegalHoldStatus': 'ON'|'OFF'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_object_acl(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n \"\"\"\n Returns the access control list (ACL) of an object.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object_acl(\n Bucket='string',\n Key='string',\n VersionId='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type VersionId: string\n :param VersionId: VersionId used to reference a specific version of the object.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
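        As a usage sketch for this operation (bucket and key are illustrative):

            import boto3
            s3 = boto3.client('s3')
            acl = s3.get_object_acl(Bucket='my-bucket', Key='report.csv')
            for grant in acl['Grants']:
                print(grant['Permission'], grant['Grantee'].get('DisplayName'))
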
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n },\n 'Grants': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'WRITE'|'WRITE_ACP'|'READ'|'READ_ACP'\n },\n ],\n 'RequestCharged': 'requester'\n }\n \n \n :returns: \n DisplayName (string) --\n ID (string) --\n \n \"\"\"\n pass\n\ndef get_object_legal_hold(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n \"\"\"\n Gets an object's current Legal Hold status.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object_legal_hold(\n Bucket='string',\n Key='string',\n VersionId='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket containing the object whose Legal Hold status you want to retrieve.\n \n\n :type Key: string\n :param Key: [REQUIRED]\n The key name for the object whose Legal Hold status you want to retrieve.\n \n\n :type VersionId: string\n :param VersionId: The version ID of the object whose Legal Hold status you want to retrieve.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'LegalHold': {\n 'Status': 'ON'|'OFF'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_object_lock_configuration(Bucket=None):\n \"\"\"\n Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object_lock_configuration(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket whose Object Lock configuration you want to retrieve.\n \n\n :rtype: dict\n :return: {\n 'ObjectLockConfiguration': {\n 'ObjectLockEnabled': 'Enabled',\n 'Rule': {\n 'DefaultRetention': {\n 'Mode': 'GOVERNANCE'|'COMPLIANCE',\n 'Days': 123,\n 'Years': 123\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n \"\"\"\n Retrieves an object's retention settings.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object_retention(\n Bucket='string',\n Key='string',\n VersionId='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket containing the object whose retention settings you want to retrieve.\n \n\n :type Key: string\n :param Key: [REQUIRED]\n The key name for the object whose retention settings you want to retrieve.\n \n\n :type VersionId: string\n :param VersionId: The version ID for the object whose retention settings you want to retrieve.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'Retention': {\n 'Mode': 'GOVERNANCE'|'COMPLIANCE',\n 'RetainUntilDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_object_tagging(Bucket=None, Key=None, VersionId=None):\n \"\"\"\n Returns the tag-set of an object.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object_tagging(\n Bucket='string',\n Key='string',\n VersionId='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type VersionId: string\n :param VersionId: \n\n :rtype: dict\n :return: {\n 'VersionId': 'string',\n 'TagSet': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_object_torrent(Bucket=None, Key=None, RequestPayer=None):\n \"\"\"\n Return torrent files from a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_object_torrent(\n Bucket='string',\n Key='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'Body': StreamingBody(),\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_public_access_block(Bucket=None):\n \"\"\"\n Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.get_public_access_block(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to retrieve.\n \n\n :rtype: dict\n :return: {\n 'PublicAccessBlockConfiguration': {\n 'BlockPublicAcls': True|False,\n 'IgnorePublicAcls': True|False,\n 'BlockPublicPolicy': True|False,\n 'RestrictPublicBuckets': True|False\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
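\n A minimal sketch of using get_waiter to block until an object appears, assuming a configured client; 'object_exists' is one of the standard S3 waiter names, and the bucket and key are hypothetical:\n \n waiter = client.get_waiter('object_exists')\n # Polls HeadObject until the key is present or the waiter's max attempts are exhausted.\n waiter.wait(Bucket='example-bucket', Key='example-key')\n 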
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef head_bucket(Bucket=None):\n \"\"\"\n This operation is useful to determine if a bucket exists and you have permission to access it.\n See also: AWS API Documentation\n \n \n :example: response = client.head_bucket(\n Bucket='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n \"\"\"\n pass\n\ndef head_object(Bucket=None, IfMatch=None, IfModifiedSince=None, IfNoneMatch=None, IfUnmodifiedSince=None, Key=None, Range=None, VersionId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, RequestPayer=None, PartNumber=None):\n \"\"\"\n The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.\n See also: AWS API Documentation\n \n \n :example: response = client.head_object(\n Bucket='string',\n IfMatch='string',\n IfModifiedSince=datetime(2015, 1, 1),\n IfNoneMatch='string',\n IfUnmodifiedSince=datetime(2015, 1, 1),\n Key='string',\n Range='string',\n VersionId='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n RequestPayer='requester',\n PartNumber=123\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type IfMatch: string\n :param IfMatch: Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).\n\n :type IfModifiedSince: datetime\n :param IfModifiedSince: Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).\n\n :type IfNoneMatch: string\n :param IfNoneMatch: Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).\n\n :type IfUnmodifiedSince: datetime\n :param IfUnmodifiedSince: Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type Range: string\n :param Range: Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.\n\n :type VersionId: string\n :param VersionId: VersionId used to reference a specific version of the object.\n\n :type SSECustomerAlgorithm: string\n :param SSECustomerAlgorithm: Specifies the algorithm to use when encrypting the object (e.g., AES256).\n\n :type SSECustomerKey: string\n :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.\n\n :type SSECustomerKeyMD5: string\n :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required.\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. 
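\n A minimal sketch of checking an object's metadata with head_object, assuming a configured client and hypothetical names; no object body is transferred, only headers:\n \n head = client.head_object(Bucket='example-bucket', Key='example-key')\n print(head['ContentLength'], head['ETag'], head.get('ContentType'))\n 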
Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type PartNumber: integer\n :param PartNumber: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful for querying the size of the part and the number of parts in this object.\n\n :rtype: dict\n :return: {\n 'DeleteMarker': True|False,\n 'AcceptRanges': 'string',\n 'Expiration': 'string',\n 'Restore': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'ContentLength': 123,\n 'ETag': 'string',\n 'MissingMeta': 123,\n 'VersionId': 'string',\n 'CacheControl': 'string',\n 'ContentDisposition': 'string',\n 'ContentEncoding': 'string',\n 'ContentLanguage': 'string',\n 'ContentType': 'string',\n 'Expires': datetime(2015, 1, 1),\n 'WebsiteRedirectLocation': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'Metadata': {\n 'string': 'string'\n },\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n 'RequestCharged': 'requester',\n 'ReplicationStatus': 'COMPLETE'|'PENDING'|'FAILED'|'REPLICA',\n 'PartsCount': 123,\n 'ObjectLockMode': 'GOVERNANCE'|'COMPLIANCE',\n 'ObjectLockRetainUntilDate': datetime(2015, 1, 1),\n 'ObjectLockLegalHoldStatus': 'ON'|'OFF'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_bucket_analytics_configurations(Bucket=None, ContinuationToken=None):\n \"\"\"\n Lists the analytics configurations for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.list_bucket_analytics_configurations(\n Bucket='string',\n ContinuationToken='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket from which analytics configurations are retrieved.\n \n\n :type ContinuationToken: string\n :param ContinuationToken: The ContinuationToken that represents a placeholder from where this request should begin.\n\n :rtype: dict\n :return: {\n 'IsTruncated': True|False,\n 'ContinuationToken': 'string',\n 'NextContinuationToken': 'string',\n 'AnalyticsConfigurationList': [\n {\n 'Id': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'StorageClassAnalysis': {\n 'DataExport': {\n 'OutputSchemaVersion': 'V_1',\n 'Destination': {\n 'S3BucketDestination': {\n 'Format': 'CSV',\n 'BucketAccountId': 'string',\n 'Bucket': 'string',\n 'Prefix': 'string'\n }\n }\n }\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_bucket_inventory_configurations(Bucket=None, ContinuationToken=None):\n \"\"\"\n Returns a list of inventory configurations for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.list_bucket_inventory_configurations(\n Bucket='string',\n ContinuationToken='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the inventory configurations to retrieve.\n \n\n :type ContinuationToken: string\n :param ContinuationToken: The marker used to continue an inventory configuration listing that has been truncated. 
Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.\n\n :rtype: dict\n :return: {\n 'ContinuationToken': 'string',\n 'InventoryConfigurationList': [\n {\n 'Destination': {\n 'S3BucketDestination': {\n 'AccountId': 'string',\n 'Bucket': 'string',\n 'Format': 'CSV'|'ORC'|'Parquet',\n 'Prefix': 'string',\n 'Encryption': {\n 'SSES3': {},\n 'SSEKMS': {\n 'KeyId': 'string'\n }\n }\n }\n },\n 'IsEnabled': True|False,\n 'Filter': {\n 'Prefix': 'string'\n },\n 'Id': 'string',\n 'IncludedObjectVersions': 'All'|'Current',\n 'OptionalFields': [\n 'Size'|'LastModifiedDate'|'StorageClass'|'ETag'|'IsMultipartUploaded'|'ReplicationStatus'|'EncryptionStatus'|'ObjectLockRetainUntilDate'|'ObjectLockMode'|'ObjectLockLegalHoldStatus',\n ],\n 'Schedule': {\n 'Frequency': 'Daily'|'Weekly'\n }\n },\n ],\n 'IsTruncated': True|False,\n 'NextContinuationToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_bucket_metrics_configurations(Bucket=None, ContinuationToken=None):\n \"\"\"\n Lists the metrics configurations for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.list_bucket_metrics_configurations(\n Bucket='string',\n ContinuationToken='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket containing the metrics configurations to retrieve.\n \n\n :type ContinuationToken: string\n :param ContinuationToken: The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.\n\n :rtype: dict\n :return: {\n 'IsTruncated': True|False,\n 'ContinuationToken': 'string',\n 'NextContinuationToken': 'string',\n 'MetricsConfigurationList': [\n {\n 'Id': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_buckets():\n \"\"\"\n Returns a list of all buckets owned by the authenticated sender of the request.\n See also: AWS API Documentation\n \n \n :example: response = client.list_buckets()\n \n \n :rtype: dict\n :return: {\n 'Buckets': [\n {\n 'Name': 'string',\n 'CreationDate': datetime(2015, 1, 1)\n },\n ],\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef list_multipart_uploads(Bucket=None, Delimiter=None, EncodingType=None, KeyMarker=None, MaxUploads=None, Prefix=None, UploadIdMarker=None):\n \"\"\"\n This operation lists in-progress multipart uploads.\n See also: AWS API Documentation\n \n \n :example: response = client.list_multipart_uploads(\n Bucket='string',\n Delimiter='string',\n EncodingType='url',\n KeyMarker='string',\n MaxUploads=123,\n Prefix='string',\n UploadIdMarker='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Delimiter: string\n :param Delimiter: Character you use to group keys.\n\n :type EncodingType: string\n :param EncodingType: Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. 
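\n A minimal sketch of listing in-progress multipart uploads with list_multipart_uploads, assuming a configured client and a hypothetical bucket; the 'Uploads' key is absent when there are none:\n \n uploads = client.list_multipart_uploads(Bucket='example-bucket')\n for upload in uploads.get('Uploads', []):\n # Stale entries found here can be cleaned up with abort_multipart_upload.\n print(upload['Key'], upload['UploadId'], upload['Initiated'])\n 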
For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.\n\n :type KeyMarker: string\n :param KeyMarker: Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.\n\n :type MaxUploads: integer\n :param MaxUploads: Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response.\n\n :type Prefix: string\n :param Prefix: Lists in-progress uploads only for those keys that begin with the specified prefix.\n\n :type UploadIdMarker: string\n :param UploadIdMarker: Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.\n\n :rtype: dict\n :return: {\n 'Bucket': 'string',\n 'KeyMarker': 'string',\n 'UploadIdMarker': 'string',\n 'NextKeyMarker': 'string',\n 'Prefix': 'string',\n 'Delimiter': 'string',\n 'NextUploadIdMarker': 'string',\n 'MaxUploads': 123,\n 'IsTruncated': True|False,\n 'Uploads': [\n {\n 'UploadId': 'string',\n 'Key': 'string',\n 'Initiated': datetime(2015, 1, 1),\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n },\n 'Initiator': {\n 'ID': 'string',\n 'DisplayName': 'string'\n }\n },\n ],\n 'CommonPrefixes': [\n {\n 'Prefix': 'string'\n },\n ],\n 'EncodingType': 'url'\n }\n \n \n :returns: \n DisplayName (string) --\n ID (string) --\n \n \"\"\"\n pass\n\ndef list_object_versions(Bucket=None, Delimiter=None, EncodingType=None, KeyMarker=None, MaxKeys=None, Prefix=None, VersionIdMarker=None):\n \"\"\"\n Returns metadata about all of the versions of objects in a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.list_object_versions(\n Bucket='string',\n Delimiter='string',\n EncodingType='url',\n KeyMarker='string',\n MaxKeys=123,\n Prefix='string',\n VersionIdMarker='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Delimiter: string\n :param Delimiter: A delimiter is a character you use to group keys.\n\n :type EncodingType: string\n :param EncodingType: Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.\n\n :type KeyMarker: string\n :param KeyMarker: Specifies the key to start with when listing objects in a bucket.\n\n :type MaxKeys: integer\n :param MaxKeys: Sets the maximum number of keys returned in the response. 
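\n A minimal sketch of walking object versions with list_object_versions, assuming a configured client; the bucket and prefix are hypothetical:\n \n versions = client.list_object_versions(Bucket='example-bucket', Prefix='logs/')\n for version in versions.get('Versions', []):\n print(version['Key'], version['VersionId'], version['IsLatest'])\n 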
The response might contain fewer keys but will never contain more.\n\n :type Prefix: string\n :param Prefix: Limits the response to keys that begin with the specified prefix.\n\n :type VersionIdMarker: string\n :param VersionIdMarker: Specifies the object version you want to start listing from.\n\n :rtype: dict\n :return: {\n 'IsTruncated': True|False,\n 'KeyMarker': 'string',\n 'VersionIdMarker': 'string',\n 'NextKeyMarker': 'string',\n 'NextVersionIdMarker': 'string',\n 'Versions': [\n {\n 'ETag': 'string',\n 'Size': 123,\n 'StorageClass': 'STANDARD',\n 'Key': 'string',\n 'VersionId': 'string',\n 'IsLatest': True|False,\n 'LastModified': datetime(2015, 1, 1),\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n }\n },\n ],\n 'DeleteMarkers': [\n {\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n },\n 'Key': 'string',\n 'VersionId': 'string',\n 'IsLatest': True|False,\n 'LastModified': datetime(2015, 1, 1)\n },\n ],\n 'Name': 'string',\n 'Prefix': 'string',\n 'Delimiter': 'string',\n 'MaxKeys': 123,\n 'CommonPrefixes': [\n {\n 'Prefix': 'string'\n },\n ],\n 'EncodingType': 'url'\n }\n \n \n :returns: \n DisplayName (string) --\n ID (string) --\n \n \"\"\"\n pass\n\ndef list_objects(Bucket=None, Delimiter=None, EncodingType=None, Marker=None, MaxKeys=None, Prefix=None, RequestPayer=None):\n \"\"\"\n Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.list_objects(\n Bucket='string',\n Delimiter='string',\n EncodingType='url',\n Marker='string',\n MaxKeys=123,\n Prefix='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Delimiter: string\n :param Delimiter: A delimiter is a character you use to group keys.\n\n :type EncodingType: string\n :param EncodingType: Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.\n\n :type Marker: string\n :param Marker: Specifies the key to start with when listing objects in a bucket.\n\n :type MaxKeys: integer\n :param MaxKeys: Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.\n\n :type Prefix: string\n :param Prefix: Limits the response to keys that begin with the specified prefix.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the list objects request. 
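\n A minimal sketch of paging through list_objects with Marker, assuming a configured client and a hypothetical bucket; NextMarker is only returned when a Delimiter is used, so the last returned key is the usual fallback:\n \n kwargs = {'Bucket': 'example-bucket'}\n while True:\n page = client.list_objects(**kwargs)\n for obj in page.get('Contents', []):\n print(obj['Key'], obj['Size'])\n if not page.get('IsTruncated'):\n break\n # Resume the listing after the last key seen on this page.\n kwargs['Marker'] = page.get('NextMarker', page['Contents'][-1]['Key'])\n 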
Bucket owners need not specify this parameter in their requests.\n\n :rtype: dict\n :return: {\n 'IsTruncated': True|False,\n 'Marker': 'string',\n 'NextMarker': 'string',\n 'Contents': [\n {\n 'Key': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'ETag': 'string',\n 'Size': 123,\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING',\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n }\n },\n ],\n 'Name': 'string',\n 'Prefix': 'string',\n 'Delimiter': 'string',\n 'MaxKeys': 123,\n 'CommonPrefixes': [\n {\n 'Prefix': 'string'\n },\n ],\n 'EncodingType': 'url'\n }\n \n \n :returns: \n DisplayName (string) --\n ID (string) --\n \n \"\"\"\n pass\n\ndef list_objects_v2(Bucket=None, Delimiter=None, EncodingType=None, MaxKeys=None, Prefix=None, ContinuationToken=None, FetchOwner=None, StartAfter=None, RequestPayer=None):\n \"\"\"\n Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.\n See also: AWS API Documentation\n \n \n :example: response = client.list_objects_v2(\n Bucket='string',\n Delimiter='string',\n EncodingType='url',\n MaxKeys=123,\n Prefix='string',\n ContinuationToken='string',\n FetchOwner=True|False,\n StartAfter='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket to list.\n \n\n :type Delimiter: string\n :param Delimiter: A delimiter is a character you use to group keys.\n\n :type EncodingType: string\n :param EncodingType: Encoding type used by Amazon S3 to encode object keys in the response.\n\n :type MaxKeys: integer\n :param MaxKeys: Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.\n\n :type Prefix: string\n :param Prefix: Limits the response to keys that begin with the specified prefix.\n\n :type ContinuationToken: string\n :param ContinuationToken: ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key.\n\n :type FetchOwner: boolean\n :param FetchOwner: The owner field is not present in listV2 by default. If you want to return the owner field with each key in the result, set FetchOwner to true.\n\n :type StartAfter: string\n :param StartAfter: StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket.\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. 
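\n A minimal sketch of enumerating keys with the built-in list_objects_v2 paginator, assuming a configured client; the paginator handles ContinuationToken internally, and the bucket and prefix are hypothetical:\n \n paginator = client.get_paginator('list_objects_v2')\n for page in paginator.paginate(Bucket='example-bucket', Prefix='logs/'):\n for obj in page.get('Contents', []):\n print(obj['Key'])\n 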
Bucket owners need not specify this parameter in their requests.\n\n :rtype: dict\n :return: {\n 'IsTruncated': True|False,\n 'Contents': [\n {\n 'Key': 'string',\n 'LastModified': datetime(2015, 1, 1),\n 'ETag': 'string',\n 'Size': 123,\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING',\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n }\n },\n ],\n 'Name': 'string',\n 'Prefix': 'string',\n 'Delimiter': 'string',\n 'MaxKeys': 123,\n 'CommonPrefixes': [\n {\n 'Prefix': 'string'\n },\n ],\n 'EncodingType': 'url',\n 'KeyCount': 123,\n 'ContinuationToken': 'string',\n 'NextContinuationToken': 'string',\n 'StartAfter': 'string'\n }\n \n \n :returns: \n DisplayName (string) --\n ID (string) --\n \n \"\"\"\n pass\n\ndef list_parts(Bucket=None, Key=None, MaxParts=None, PartNumberMarker=None, UploadId=None, RequestPayer=None):\n \"\"\"\n Lists the parts that have been uploaded for a specific multipart upload.\n See also: AWS API Documentation\n \n \n :example: response = client.list_parts(\n Bucket='string',\n Key='string',\n MaxParts=123,\n PartNumberMarker=123,\n UploadId='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type MaxParts: integer\n :param MaxParts: Sets the maximum number of parts to return.\n\n :type PartNumberMarker: integer\n :param PartNumberMarker: Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.\n\n :type UploadId: string\n :param UploadId: [REQUIRED]\n Upload ID identifying the multipart upload whose parts are being listed.\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
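\n A minimal sketch of listing uploaded parts with list_parts, assuming a configured client and an upload_id obtained earlier from create_multipart_upload; the bucket and key are hypothetical:\n \n parts = client.list_parts(Bucket='example-bucket', Key='example-key', UploadId=upload_id)\n for part in parts.get('Parts', []):\n print(part['PartNumber'], part['Size'], part['ETag'])\n 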
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'AbortDate': datetime(2015, 1, 1),\n 'AbortRuleId': 'string',\n 'Bucket': 'string',\n 'Key': 'string',\n 'UploadId': 'string',\n 'PartNumberMarker': 123,\n 'NextPartNumberMarker': 123,\n 'MaxParts': 123,\n 'IsTruncated': True|False,\n 'Parts': [\n {\n 'PartNumber': 123,\n 'LastModified': datetime(2015, 1, 1),\n 'ETag': 'string',\n 'Size': 123\n },\n ],\n 'Initiator': {\n 'ID': 'string',\n 'DisplayName': 'string'\n },\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n },\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n 'RequestCharged': 'requester'\n }\n \n \n :returns: \n DisplayName (string) --\n ID (string) --\n \n \"\"\"\n pass\n\ndef put_bucket_accelerate_configuration(Bucket=None, AccelerateConfiguration=None):\n \"\"\"\n Sets the accelerate configuration of an existing bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_accelerate_configuration(\n Bucket='string',\n AccelerateConfiguration={\n 'Status': 'Enabled'|'Suspended'\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket for which the accelerate configuration is set.\n \n\n :type AccelerateConfiguration: dict\n :param AccelerateConfiguration: [REQUIRED]\n Specifies the Accelerate Configuration you want to set for the bucket.\n Status (string) --The accelerate configuration of the bucket.\n \n\n \"\"\"\n pass\n\ndef put_bucket_acl(ACL=None, AccessControlPolicy=None, Bucket=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWrite=None, GrantWriteACP=None):\n \"\"\"\n Sets the permissions on a bucket using access control lists (ACL).\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_acl(\n ACL='private'|'public-read'|'public-read-write'|'authenticated-read',\n AccessControlPolicy={\n 'Grants': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'WRITE'|'WRITE_ACP'|'READ'|'READ_ACP'\n },\n ],\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n }\n },\n Bucket='string',\n GrantFullControl='string',\n GrantRead='string',\n GrantReadACP='string',\n GrantWrite='string',\n GrantWriteACP='string'\n )\n \n \n :type ACL: string\n :param ACL: The canned ACL to apply to the bucket.\n\n :type AccessControlPolicy: dict\n :param AccessControlPolicy: \n Grants (list) --A list of grants.\n (dict) --\n Grantee (dict) --\n DisplayName (string) --Screen name of the grantee.\n EmailAddress (string) --Email address of the grantee.\n ID (string) --The canonical user ID of the grantee.\n Type (string) -- [REQUIRED]Type of grantee\n URI (string) --URI of the grantee group.\n Permission (string) --Specifies the permission given to the grantee.\n \n Owner (dict) --\n DisplayName (string) --\n ID (string) --\n \n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type GrantFullControl: string\n :param GrantFullControl: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.\n\n :type GrantRead: string\n :param GrantRead: Allows grantee to list the objects in the bucket.\n\n :type GrantReadACP: string\n :param GrantReadACP: Allows grantee to read the bucket ACL.\n\n :type GrantWrite: 
string\n :param GrantWrite: Allows grantee to create, overwrite, and delete any object in the bucket.\n\n :type GrantWriteACP: string\n :param GrantWriteACP: Allows grantee to write the ACL for the applicable bucket.\n\n \"\"\"\n pass\n\ndef put_bucket_analytics_configuration(Bucket=None, Id=None, AnalyticsConfiguration=None):\n \"\"\"\n Sets an analytics configuration for the bucket (specified by the analytics configuration ID).\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_analytics_configuration(\n Bucket='string',\n Id='string',\n AnalyticsConfiguration={\n 'Id': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'StorageClassAnalysis': {\n 'DataExport': {\n 'OutputSchemaVersion': 'V_1',\n 'Destination': {\n 'S3BucketDestination': {\n 'Format': 'CSV',\n 'BucketAccountId': 'string',\n 'Bucket': 'string',\n 'Prefix': 'string'\n }\n }\n }\n }\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket to which an analytics configuration is stored.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier used to represent an analytics configuration.\n \n\n :type AnalyticsConfiguration: dict\n :param AnalyticsConfiguration: [REQUIRED]\n The configuration and any analyses for the analytics filter.\n Id (string) -- [REQUIRED]The identifier used to represent an analytics configuration.\n Filter (dict) --The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.\n Prefix (string) --The prefix to use when evaluating an analytics filter.\n Tag (dict) --The tag to use when evaluating an analytics filter.\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n And (dict) --A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates.\n Prefix (string) --The prefix to use when evaluating an AND predicate.\n Tags (list) --The list of tags to use when evaluating an AND predicate.\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n \n StorageClassAnalysis (dict) -- [REQUIRED]If present, it indicates that data related to access patterns will be collected and made available to analyze the tradeoffs between different storage classes.\n DataExport (dict) --A container used to describe how data related to the storage class analysis should be exported.\n OutputSchemaVersion (string) -- [REQUIRED]The version of the output schema to use when exporting data. Must be V_1.\n Destination (dict) -- [REQUIRED]The place to store the data for an analysis.\n S3BucketDestination (dict) -- [REQUIRED]A destination signifying output to an S3 bucket.\n Format (string) -- [REQUIRED]The file format used when exporting data to Amazon S3.\n BucketAccountId (string) --The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.\n Bucket (string) -- [REQUIRED]The Amazon resource name (ARN) of the bucket to which data is exported.\n Prefix (string) --The prefix to use when exporting data. 
The exported data begins with this prefix.\n \n \n \n\n \"\"\"\n pass\n\ndef put_bucket_cors(Bucket=None, CORSConfiguration=None):\n \"\"\"\n Sets the CORS configuration for a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_cors(\n Bucket='string',\n CORSConfiguration={\n 'CORSRules': [\n {\n 'AllowedHeaders': [\n 'string',\n ],\n 'AllowedMethods': [\n 'string',\n ],\n 'AllowedOrigins': [\n 'string',\n ],\n 'ExposeHeaders': [\n 'string',\n ],\n 'MaxAgeSeconds': 123\n },\n ]\n },\n \n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type CORSConfiguration: dict\n :param CORSConfiguration: [REQUIRED]\n CORSRules (list) -- [REQUIRED]\n (dict) --\n AllowedHeaders (list) --Specifies which headers are allowed in a pre-flight OPTIONS request.\n (string) --\n AllowedMethods (list) -- [REQUIRED]Identifies HTTP methods that the domain/origin specified in the rule is allowed to execute.\n (string) --\n AllowedOrigins (list) -- [REQUIRED]One or more origins you want customers to be able to access the bucket from.\n (string) --\n ExposeHeaders (list) --One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).\n (string) --\n MaxAgeSeconds (integer) --The time in seconds that your browser is to cache the preflight response for the specified resource.\n \n \n\n \"\"\"\n pass\n\ndef put_bucket_encryption(Bucket=None, ContentMD5=None, ServerSideEncryptionConfiguration=None):\n \"\"\"\n Creates a new server-side encryption configuration (or replaces an existing one, if present).\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_encryption(\n Bucket='string',\n ContentMD5='string',\n ServerSideEncryptionConfiguration={\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256'|'aws:kms',\n 'KMSMasterKeyID': 'string'\n }\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket for which the server-side encryption configuration is set.\n \n\n :type ContentMD5: string\n :param ContentMD5: The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.\n\n :type ServerSideEncryptionConfiguration: dict\n :param ServerSideEncryptionConfiguration: [REQUIRED]\n Container for server-side encryption configuration rules. Currently S3 supports one rule only.\n Rules (list) -- [REQUIRED]Container for information about a particular server-side encryption configuration rule.\n (dict) --Container for information about a particular server-side encryption configuration rule.\n ApplyServerSideEncryptionByDefault (dict) --Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.\n SSEAlgorithm (string) -- [REQUIRED]Server-side encryption algorithm to use for the default encryption.\n KMSMasterKeyID (string) --KMS master key ID to use for the default encryption. 
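\n A minimal sketch of enabling default SSE-S3 encryption with put_bucket_encryption, assuming a configured client and a hypothetical bucket; boto3 computes ContentMD5 automatically when it is omitted:\n \n client.put_bucket_encryption(\n Bucket='example-bucket',\n ServerSideEncryptionConfiguration={\n 'Rules': [{'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}}]\n }\n )\n 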
This parameter is allowed only if SSEAlgorithm is aws:kms.\n \n \n \n\n \"\"\"\n pass\n\ndef put_bucket_inventory_configuration(Bucket=None, Id=None, InventoryConfiguration=None):\n \"\"\"\n Adds an inventory configuration (identified by the inventory ID) to the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_inventory_configuration(\n Bucket='string',\n Id='string',\n InventoryConfiguration={\n 'Destination': {\n 'S3BucketDestination': {\n 'AccountId': 'string',\n 'Bucket': 'string',\n 'Format': 'CSV'|'ORC'|'Parquet',\n 'Prefix': 'string',\n 'Encryption': {\n 'SSES3': {},\n 'SSEKMS': {\n 'KeyId': 'string'\n }\n }\n }\n },\n 'IsEnabled': True|False,\n 'Filter': {\n 'Prefix': 'string'\n },\n 'Id': 'string',\n 'IncludedObjectVersions': 'All'|'Current',\n 'OptionalFields': [\n 'Size'|'LastModifiedDate'|'StorageClass'|'ETag'|'IsMultipartUploaded'|'ReplicationStatus'|'EncryptionStatus'|'ObjectLockRetainUntilDate'|'ObjectLockMode'|'ObjectLockLegalHoldStatus',\n ],\n 'Schedule': {\n 'Frequency': 'Daily'|'Weekly'\n }\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket where the inventory configuration will be stored.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID used to identify the inventory configuration.\n \n\n :type InventoryConfiguration: dict\n :param InventoryConfiguration: [REQUIRED]\n Specifies the inventory configuration.\n Destination (dict) -- [REQUIRED]Contains information about where to publish the inventory results.\n S3BucketDestination (dict) -- [REQUIRED]Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.\n AccountId (string) --The ID of the account that owns the destination bucket.\n Bucket (string) -- [REQUIRED]The Amazon resource name (ARN) of the bucket where inventory results will be published.\n Format (string) -- [REQUIRED]Specifies the output format of the inventory results.\n Prefix (string) --The prefix that is prepended to all inventory results.\n Encryption (dict) --Contains the type of server-side encryption used to encrypt the inventory results.\n SSES3 (dict) --Specifies the use of SSE-S3 to encrypt delivered Inventory reports.\n SSEKMS (dict) --Specifies the use of SSE-KMS to encrypt delivered Inventory reports.\n KeyId (string) -- [REQUIRED]Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.\n \n \n IsEnabled (boolean) -- [REQUIRED]Specifies whether the inventory is enabled or disabled.\n Filter (dict) --Specifies an inventory filter. 
The inventory only includes objects that meet the filter's criteria.\n Prefix (string) -- [REQUIRED]The prefix that an object must have to be included in the inventory results.\n Id (string) -- [REQUIRED]The ID used to identify the inventory configuration.\n IncludedObjectVersions (string) -- [REQUIRED]Specifies which object version(s) to include in the inventory results.\n OptionalFields (list) --Contains the optional fields that are included in the inventory results.\n (string) --\n Schedule (dict) -- [REQUIRED]Specifies the schedule for generating inventory results.\n Frequency (string) -- [REQUIRED]Specifies how frequently inventory results are produced.\n \n \n\n \"\"\"\n pass\n\ndef put_bucket_lifecycle(Bucket=None, LifecycleConfiguration=None):\n \"\"\"\n Deprecated, see the PutBucketLifecycleConfiguration operation.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_lifecycle(\n Bucket='string',\n LifecycleConfiguration={\n 'Rules': [\n {\n 'Expiration': {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'ExpiredObjectDeleteMarker': True|False\n },\n 'ID': 'string',\n 'Prefix': 'string',\n 'Status': 'Enabled'|'Disabled',\n 'Transition': {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n 'NoncurrentVersionTransition': {\n 'NoncurrentDays': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 123\n },\n 'AbortIncompleteMultipartUpload': {\n 'DaysAfterInitiation': 123\n }\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type LifecycleConfiguration: dict\n :param LifecycleConfiguration: \n Rules (list) -- [REQUIRED]\n (dict) --\n Expiration (dict) --\n Date (datetime) --Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.\n Days (integer) --Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.\n ExpiredObjectDeleteMarker (boolean) --Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.\n ID (string) --Unique identifier for the rule. The value cannot be longer than 255 characters.\n Prefix (string) -- [REQUIRED]Prefix identifying one or more objects to which the rule applies.\n Status (string) -- [REQUIRED]If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.\n Transition (dict) --\n Date (datetime) --Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.\n Days (integer) --Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.\n StorageClass (string) --The class of storage used to store the object.\n NoncurrentVersionTransition (dict) --Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage class. 
If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage class at a specific period in the object's lifetime.\n NoncurrentDays (integer) --Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.\n StorageClass (string) --The class of storage used to store the object.\n NoncurrentVersionExpiration (dict) --Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.\n NoncurrentDays (integer) --Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.\n AbortIncompleteMultipartUpload (dict) --Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload.\n DaysAfterInitiation (integer) --Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload.\n \n \n \n\n \"\"\"\n pass\n\ndef put_bucket_lifecycle_configuration(Bucket=None, LifecycleConfiguration=None):\n \"\"\"\n Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_lifecycle_configuration(\n Bucket='string',\n LifecycleConfiguration={\n 'Rules': [\n {\n 'Expiration': {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'ExpiredObjectDeleteMarker': True|False\n },\n 'ID': 'string',\n 'Prefix': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'Status': 'Enabled'|'Disabled',\n 'Transitions': [\n {\n 'Date': datetime(2015, 1, 1),\n 'Days': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n ],\n 'NoncurrentVersionTransitions': [\n {\n 'NoncurrentDays': 123,\n 'StorageClass': 'GLACIER'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'\n },\n ],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 123\n },\n 'AbortIncompleteMultipartUpload': {\n 'DaysAfterInitiation': 123\n }\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type LifecycleConfiguration: dict\n :param LifecycleConfiguration: \n Rules (list) -- [REQUIRED]\n (dict) --\n Expiration (dict) --\n Date (datetime) --Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.\n Days (integer) --Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.\n ExpiredObjectDeleteMarker (boolean) --Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. 
If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.\n ID (string) --Unique identifier for the rule. The value cannot be longer than 255 characters.\n Prefix (string) --Prefix identifying one or more objects to which the rule applies. This is deprecated; use Filter instead.\n Filter (dict) --The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.\n Prefix (string) --Prefix identifying one or more objects to which the rule applies.\n Tag (dict) --This tag must exist in the object's tag set in order for the rule to apply.\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n And (dict) --This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator.\n Prefix (string) --\n Tags (list) --All of these tags must exist in the object's tag set in order for the rule to apply.\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n \n Status (string) -- [REQUIRED]If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.\n Transitions (list) --\n (dict) --\n Date (datetime) --Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.\n Days (integer) --Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.\n StorageClass (string) --The class of storage used to store the object.\n \n NoncurrentVersionTransitions (list) --\n (dict) --Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage class at a specific period in the object's lifetime.\n NoncurrentDays (integer) --Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.\n StorageClass (string) --The class of storage used to store the object.\n \n NoncurrentVersionExpiration (dict) --Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.\n NoncurrentDays (integer) --Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. 
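\n A minimal sketch of a put_bucket_lifecycle_configuration call that expires objects under a prefix and cleans up abandoned multipart uploads, assuming a configured client; the bucket name, rule ID, and prefix are hypothetical:\n \n client.put_bucket_lifecycle_configuration(\n Bucket='example-bucket',\n LifecycleConfiguration={\n 'Rules': [{\n 'ID': 'expire-tmp',\n 'Filter': {'Prefix': 'tmp/'},\n 'Status': 'Enabled',\n 'Expiration': {'Days': 30},\n 'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 7}\n }]\n }\n )\n 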
For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.\n AbortIncompleteMultipartUpload (dict) --Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload.\n DaysAfterInitiation (integer) --Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload.\n \n \n \n\n \"\"\"\n pass\n\ndef put_bucket_logging(Bucket=None, BucketLoggingStatus=None):\n \"\"\"\n Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_logging(\n Bucket='string',\n BucketLoggingStatus={\n 'LoggingEnabled': {\n 'TargetBucket': 'string',\n 'TargetGrants': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'READ'|'WRITE'\n },\n ],\n 'TargetPrefix': 'string'\n }\n },\n \n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type BucketLoggingStatus: dict\n :param BucketLoggingStatus: [REQUIRED]\n LoggingEnabled (dict) --Container for logging information. Presence of this element indicates that logging is enabled. Parameters TargetBucket and TargetPrefix are required in this case.\n TargetBucket (string) -- [REQUIRED]Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. 
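\n A minimal sketch of enabling server access logging with put_bucket_logging, assuming a configured client; both bucket names and the prefix are hypothetical, and the target bucket must already permit log delivery:\n \n client.put_bucket_logging(\n Bucket='example-bucket',\n BucketLoggingStatus={\n 'LoggingEnabled': {\n 'TargetBucket': 'example-log-bucket',\n 'TargetPrefix': 'logs/example-bucket/'\n }\n }\n )\n 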
In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key.\n TargetGrants (list) --\n (dict) --\n Grantee (dict) --\n DisplayName (string) --Screen name of the grantee.\n EmailAddress (string) --Email address of the grantee.\n ID (string) --The canonical user ID of the grantee.\n Type (string) -- [REQUIRED]Type of grantee\n URI (string) --URI of the grantee group.\n Permission (string) --Logging permissions assigned to the Grantee for the bucket.\n \n TargetPrefix (string) -- [REQUIRED]This element lets you specify a prefix for the keys that the log files will be stored under.\n \n \n\n \"\"\"\n pass\n\ndef put_bucket_metrics_configuration(Bucket=None, Id=None, MetricsConfiguration=None):\n \"\"\"\n Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_metrics_configuration(\n Bucket='string',\n Id='string',\n MetricsConfiguration={\n 'Id': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the bucket for which the metrics configuration is set.\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The ID used to identify the metrics configuration.\n \n\n :type MetricsConfiguration: dict\n :param MetricsConfiguration: [REQUIRED]\n Specifies the metrics configuration.\n Id (string) -- [REQUIRED]The ID used to identify the metrics configuration.\n Filter (dict) --Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).\n Prefix (string) --The prefix used when evaluating a metrics filter.\n Tag (dict) --The tag used when evaluating a metrics filter.\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n And (dict) --A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. 
The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.\n Prefix (string) --The prefix used when evaluating an AND predicate.\n Tags (list) --The list of tags used when evaluating an AND predicate.\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n \n \n\n \"\"\"\n pass\n\ndef put_bucket_notification(Bucket=None, NotificationConfiguration=None):\n \"\"\"\n Deprecated, see the PutBucketNotificationConfiguration operation.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_notification(\n Bucket='string',\n NotificationConfiguration={\n 'TopicConfiguration': {\n 'Id': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Event': 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n 'Topic': 'string'\n },\n 'QueueConfiguration': {\n 'Id': 'string',\n 'Event': 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Queue': 'string'\n },\n 'CloudFunctionConfiguration': {\n 'Id': 'string',\n 'Event': 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'CloudFunction': 'string',\n 'InvocationRole': 'string'\n }\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type NotificationConfiguration: dict\n :param NotificationConfiguration: [REQUIRED]\n TopicConfiguration (dict) --\n Id (string) --An optional unique identifier for configurations in a notification configuration. 
If you don't provide one, Amazon S3 will assign an ID.\n Events (list) --\n (string) --The bucket event for which to send notifications.\n Event (string) --Bucket event for which to send notifications.\n Topic (string) --Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket.\n QueueConfiguration (dict) --\n Id (string) --An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID.\n Event (string) --The bucket event for which to send notifications.\n Events (list) --\n (string) --The bucket event for which to send notifications.\n Queue (string) --\n CloudFunctionConfiguration (dict) --\n Id (string) --An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID.\n Event (string) --The bucket event for which to send notifications.\n Events (list) --\n (string) --The bucket event for which to send notifications.\n CloudFunction (string) --\n InvocationRole (string) --\n \n\n \"\"\"\n pass\n\ndef put_bucket_notification_configuration(Bucket=None, NotificationConfiguration=None):\n \"\"\"\n Enables notifications of specified events for a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_notification_configuration(\n Bucket='string',\n NotificationConfiguration={\n 'TopicConfigurations': [\n {\n 'Id': 'string',\n 'TopicArn': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Filter': {\n 'Key': {\n 'FilterRules': [\n {\n 'Name': 'prefix'|'suffix',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ],\n 'QueueConfigurations': [\n {\n 'Id': 'string',\n 'QueueArn': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Filter': {\n 'Key': {\n 'FilterRules': [\n {\n 'Name': 'prefix'|'suffix',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ],\n 'LambdaFunctionConfigurations': [\n {\n 'Id': 'string',\n 'LambdaFunctionArn': 'string',\n 'Events': [\n 's3:ReducedRedundancyLostObject'|'s3:ObjectCreated:*'|'s3:ObjectCreated:Put'|'s3:ObjectCreated:Post'|'s3:ObjectCreated:Copy'|'s3:ObjectCreated:CompleteMultipartUpload'|'s3:ObjectRemoved:*'|'s3:ObjectRemoved:Delete'|'s3:ObjectRemoved:DeleteMarkerCreated'|'s3:ObjectRestore:Post'|'s3:ObjectRestore:Completed',\n ],\n 'Filter': {\n 'Key': {\n 'FilterRules': [\n {\n 'Name': 'prefix'|'suffix',\n 'Value': 'string'\n },\n ]\n }\n }\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type NotificationConfiguration: dict\n :param NotificationConfiguration: [REQUIRED]\n A container for specifying the notification configuration of the bucket. 
If this element is empty, notifications are turned off for the bucket.\n TopicConfigurations (list) --\n (dict) --A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events.\n Id (string) --An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID.\n TopicArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 will publish a message when it detects events of the specified type.\n Events (list) -- [REQUIRED]\n (string) --The bucket event for which to send notifications.\n Filter (dict) --A container for object key name filtering rules. For information about key name filtering, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.\n Key (dict) --A container for object key name prefix and suffix filtering rules.\n FilterRules (list) --A list of containers for the key value pair that defines the criteria for the filter rule.\n (dict) --A container for a key value pair that defines the criteria for the filter rule.\n Name (string) --The object key name prefix or suffix identifying one or more objects to which the filtering rule applies. The maximum prefix length is 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.\n Value (string) --\n \n \n \n QueueConfigurations (list) --\n (dict) --A container for specifying the configuration for publication of messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.\n Id (string) --An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID.\n QueueArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 will publish a message when it detects events of the specified type.\n Events (list) -- [REQUIRED]\n (string) --The bucket event for which to send notifications.\n Filter (dict) --A container for object key name filtering rules. For information about key name filtering, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.\n Key (dict) --A container for object key name prefix and suffix filtering rules.\n FilterRules (list) --A list of containers for the key value pair that defines the criteria for the filter rule.\n (dict) --A container for a key value pair that defines the criteria for the filter rule.\n Name (string) --The object key name prefix or suffix identifying one or more objects to which the filtering rule applies. The maximum prefix length is 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.\n Value (string) --\n \n \n \n LambdaFunctionConfigurations (list) --\n (dict) --A container for specifying the configuration for AWS Lambda notifications.\n Id (string) --An optional unique identifier for configurations in a notification configuration. 
If you don't provide one, Amazon S3 will assign an ID.\n LambdaFunctionArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3 can invoke when it detects events of the specified type.\n Events (list) -- [REQUIRED]\n (string) --The bucket event for which to send notifications.\n Filter (dict) --A container for object key name filtering rules. For information about key name filtering, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.\n Key (dict) --A container for object key name prefix and suffix filtering rules.\n FilterRules (list) --A list of containers for the key value pair that defines the criteria for the filter rule.\n (dict) --A container for a key value pair that defines the criteria for the filter rule.\n Name (string) --The object key name prefix or suffix identifying one or more objects to which the filtering rule applies. The maximum prefix length is 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.\n Value (string) --\n \n \n \n \n\n \"\"\"\n pass\n\ndef put_bucket_policy(Bucket=None, ConfirmRemoveSelfBucketAccess=None, Policy=None):\n \"\"\"\n Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_policy(\n Bucket='string',\n ConfirmRemoveSelfBucketAccess=True|False,\n Policy='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type ConfirmRemoveSelfBucketAccess: boolean\n :param ConfirmRemoveSelfBucketAccess: Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.\n\n :type Policy: string\n :param Policy: [REQUIRED]\n The bucket policy as a JSON document.\n \n\n \"\"\"\n pass\n\ndef put_bucket_replication(Bucket=None, ReplicationConfiguration=None):\n \"\"\"\n Creates a replication configuration or replaces an existing one. For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) in the Amazon S3 Developer Guide.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_replication(\n Bucket='string',\n ReplicationConfiguration={\n 'Role': 'string',\n 'Rules': [\n {\n 'ID': 'string',\n 'Priority': 123,\n 'Prefix': 'string',\n 'Filter': {\n 'Prefix': 'string',\n 'Tag': {\n 'Key': 'string',\n 'Value': 'string'\n },\n 'And': {\n 'Prefix': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n 'Status': 'Enabled'|'Disabled',\n 'SourceSelectionCriteria': {\n 'SseKmsEncryptedObjects': {\n 'Status': 'Enabled'|'Disabled'\n }\n },\n 'Destination': {\n 'Bucket': 'string',\n 'Account': 'string',\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n 'AccessControlTranslation': {\n 'Owner': 'Destination'\n },\n 'EncryptionConfiguration': {\n 'ReplicaKmsKeyID': 'string'\n }\n },\n 'DeleteMarkerReplication': {\n 'Status': 'Enabled'|'Disabled'\n }\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type ReplicationConfiguration: dict\n :param ReplicationConfiguration: [REQUIRED]\n A container for replication rules. You can add up to 1,000 rules. 
The maximum size of a replication configuration is 2 MB.\n Role (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 can assume when replicating the objects.\n Rules (list) -- [REQUIRED]A container for one or more replication rules. A replication configuration must have at least one rule and can contain a maximum of 1,000 rules.\n (dict) --A container for information about a specific replication rule.\n ID (string) --A unique identifier for the rule. The maximum value is 255 characters.\n Priority (integer) --The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:\n Same object key prefix-based filter criteria, if the prefixes you specified in multiple rules overlap\n Same object tag-based filter criteria specified in multiple rules\n For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) in the Amazon S3 Developer Guide.\n Prefix (string) --An object keyname prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters.\n Filter (dict) --A filter that identifies the subset of objects to which the replication rule applies. A Filter must specify exactly one Prefix, Tag, or And child element.\n Prefix (string) --An object keyname prefix that identifies the subset of objects to which the rule applies.\n Tag (dict) --A container for specifying a tag key and value.\n The rule applies only to objects that have the tag in their tag set.\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n And (dict) --A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. For example:\n If you specify both a Prefix and a Tag filter, wrap these filters in an And tag.\n If you specify a filter based on multiple tags, wrap the Tag elements in an And tag.\n Prefix (string) --\n Tags (list) --\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n \n Status (string) -- [REQUIRED]If status isn't enabled, the rule is ignored.\n SourceSelectionCriteria (dict) --A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).\n Use this element if you want Amazon S3 to replicate objects created with server-side encryption using AWS KMS-Managed Keys.\n SseKmsEncryptedObjects (dict) --A container for filter information for the selection of S3 objects encrypted with AWS KMS. 
If you include SourceSelectionCriteria in the replication configuration, this element is required.\n Status (string) -- [REQUIRED]If the status is not Enabled, replication for S3 objects encrypted with AWS KMS is disabled.\n \n Destination (dict) -- [REQUIRED]A container for information about the replication destination.\n Bucket (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.\n If there are multiple rules in your replication configuration, all rules must specify the same bucket as the destination. A replication configuration can replicate objects to only one destination bucket.\n Account (string) --The account ID of the destination bucket. Currently, Amazon S3 verifies this value only if Access Control Translation is enabled.\n In a cross-account scenario, if you change replica ownership to the AWS account that owns the destination bucket by adding the AccessControlTranslation element, this is the account ID of the owner of the destination bucket.\n StorageClass (string) --The class of storage used to store the object. By default, Amazon S3 uses the storage class of the source object when creating a replica.\n AccessControlTranslation (dict) --A container for information about access control for replicas.\n Use this element only in a cross-account scenario where source and destination bucket owners are not the same to change replica ownership to the AWS account that owns the destination bucket. If you don't add this element to the replication configuration, the replicas are owned by the same AWS account that owns the source object.\n Owner (string) -- [REQUIRED]The override value for the owner of the replica object.\n EncryptionConfiguration (dict) --A container that provides information about encryption. If SourceSelectionCriteria is specified, you must specify this element.\n ReplicaKmsKeyID (string) --The ID of the AWS KMS key for the AWS Region where the destination bucket resides. Amazon S3 uses this key to encrypt the replica object.\n \n DeleteMarkerReplication (dict) --Specifies whether Amazon S3 should replicate delete markers.\n Status (string) --The status of the delete marker replication.\n Note\n In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled.\n \n \n \n\n \"\"\"\n pass\n
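\n# The :example: block above only shows placeholder types. A minimal, concrete\n# put_bucket_replication call might look like the sketch below; the bucket names,\n# IAM role ARN, and rule ID are illustrative placeholders, not fixed values.\n# (Replication also requires versioning to be enabled on both buckets.)\n#\n# import boto3\n# s3 = boto3.client('s3')\n# s3.put_bucket_replication(\n#     Bucket='source-bucket',\n#     ReplicationConfiguration={\n#         'Role': 'arn:aws:iam::123456789012:role/replication-role',\n#         'Rules': [{\n#             'ID': 'ReplicateEverything',\n#             'Priority': 1,\n#             'Filter': {'Prefix': ''},\n#             'Status': 'Enabled',\n#             'Destination': {'Bucket': 'arn:aws:s3:::destination-bucket'},\n#             'DeleteMarkerReplication': {'Status': 'Disabled'}\n#         }]\n#     }\n# )\n\ndef put_bucket_request_payment(Bucket=None, RequestPaymentConfiguration=None):\n \"\"\"\n Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. 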
Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_request_payment(\n Bucket='string',\n RequestPaymentConfiguration={\n 'Payer': 'Requester'|'BucketOwner'\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type RequestPaymentConfiguration: dict\n :param RequestPaymentConfiguration: [REQUIRED]\n Payer (string) -- [REQUIRED]Specifies who pays for the download and request fees.\n \n\n \"\"\"\n pass\n\ndef put_bucket_tagging(Bucket=None, Tagging=None):\n \"\"\"\n Sets the tags for a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_tagging(\n Bucket='string',\n Tagging={\n 'TagSet': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Tagging: dict\n :param Tagging: [REQUIRED]\n TagSet (list) -- [REQUIRED]\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n \n\n \"\"\"\n pass\n\ndef put_bucket_versioning(Bucket=None, MFA=None, VersioningConfiguration=None):\n \"\"\"\n Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_versioning(\n Bucket='string',\n MFA='string',\n VersioningConfiguration={\n 'MFADelete': 'Enabled'|'Disabled',\n 'Status': 'Enabled'|'Suspended'\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type MFA: string\n :param MFA: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.\n\n :type VersioningConfiguration: dict\n :param VersioningConfiguration: [REQUIRED]\n MFADelete (string) --Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.\n Status (string) --The versioning state of the bucket.\n \n\n \"\"\"\n pass\n\ndef put_bucket_website(Bucket=None, WebsiteConfiguration=None):\n \"\"\"\n Set the website configuration for a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_bucket_website(\n Bucket='string',\n WebsiteConfiguration={\n 'ErrorDocument': {\n 'Key': 'string'\n },\n 'IndexDocument': {\n 'Suffix': 'string'\n },\n 'RedirectAllRequestsTo': {\n 'HostName': 'string',\n 'Protocol': 'http'|'https'\n },\n 'RoutingRules': [\n {\n 'Condition': {\n 'HttpErrorCodeReturnedEquals': 'string',\n 'KeyPrefixEquals': 'string'\n },\n 'Redirect': {\n 'HostName': 'string',\n 'HttpRedirectCode': 'string',\n 'Protocol': 'http'|'https',\n 'ReplaceKeyPrefixWith': 'string',\n 'ReplaceKeyWith': 'string'\n }\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type WebsiteConfiguration: dict\n :param WebsiteConfiguration: [REQUIRED]\n ErrorDocument (dict) --\n Key (string) -- [REQUIRED]The object key name to use when a 4XX class error occurs.\n IndexDocument (dict) --\n Suffix (string) -- [REQUIRED]A suffix that is appended to a request that is for a directory on the website endpoint (e.g. 
if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.\n RedirectAllRequestsTo (dict) --\n HostName (string) -- [REQUIRED]Name of the host where requests will be redirected.\n Protocol (string) --Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.\n RoutingRules (list) --\n (dict) --\n Condition (dict) --A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If the request is for pages in the /docs folder, redirect to the /documents folder. 2. If the request results in an HTTP 4xx error, redirect the request to another host where you might process the error.\n HttpErrorCodeReturnedEquals (string) --The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied. Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied.\n KeyPrefixEquals (string) --The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect requests for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.\n Redirect (dict) -- [REQUIRED]Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.\n HostName (string) --The host name to use in the redirect request.\n HttpRedirectCode (string) --The HTTP redirect code to use on the response. Not required if one of the siblings is present.\n Protocol (string) --Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.\n ReplaceKeyPrefixWith (string) --The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.\n ReplaceKeyWith (string) --The specific object key to use in the redirect request. For example, redirect the request to error.html. Not required if one of the siblings is present. 
Can be present only if ReplaceKeyPrefixWith is not provided.\n \n \n \n\n \"\"\"\n pass\n\ndef put_object(ACL=None, Body=None, Bucket=None, CacheControl=None, ContentDisposition=None, ContentEncoding=None, ContentLanguage=None, ContentLength=None, ContentMD5=None, ContentType=None, Expires=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWriteACP=None, Key=None, Metadata=None, ServerSideEncryption=None, StorageClass=None, WebsiteRedirectLocation=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, SSEKMSKeyId=None, RequestPayer=None, Tagging=None, ObjectLockMode=None, ObjectLockRetainUntilDate=None, ObjectLockLegalHoldStatus=None):\n \"\"\"\n Adds an object to a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object(\n ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control',\n Body=b'bytes'|file,\n Bucket='string',\n CacheControl='string',\n ContentDisposition='string',\n ContentEncoding='string',\n ContentLanguage='string',\n ContentLength=123,\n ContentMD5='string',\n ContentType='string',\n Expires=datetime(2015, 1, 1),\n GrantFullControl='string',\n GrantRead='string',\n GrantReadACP='string',\n GrantWriteACP='string',\n Key='string',\n Metadata={\n 'string': 'string'\n },\n ServerSideEncryption='AES256'|'aws:kms',\n StorageClass='STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER',\n WebsiteRedirectLocation='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n SSEKMSKeyId='string',\n RequestPayer='requester',\n Tagging='string',\n ObjectLockMode='GOVERNANCE'|'COMPLIANCE',\n ObjectLockRetainUntilDate=datetime(2015, 1, 1),\n ObjectLockLegalHoldStatus='ON'|'OFF'\n )\n \n \n :type ACL: string\n :param ACL: The canned ACL to apply to the object.\n\n :type Body: bytes or seekable file-like object\n :param Body: Object data.\n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket to which the PUT operation was initiated.\n \n\n :type CacheControl: string\n :param CacheControl: Specifies caching behavior along the request/reply chain.\n\n :type ContentDisposition: string\n :param ContentDisposition: Specifies presentational information for the object.\n\n :type ContentEncoding: string\n :param ContentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.\n\n :type ContentLanguage: string\n :param ContentLanguage: The language the content is in.\n\n :type ContentLength: integer\n :param ContentLength: Size of the body in bytes. 
This parameter is useful when the size of the body cannot be determined automatically.\n\n :type ContentMD5: string\n :param ContentMD5: The base64-encoded 128-bit MD5 digest of the part data.\n\n :type ContentType: string\n :param ContentType: A standard MIME type describing the format of the object data.\n\n :type Expires: datetime\n :param Expires: The date and time at which the object is no longer cacheable.\n\n :type GrantFullControl: string\n :param GrantFullControl: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.\n\n :type GrantRead: string\n :param GrantRead: Allows grantee to read the object data and its metadata.\n\n :type GrantReadACP: string\n :param GrantReadACP: Allows grantee to read the object ACL.\n\n :type GrantWriteACP: string\n :param GrantWriteACP: Allows grantee to write the ACL for the applicable object.\n\n :type Key: string\n :param Key: [REQUIRED]\n Object key for which the PUT operation was initiated.\n \n\n :type Metadata: dict\n :param Metadata: A map of metadata to store with the object in S3.\n (string) --\n (string) --\n \n\n :type ServerSideEncryption: string\n :param ServerSideEncryption: The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).\n\n :type StorageClass: string\n :param StorageClass: The type of storage to use for the object. Defaults to 'STANDARD'.\n\n :type WebsiteRedirectLocation: string\n :param WebsiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.\n\n :type SSECustomerAlgorithm: string\n :param SSECustomerAlgorithm: Specifies the algorithm to use to when encrypting the object (e.g., AES256).\n\n :type SSECustomerKey: string\n :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side -encryption -customer-algorithm header.\n\n :type SSECustomerKeyMD5: string\n :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required\n \n\n :type SSEKMSKeyId: string\n :param SSEKMSKeyId: Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type Tagging: string\n :param Tagging: The tag-set for the object. The tag-set must be encoded as URL Query parameters. 
(For example, 'Key1=Value1')\n\n :type ObjectLockMode: string\n :param ObjectLockMode: The Object Lock mode that you want to apply to this object.\n\n :type ObjectLockRetainUntilDate: datetime\n :param ObjectLockRetainUntilDate: The date and time when you want this object's Object Lock to expire.\n\n :type ObjectLockLegalHoldStatus: string\n :param ObjectLockLegalHoldStatus: The Legal Hold status that you want to apply to the specified object.\n\n :rtype: dict\n :return: {\n 'Expiration': 'string',\n 'ETag': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'VersionId': 'string',\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef put_object_acl(ACL=None, AccessControlPolicy=None, Bucket=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWrite=None, GrantWriteACP=None, Key=None, RequestPayer=None, VersionId=None):\n \"\"\"\n Uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object_acl(\n ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control',\n AccessControlPolicy={\n 'Grants': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'WRITE'|'WRITE_ACP'|'READ'|'READ_ACP'\n },\n ],\n 'Owner': {\n 'DisplayName': 'string',\n 'ID': 'string'\n }\n },\n Bucket='string',\n GrantFullControl='string',\n GrantRead='string',\n GrantReadACP='string',\n GrantWrite='string',\n GrantWriteACP='string',\n Key='string',\n RequestPayer='requester',\n VersionId='string'\n )\n \n \n :type ACL: string\n :param ACL: The canned ACL to apply to the object.\n\n :type AccessControlPolicy: dict\n :param AccessControlPolicy: \n Grants (list) --A list of grants.\n (dict) --\n Grantee (dict) --\n DisplayName (string) --Screen name of the grantee.\n EmailAddress (string) --Email address of the grantee.\n ID (string) --The canonical user ID of the grantee.\n Type (string) -- [REQUIRED]Type of grantee.\n URI (string) --URI of the grantee group.\n Permission (string) --Specifies the permission given to the grantee.\n \n Owner (dict) --\n DisplayName (string) --\n ID (string) --\n \n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type GrantFullControl: string\n :param GrantFullControl: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.\n\n :type GrantRead: string\n :param GrantRead: Allows grantee to list the objects in the bucket.\n\n :type GrantReadACP: string\n :param GrantReadACP: Allows grantee to read the bucket ACL.\n\n :type GrantWrite: string\n :param GrantWrite: Allows grantee to create, overwrite, and delete any object in the bucket.\n\n :type GrantWriteACP: string\n :param GrantWriteACP: Allows grantee to write the ACL for the applicable bucket.\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type VersionId: string\n :param VersionId: The version ID used to reference a specific version of the object.\n\n :rtype: dict\n :return: {\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef put_object_legal_hold(Bucket=None, Key=None, LegalHold=None, RequestPayer=None, VersionId=None, ContentMD5=None):\n \"\"\"\n Applies a Legal Hold configuration to the specified object.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object_legal_hold(\n Bucket='string',\n Key='string',\n LegalHold={\n 'Status': 'ON'|'OFF'\n },\n RequestPayer='requester',\n VersionId='string',\n ContentMD5='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket containing the object that you want to place a Legal Hold on.\n \n\n :type Key: string\n :param Key: [REQUIRED]\n The key name for the object that you want to place a Legal Hold on.\n \n\n :type LegalHold: dict\n :param LegalHold: Container element for the Legal Hold configuration you want to apply to the specified object.\n Status (string) --Indicates whether the specified object has a Legal Hold in place.\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type VersionId: string\n :param VersionId: The version ID of the object that you want to place a Legal Hold on.\n\n :type ContentMD5: string\n :param ContentMD5: The MD5 hash for the request body.\n\n :rtype: dict\n :return: {\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n
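\n# Legal Holds require a bucket that was created with Object Lock enabled and\n# apply per object version. A minimal usage sketch (the bucket and key below\n# are illustrative placeholders):\n#\n# import boto3\n# s3 = boto3.client('s3')\n# s3.put_object_legal_hold(\n#     Bucket='locked-bucket',\n#     Key='audit/records.csv',\n#     LegalHold={'Status': 'ON'}\n# )\n\ndef put_object_lock_configuration(Bucket=None, ObjectLockConfiguration=None, RequestPayer=None, Token=None, ContentMD5=None):\n \"\"\"\n Places an Object Lock configuration on the specified bucket. 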
The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object_lock_configuration(\n Bucket='string',\n ObjectLockConfiguration={\n 'ObjectLockEnabled': 'Enabled',\n 'Rule': {\n 'DefaultRetention': {\n 'Mode': 'GOVERNANCE'|'COMPLIANCE',\n 'Days': 123,\n 'Years': 123\n }\n }\n },\n RequestPayer='requester',\n Token='string',\n ContentMD5='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket whose Object Lock configuration you want to create or replace.\n \n\n :type ObjectLockConfiguration: dict\n :param ObjectLockConfiguration: The Object Lock configuration that you want to apply to the specified bucket.\n ObjectLockEnabled (string) --Indicates whether this bucket has an Object Lock configuration enabled.\n Rule (dict) --The Object Lock rule in place for the specified object.\n DefaultRetention (dict) --The default retention period that you want to apply to new objects placed in the specified bucket.\n Mode (string) --The default Object Lock retention mode you want to apply to new objects placed in the specified bucket.\n Days (integer) --The number of days that you want to specify for the default retention period.\n Years (integer) --The number of years that you want to specify for the default retention period.\n \n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type Token: string\n :param Token: \n\n :type ContentMD5: string\n :param ContentMD5: The MD5 hash for the request body.\n\n :rtype: dict\n :return: {\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n \"\"\"\n Places an Object Retention configuration on an object.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object_retention(\n Bucket='string',\n Key='string',\n Retention={\n 'Mode': 'GOVERNANCE'|'COMPLIANCE',\n 'RetainUntilDate': datetime(2015, 1, 1)\n },\n RequestPayer='requester',\n VersionId='string',\n BypassGovernanceRetention=True|False,\n ContentMD5='string'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The bucket that contains the object you want to apply this Object Retention configuration to.\n \n\n :type Key: string\n :param Key: [REQUIRED]\n The key name for the object that you want to apply this Object Retention configuration to.\n \n\n :type Retention: dict\n :param Retention: The container element for the Object Retention configuration.\n Mode (string) --Indicates the Retention mode for the specified object.\n RetainUntilDate (datetime) --The date on which this Object Lock Retention will expire.\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :type VersionId: string\n :param VersionId: The version ID for the object that you want to apply this Object Retention configuration to.\n\n :type BypassGovernanceRetention: boolean\n :param BypassGovernanceRetention: Indicates whether this operation should bypass Governance-mode restrictions.\n\n :type ContentMD5: string\n :param ContentMD5: The MD5 hash for the request body.\n\n :rtype: dict\n :return: {\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef put_object_tagging(Bucket=None, Key=None, VersionId=None, ContentMD5=None, Tagging=None):\n \"\"\"\n Sets the supplied tag-set to an object that already exists in a bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_object_tagging(\n Bucket='string',\n Key='string',\n VersionId='string',\n ContentMD5='string',\n Tagging={\n 'TagSet': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type VersionId: string\n :param VersionId: \n\n :type ContentMD5: string\n :param ContentMD5: \n\n :type Tagging: dict\n :param Tagging: [REQUIRED]\n TagSet (list) -- [REQUIRED]\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n \n\n :rtype: dict\n :return: {\n 'VersionId': 'string'\n }\n \n \n :returns: \n (dict) --\n VersionId (string) --\n \n \n \n \"\"\"\n pass\n
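\n# A put_object_tagging call replaces the object's entire tag set. A minimal\n# usage sketch (the bucket, key, and tags below are illustrative placeholders):\n#\n# import boto3\n# s3 = boto3.client('s3')\n# s3.put_object_tagging(\n#     Bucket='my-bucket',\n#     Key='reports/2019-q1.csv',\n#     Tagging={'TagSet': [{'Key': 'project', 'Value': 'alpha'}]}\n# )\n\ndef put_public_access_block(Bucket=None, ContentMD5=None, PublicAccessBlockConfiguration=None):\n \"\"\"\n Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.put_public_access_block(\n Bucket='string',\n ContentMD5='string',\n PublicAccessBlockConfiguration={\n 'BlockPublicAcls': True|False,\n 'IgnorePublicAcls': True|False,\n 'BlockPublicPolicy': True|False,\n 'RestrictPublicBuckets': True|False\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to set.\n \n\n :type ContentMD5: string\n :param ContentMD5: The MD5 hash of the PutPublicAccessBlock request body.\n\n :type PublicAccessBlockConfiguration: dict\n :param PublicAccessBlockConfiguration: [REQUIRED]\n The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of 'Public' in the Amazon Simple Storage Service Developer Guide.\n BlockPublicAcls (boolean) --Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:\n PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.\n PUT Object calls fail if the request includes a public ACL.\n Enabling this setting doesn't affect existing policies or ACLs.\n IgnorePublicAcls (boolean) --Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. 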
Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.\n Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.\n BlockPublicPolicy (boolean) --Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\n Enabling this setting doesn't affect existing bucket policies.\n RestrictPublicBuckets (boolean) --Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.\n Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n \n\n \"\"\"\n pass\n\ndef restore_object(Bucket=None, Key=None, VersionId=None, RestoreRequest=None, RequestPayer=None):\n \"\"\"\n Restores an archived copy of an object back into Amazon S3\n See also: AWS API Documentation\n \n \n :example: response = client.restore_object(\n Bucket='string',\n Key='string',\n VersionId='string',\n RestoreRequest={\n 'Days': 123,\n 'GlacierJobParameters': {\n 'Tier': 'Standard'|'Bulk'|'Expedited'\n },\n 'Type': 'SELECT',\n 'Tier': 'Standard'|'Bulk'|'Expedited',\n 'Description': 'string',\n 'SelectParameters': {\n 'InputSerialization': {\n 'CSV': {\n 'FileHeaderInfo': 'USE'|'IGNORE'|'NONE',\n 'Comments': 'string',\n 'QuoteEscapeCharacter': 'string',\n 'RecordDelimiter': 'string',\n 'FieldDelimiter': 'string',\n 'QuoteCharacter': 'string',\n 'AllowQuotedRecordDelimiter': True|False\n },\n 'CompressionType': 'NONE'|'GZIP'|'BZIP2',\n 'JSON': {\n 'Type': 'DOCUMENT'|'LINES'\n },\n 'Parquet': {}\n \n },\n 'ExpressionType': 'SQL',\n 'Expression': 'string',\n 'OutputSerialization': {\n 'CSV': {\n 'QuoteFields': 'ALWAYS'|'ASNEEDED',\n 'QuoteEscapeCharacter': 'string',\n 'RecordDelimiter': 'string',\n 'FieldDelimiter': 'string',\n 'QuoteCharacter': 'string'\n },\n 'JSON': {\n 'RecordDelimiter': 'string'\n }\n }\n },\n 'OutputLocation': {\n 'S3': {\n 'BucketName': 'string',\n 'Prefix': 'string',\n 'Encryption': {\n 'EncryptionType': 'AES256'|'aws:kms',\n 'KMSKeyId': 'string',\n 'KMSContext': 'string'\n },\n 'CannedACL': 'private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control',\n 'AccessControlList': [\n {\n 'Grantee': {\n 'DisplayName': 'string',\n 'EmailAddress': 'string',\n 'ID': 'string',\n 'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',\n 'URI': 'string'\n },\n 'Permission': 'FULL_CONTROL'|'WRITE'|'WRITE_ACP'|'READ'|'READ_ACP'\n },\n ],\n 'Tagging': {\n 'TagSet': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'UserMetadata': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER'\n }\n }\n },\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type VersionId: string\n :param VersionId: \n\n :type RestoreRequest: dict\n :param RestoreRequest: Container for restore job parameters.\n Days (integer) --Lifetime of the active copy in days. 
Do not use with restores that specify OutputLocation.\n GlacierJobParameters (dict) --Glacier-related parameters pertaining to this job. Do not use with restores that specify OutputLocation.\n Tier (string) -- [REQUIRED]Glacier retrieval tier at which the restore will be processed.\n Type (string) --Type of restore request.\n Tier (string) --Glacier retrieval tier at which the restore will be processed.\n Description (string) --The optional description for the job.\n SelectParameters (dict) --Describes the parameters for Select job types.\n InputSerialization (dict) -- [REQUIRED]Describes the serialization format of the object.\n CSV (dict) --Describes the serialization of a CSV-encoded object.\n FileHeaderInfo (string) --Describes the first line of input. Valid values: None, Ignore, Use.\n Comments (string) --The single character used to indicate a row should be ignored when present at the start of a row.\n QuoteEscapeCharacter (string) --The single character used for escaping the quote character inside an already escaped value.\n RecordDelimiter (string) --The value used to separate individual records.\n FieldDelimiter (string) --The value used to separate individual fields in a record.\n QuoteCharacter (string) --Value used for escaping where the field delimiter is part of the value.\n AllowQuotedRecordDelimiter (boolean) --Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance.\n CompressionType (string) --Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default Value: NONE.\n JSON (dict) --Specifies JSON as object's input serialization format.\n Type (string) --The type of JSON. Valid values: Document, Lines.\n Parquet (dict) --Specifies Parquet as object's input serialization format.\n ExpressionType (string) -- [REQUIRED]The type of the provided expression (e.g., SQL).\n Expression (string) -- [REQUIRED]The expression that is used to query the object.\n OutputSerialization (dict) -- [REQUIRED]Describes how the results of the Select job are serialized.\n CSV (dict) --Describes the serialization of CSV-encoded Select results.\n QuoteFields (string) --Indicates whether or not all output fields should be quoted.\n QuoteEscapeCharacter (string) --The single character used for escaping the quote character inside an already escaped value.\n RecordDelimiter (string) --The value used to separate individual records.\n FieldDelimiter (string) --The value used to separate individual fields in a record.\n QuoteCharacter (string) --The value used for escaping where the field delimiter is part of the value.\n JSON (dict) --Specifies JSON as request's output serialization format.\n RecordDelimiter (string) --The value used to separate individual records in the output.\n \n OutputLocation (dict) --Describes the location where the restore job's output is stored.\n S3 (dict) --Describes an S3 location that will receive the results of the restore request.\n BucketName (string) -- [REQUIRED]The name of the bucket where the restore results will be placed.\n Prefix (string) -- [REQUIRED]The prefix that is prepended to the restore results for this request.\n Encryption (dict) --Describes the server-side encryption that will be applied to the restore results.\n EncryptionType (string) -- [REQUIRED]The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).\n KMSKeyId (string) --If the encryption type is aws:kms, this 
optional value specifies the AWS KMS key ID to use for encryption of job results.\n KMSContext (string) --If the encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results.\n CannedACL (string) --The canned ACL to apply to the restore results.\n AccessControlList (list) --A list of grants that control access to the staged results.\n (dict) --\n Grantee (dict) --\n DisplayName (string) --Screen name of the grantee.\n EmailAddress (string) --Email address of the grantee.\n ID (string) --The canonical user ID of the grantee.\n Type (string) -- [REQUIRED]Type of grantee\n URI (string) --URI of the grantee group.\n Permission (string) --Specifies the permission given to the grantee.\n \n Tagging (dict) --The tag-set that is applied to the restore results.\n TagSet (list) -- [REQUIRED]\n (dict) --\n Key (string) -- [REQUIRED]Name of the tag.\n Value (string) -- [REQUIRED]Value of the tag.\n \n UserMetadata (list) --A list of metadata to store with the restore results in S3.\n (dict) --A metadata key-value pair to store with an object.\n Name (string) --\n Value (string) --\n \n StorageClass (string) --The class of storage used to store the restore results.\n \n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'RequestCharged': 'requester',\n 'RestoreOutputPath': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef select_object_content(Bucket=None, Key=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, Expression=None, ExpressionType=None, RequestProgress=None, InputSerialization=None, OutputSerialization=None):\n \"\"\"\n This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. 
You must also specify the data serialization format for the response.\n See also: AWS API Documentation\n \n \n :example: response = client.select_object_content(\n Bucket='string',\n Key='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n Expression='string',\n ExpressionType='SQL',\n RequestProgress={\n 'Enabled': True|False\n },\n InputSerialization={\n 'CSV': {\n 'FileHeaderInfo': 'USE'|'IGNORE'|'NONE',\n 'Comments': 'string',\n 'QuoteEscapeCharacter': 'string',\n 'RecordDelimiter': 'string',\n 'FieldDelimiter': 'string',\n 'QuoteCharacter': 'string',\n 'AllowQuotedRecordDelimiter': True|False\n },\n 'CompressionType': 'NONE'|'GZIP'|'BZIP2',\n 'JSON': {\n 'Type': 'DOCUMENT'|'LINES'\n },\n 'Parquet': {}\n \n },\n OutputSerialization={\n 'CSV': {\n 'QuoteFields': 'ALWAYS'|'ASNEEDED',\n 'QuoteEscapeCharacter': 'string',\n 'RecordDelimiter': 'string',\n 'FieldDelimiter': 'string',\n 'QuoteCharacter': 'string'\n },\n 'JSON': {\n 'RecordDelimiter': 'string'\n }\n }\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n The S3 bucket.\n \n\n :type Key: string\n :param Key: [REQUIRED]\n The object key.\n \n\n :type SSECustomerAlgorithm: string\n :param SSECustomerAlgorithm: The SSE Algorithm used to encrypt the object. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys).\n\n :type SSECustomerKey: string\n :param SSECustomerKey: The SSE Customer Key. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys).\n\n :type SSECustomerKeyMD5: string\n :param SSECustomerKeyMD5: The SSE Customer Key MD5. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys).\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required\n \n\n :type Expression: string\n :param Expression: [REQUIRED]\n The expression that is used to query the object.\n \n\n :type ExpressionType: string\n :param ExpressionType: [REQUIRED]\n The type of the provided expression (for example, SQL).\n \n\n :type RequestProgress: dict\n :param RequestProgress: Specifies if periodic request progress information should be enabled.\n Enabled (boolean) --Specifies whether periodic QueryProgress frames should be sent. Valid values: TRUE, FALSE. Default value: FALSE.\n \n\n :type InputSerialization: dict\n :param InputSerialization: [REQUIRED]\n Describes the format of the data in the object that is being queried.\n CSV (dict) --Describes the serialization of a CSV-encoded object.\n FileHeaderInfo (string) --Describes the first line of input. Valid values: None, Ignore, Use.\n Comments (string) --The single character used to indicate a row should be ignored when present at the start of a row.\n QuoteEscapeCharacter (string) --The single character used for escaping the quote character inside an already escaped value.\n RecordDelimiter (string) --The value used to separate individual records.\n FieldDelimiter (string) --The value used to separate individual fields in a record.\n QuoteCharacter (string) --Value used for escaping where the field delimiter is part of the value.\n AllowQuotedRecordDelimiter (boolean) --Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance.\n CompressionType (string) --Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. 
Default Value: NONE.\n JSON (dict) --Specifies JSON as object's input serialization format.\n Type (string) --The type of JSON. Valid values: Document, Lines.\n Parquet (dict) --Specifies Parquet as object's input serialization format.\n \n\n :type OutputSerialization: dict\n :param OutputSerialization: [REQUIRED]\n Describes the format of the data that you want Amazon S3 to return in response.\n CSV (dict) --Describes the serialization of CSV-encoded Select results.\n QuoteFields (string) --Indicates whether or not all output fields should be quoted.\n QuoteEscapeCharacter (string) --The single character used for escaping the quote character inside an already escaped value.\n RecordDelimiter (string) --The value used to separate individual records.\n FieldDelimiter (string) --The value used to separate individual fields in a record.\n QuoteCharacter (string) --The value used for escaping where the field delimiter is part of the value.\n JSON (dict) --Specifies JSON as request's output serialization format.\n RecordDelimiter (string) --The value used to separate individual records in the output.\n \n \n\n :rtype: dict\n :return: {\n 'Payload': EventStream({\n 'Records': {\n 'Payload': b'bytes'\n },\n 'Stats': {\n 'Details': {\n 'BytesScanned': 123,\n 'BytesProcessed': 123,\n 'BytesReturned': 123\n }\n },\n 'Progress': {\n 'Details': {\n 'BytesScanned': 123,\n 'BytesProcessed': 123,\n 'BytesReturned': 123\n }\n },\n 'Cont': {},\n 'End': {}\n })\n }\n \n \n \"\"\"\n pass\n\ndef upload_file(Filename=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n \"\"\"\n Upload a file to an S3 object.\n Similar behavior to S3Transfer's upload_file() method, except that parameters\n are capitalized. Detailed examples can be found in the S3Transfer documentation.\n \n :example: import boto3\n s3 = boto3.resource('s3')\n s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')\n \n \n :type Filename: str\n :param Filename: The path to the file to upload.\n\n :type Bucket: str\n :param Bucket: The name of the bucket to upload to.\n\n :type Key: str\n :param Key: The name of the key to upload to.\n\n :type ExtraArgs: dict\n :param ExtraArgs: Extra arguments that may be passed to the\n client operation.\n\n :type Callback: function\n :param Callback: A method which takes a number of bytes transferred to\n be periodically called during the upload.\n\n :type Config: boto3.s3.transfer.TransferConfig\n :param Config: The transfer configuration to be used when performing the\n transfer.\n\n \"\"\"\n pass\n\ndef upload_fileobj(Fileobj=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n \"\"\"\n Upload a file-like object to S3.\n The file-like object must be in binary mode.\n This is a managed transfer which will perform a multipart upload in\n multiple threads if necessary.\n \n :example: import boto3\n s3 = boto3.client('s3')\n \n with open('filename', 'rb') as data:\n s3.upload_fileobj(data, 'mybucket', 'mykey')\n \n \n :type Fileobj: a file-like object\n :param Fileobj: A file-like object to upload. 
At a minimum, it must\n implement the read method, and must return bytes.\n\n :type Bucket: str\n :param Bucket: The name of the bucket to upload to.\n\n :type Key: str\n :param Key: The name of the key to upload to.\n\n :type ExtraArgs: dict\n :param ExtraArgs: Extra arguments that may be passed to the\n client operation.\n\n :type Callback: function\n :param Callback: A method which takes a number of bytes transferred to\n be periodically called during the upload.\n\n :type Config: boto3.s3.transfer.TransferConfig\n :param Config: The transfer configuration to be used when performing the\n upload.\n\n \"\"\"\n pass\n\ndef upload_part(Body=None, Bucket=None, ContentLength=None, ContentMD5=None, Key=None, PartNumber=None, UploadId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, RequestPayer=None):\n \"\"\"\n Uploads a part in a multipart upload.\n See also: AWS API Documentation\n \n \n :example: response = client.upload_part(\n Body=b'bytes'|file,\n Bucket='string',\n ContentLength=123,\n ContentMD5='string',\n Key='string',\n PartNumber=123,\n UploadId='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n RequestPayer='requester'\n )\n \n \n :type Body: bytes or seekable file-like object\n :param Body: Object data.\n\n :type Bucket: string\n :param Bucket: [REQUIRED]\n Name of the bucket to which the multipart upload was initiated.\n \n\n :type ContentLength: integer\n :param ContentLength: Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.\n\n :type ContentMD5: string\n :param ContentMD5: The base64-encoded 128-bit MD5 digest of the part data.\n\n :type Key: string\n :param Key: [REQUIRED]\n Object key for which the multipart upload was initiated.\n \n\n :type PartNumber: integer\n :param PartNumber: [REQUIRED]\n Part number of part being uploaded. This is a positive integer between 1 and 10,000.\n \n\n :type UploadId: string\n :param UploadId: [REQUIRED]\n Upload ID identifying the multipart upload whose part is being uploaded.\n \n\n :type SSECustomerAlgorithm: string\n :param SSECustomerAlgorithm: Specifies the algorithm to use to when encrypting the object (e.g., AES256).\n\n :type SSECustomerKey: string\n :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side -encryption -customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.\n\n :type SSECustomerKeyMD5: string\n :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'ETag': 'string',\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\ndef upload_part_copy(Bucket=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, CopySourceRange=None, Key=None, PartNumber=None, UploadId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None):\n \"\"\"\n Uploads a part by copying data from an existing object as data source.\n See also: AWS API Documentation\n \n \n :example: response = client.upload_part_copy(\n Bucket='string',\n CopySource='string' or {'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'},\n CopySourceIfMatch='string',\n CopySourceIfModifiedSince=datetime(2015, 1, 1),\n CopySourceIfNoneMatch='string',\n CopySourceIfUnmodifiedSince=datetime(2015, 1, 1),\n CopySourceRange='string',\n Key='string',\n PartNumber=123,\n UploadId='string',\n SSECustomerAlgorithm='string',\n SSECustomerKey='string',\n CopySourceSSECustomerAlgorithm='string',\n CopySourceSSECustomerKey='string',\n RequestPayer='requester'\n )\n \n \n :type Bucket: string\n :param Bucket: [REQUIRED]\n\n :type CopySource: str or dict\n :param CopySource: [REQUIRED] The name of the source bucket, key name of the source object, and optional version ID of the source object. You can either provide this value as a string or a dictionary. The string form is {bucket}/{key} or {bucket}/{key}?versionId={versionId} if you want to copy a specific version. You can also provide this value as a dictionary. The dictionary format is recommended over the string format because it is more explicit. The dictionary format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}. Note that the VersionId key is optional and may be omitted.\n\n :type CopySourceIfMatch: string\n :param CopySourceIfMatch: Copies the object if its entity tag (ETag) matches the specified tag.\n\n :type CopySourceIfModifiedSince: datetime\n :param CopySourceIfModifiedSince: Copies the object if it has been modified since the specified time.\n\n :type CopySourceIfNoneMatch: string\n :param CopySourceIfNoneMatch: Copies the object if its entity tag (ETag) is different than the specified ETag.\n\n :type CopySourceIfUnmodifiedSince: datetime\n :param CopySourceIfUnmodifiedSince: Copies the object if it hasn't been modified since the specified time.\n\n :type CopySourceRange: string\n :param CopySourceRange: The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB.\n\n :type Key: string\n :param Key: [REQUIRED]\n\n :type PartNumber: integer\n :param PartNumber: [REQUIRED]\n Part number of part being copied. 
This is a positive integer between 1 and 10,000.\n \n\n :type UploadId: string\n :param UploadId: [REQUIRED]\n Upload ID identifying the multipart upload whose part is being copied.\n \n\n :type SSECustomerAlgorithm: string\n :param SSECustomerAlgorithm: Specifies the algorithm to use when encrypting the object (e.g., AES256).\n\n :type SSECustomerKey: string\n :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.\n\n :type SSECustomerKeyMD5: string\n :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required.\n \n\n :type CopySourceSSECustomerAlgorithm: string\n :param CopySourceSSECustomerAlgorithm: Specifies the algorithm to use when decrypting the source object (e.g., AES256).\n\n :type CopySourceSSECustomerKey: string\n :param CopySourceSSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.\n\n :type CopySourceSSECustomerKeyMD5: string\n :param CopySourceSSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.\n Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required.\n \n\n :type RequestPayer: string\n :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
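(Illustrative sketch, not part of the generated stub: copying the first part of a new multipart upload from an existing object, using the dictionary form of CopySource. All bucket, key, and range values are placeholders.)\n import boto3\n client = boto3.client('s3')\n mpu = client.create_multipart_upload(Bucket='dest-bucket', Key='dest-key')\n part = client.upload_part_copy(\n Bucket='dest-bucket',\n Key='dest-key',\n PartNumber=1,\n UploadId=mpu['UploadId'],\n CopySource={'Bucket': 'src-bucket', 'Key': 'src-key'},\n CopySourceRange='bytes=0-5242879', # the first 5 MB of the source object\n )\n etag = part['CopyPartResult']['ETag'] # recorded for complete_multipart_upload\n As with upload_part, a requester pays bucket would also need RequestPayer='requester'. 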
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html\n\n :rtype: dict\n :return: {\n 'CopySourceVersionId': 'string',\n 'CopyPartResult': {\n 'ETag': 'string',\n 'LastModified': datetime(2015, 1, 1)\n },\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'RequestCharged': 'requester'\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.7145647406578064, "alphanum_fraction": 0.7227095365524292, "avg_line_length": 75.86591339111328, "blob_id": "7b5cee73dea23de5bcc63b9aa4704f7d9923ab97", "content_id": "44311ce1b6b706d58dc3358e1e22248d6e0fa866", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81402, "license_type": "permissive", "max_line_length": 797, "num_lines": 1059, "path": "/pyboto3/applicationautoscaling.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef delete_scaling_policy(PolicyName=None, ServiceNamespace=None, ResourceId=None, ScalableDimension=None):\n \"\"\"\n Deletes the specified Application Auto Scaling scaling policy.\n Deleting a policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.\n To create a scaling policy or update an existing one, see PutScalingPolicy .\n See also: AWS API Documentation\n \n Examples\n This example deletes a scaling policy for the Amazon ECS service called web-app, which is running in the default cluster.\n Expected Output:\n \n :example: response = client.delete_scaling_policy(\n PolicyName='string',\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property'\n )\n \n \n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name of the scaling policy.\n \n\n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. 
The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: [REQUIRED]\n The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_scheduled_action(ServiceNamespace=None, ScheduledActionName=None, ResourceId=None, ScalableDimension=None):\n \"\"\"\n Deletes the specified Application Auto Scaling scheduled action.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_scheduled_action(\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ScheduledActionName='string',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property'\n )\n \n \n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ScheduledActionName: string\n :param ScheduledActionName: [REQUIRED]\n The name of the scheduled action.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. 
Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. 
Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef deregister_scalable_target(ServiceNamespace=None, ResourceId=None, ScalableDimension=None):\n \"\"\"\n Deregisters a scalable target.\n Deregistering a scalable target deletes the scaling policies that are associated with it.\n To create a scalable target or update an existing one, see RegisterScalableTarget .\n See also: AWS API Documentation\n \n Examples\n This example deregisters a scalable target for an Amazon ECS service called web-app that is running in the default cluster.\n Expected Output:\n \n :example: response = client.deregister_scalable_target(\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property'\n )\n \n \n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. 
The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: [REQUIRED]\n The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef describe_scalable_targets(ServiceNamespace=None, ResourceIds=None, ScalableDimension=None, MaxResults=None, NextToken=None):\n \"\"\"\n Gets information about the scalable targets in the specified namespace.\n You can filter the results using the ResourceIds and ScalableDimension parameters.\n To create a scalable target or update an existing one, see RegisterScalableTarget . If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget .\n See also: AWS API Documentation\n \n Examples\n This example describes the scalable targets for the ecs service namespace.\n Expected Output:\n \n :example: response = client.describe_scalable_targets(\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceIds=[\n 'string',\n ],\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceIds: list\n :param ResourceIds: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. 
Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n (string) --\n \n\n :type ScalableDimension: string\n :param ScalableDimension: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.\n If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. 
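(Illustrative sketch, not part of the generated stub: paging through every scalable target in the ecs namespace with an explicit NextToken loop.)\n import boto3\n client = boto3.client('application-autoscaling')\n targets = []\n kwargs = {'ServiceNamespace': 'ecs', 'MaxResults': 10}\n while True:\n page = client.describe_scalable_targets(**kwargs)\n targets.extend(page['ScalableTargets'])\n if 'NextToken' not in page:\n break # no more pages\n kwargs['NextToken'] = page['NextToken']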
If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results.\n\n :rtype: dict\n :return: {\n 'ScalableTargets': [\n {\n 'ServiceNamespace': 'ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n 'ResourceId': 'string',\n 'ScalableDimension': 'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n 'MinCapacity': 123,\n 'MaxCapacity': 123,\n 'RoleARN': 'string',\n 'CreationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n \"\"\"\n pass\n\ndef describe_scaling_activities(ServiceNamespace=None, ResourceId=None, ScalableDimension=None, MaxResults=None, NextToken=None):\n \"\"\"\n Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.\n You can filter the results using the ResourceId and ScalableDimension parameters.\n Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies . 
To create a scaling policy or update an existing one, see PutScalingPolicy .\n See also: AWS API Documentation\n \n Examples\n This example describes the scaling activities for an Amazon ECS service called web-app that is running in the default cluster.\n Expected Output:\n \n :example: response = client.describe_scaling_activities(\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. 
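(Illustrative sketch, not part of the generated stub: listing recent scaling activities for one ECS service; the resource values are placeholders, and ResourceId and ScalableDimension are passed together, as required.)\n import boto3\n client = boto3.client('application-autoscaling')\n response = client.describe_scaling_activities(\n ServiceNamespace='ecs',\n ResourceId='service/default/sample-webapp',\n ScalableDimension='ecs:service:DesiredCount',\n )\n for activity in response['ScalingActivities']:\n print(activity['StartTime'], activity['StatusCode'], activity['Cause'])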
If you specify a scalable dimension, you must also specify a resource ID.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.\n If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results.\n\n :rtype: dict\n :return: {\n 'ScalingActivities': [\n {\n 'ActivityId': 'string',\n 'ServiceNamespace': 'ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n 'ResourceId': 'string',\n 'ScalableDimension': 'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n 'Description': 'string',\n 'Cause': 'string',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusCode': 'Pending'|'InProgress'|'Successful'|'Overridden'|'Unfulfilled'|'Failed',\n 'StatusMessage': 'string',\n 'Details': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. 
Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n \"\"\"\n pass\n\ndef describe_scaling_policies(PolicyNames=None, ServiceNamespace=None, ResourceId=None, ScalableDimension=None, MaxResults=None, NextToken=None):\n \"\"\"\n Describes the scaling policies for the specified service namespace.\n You can filter the results using the ResourceId , ScalableDimension , and PolicyNames parameters.\n To create a scaling policy or update an existing one, see PutScalingPolicy . If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy .\n See also: AWS API Documentation\n \n Examples\n This example describes the scaling policies for the ecs service namespace.\n Expected Output:\n \n :example: response = client.describe_scaling_policies(\n PolicyNames=[\n 'string',\n ],\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type PolicyNames: list\n :param PolicyNames: The names of the scaling policies to describe.\n (string) --\n \n\n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. 
Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.\n If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. 
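(Illustrative sketch, not part of the generated stub: the NextToken bookkeeping can also be delegated to a built-in paginator, assuming one is available for this operation, as it is in current botocore releases.)\n import boto3\n client = boto3.client('application-autoscaling')\n paginator = client.get_paginator('describe_scaling_policies')\n for page in paginator.paginate(ServiceNamespace='ecs'):\n for policy in page['ScalingPolicies']:\n print(policy['PolicyName'], policy['PolicyType'])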
If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results.\n\n :rtype: dict\n :return: {\n 'ScalingPolicies': [\n {\n 'PolicyARN': 'string',\n 'PolicyName': 'string',\n 'ServiceNamespace': 'ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n 'ResourceId': 'string',\n 'ScalableDimension': 'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n 'PolicyType': 'StepScaling'|'TargetTrackingScaling',\n 'StepScalingPolicyConfiguration': {\n 'AdjustmentType': 'ChangeInCapacity'|'PercentChangeInCapacity'|'ExactCapacity',\n 'StepAdjustments': [\n {\n 'MetricIntervalLowerBound': 123.0,\n 'MetricIntervalUpperBound': 123.0,\n 'ScalingAdjustment': 123\n },\n ],\n 'MinAdjustmentMagnitude': 123,\n 'Cooldown': 123,\n 'MetricAggregationType': 'Average'|'Minimum'|'Maximum'\n },\n 'TargetTrackingScalingPolicyConfiguration': {\n 'TargetValue': 123.0,\n 'PredefinedMetricSpecification': {\n 'PredefinedMetricType': 'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut'|'SageMakerVariantInvocationsPerInstance'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization',\n 'ResourceLabel': 'string'\n },\n 'CustomizedMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'ScaleOutCooldown': 123,\n 'ScaleInCooldown': 123,\n 'DisableScaleIn': True|False\n },\n 'Alarms': [\n {\n 'AlarmName': 'string',\n 'AlarmARN': 'string'\n },\n ],\n 'CreationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. 
Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n \"\"\"\n pass\n\ndef describe_scheduled_actions(ScheduledActionNames=None, ServiceNamespace=None, ResourceId=None, ScalableDimension=None, MaxResults=None, NextToken=None):\n \"\"\"\n Describes the scheduled actions for the specified service namespace.\n You can filter the results using the ResourceId , ScalableDimension , and ScheduledActionNames parameters.\n To create a scheduled action or update an existing one, see PutScheduledAction . If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_scheduled_actions(\n ScheduledActionNames=[\n 'string',\n ],\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ScheduledActionNames: list\n :param ScheduledActionNames: The names of the scheduled actions to describe.\n (string) --\n \n\n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. 
Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of scheduled action results. This value can be between 1 and 50. The default value is 50.\n If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. 
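(Illustrative sketch, not part of the generated stub: fetching two named scheduled actions for a DynamoDB table in a single call. The table and action names are placeholders.)\n import boto3\n client = boto3.client('application-autoscaling')\n response = client.describe_scheduled_actions(\n ServiceNamespace='dynamodb',\n ResourceId='table/my-table',\n ScalableDimension='dynamodb:table:WriteCapacityUnits',\n ScheduledActionNames=['scale-up-morning', 'scale-down-evening'],\n )\n for action in response['ScheduledActions']:\n print(action['ScheduledActionName'], action['Schedule'])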
If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.\n \n\n :type NextToken: string\n :param NextToken: The token for the next set of results.\n\n :rtype: dict\n :return: {\n 'ScheduledActions': [\n {\n 'ScheduledActionName': 'string',\n 'ScheduledActionARN': 'string',\n 'ServiceNamespace': 'ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n 'Schedule': 'string',\n 'ResourceId': 'string',\n 'ScalableDimension': 'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'ScalableTargetAction': {\n 'MinCapacity': 123,\n 'MaxCapacity': 123\n },\n 'CreationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n At expressions - at(*yyyy* -*mm* -*dd* T*hh* :*mm* :*ss* )\n Rate expressions - rate(*value* *unit* )\n Cron expressions - cron(*fields* )\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef put_scaling_policy(PolicyName=None, ServiceNamespace=None, ResourceId=None, ScalableDimension=None, PolicyType=None, StepScalingPolicyConfiguration=None, TargetTrackingScalingPolicyConfiguration=None):\n \"\"\"\n Creates or updates a policy for an Application Auto Scaling scalable target.\n Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you register the scalable target using RegisterScalableTarget .\n To update a policy, specify its policy name and the parameters that you want to change. 
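(Illustrative sketch, not part of the generated stub: creating a target tracking policy that holds an ECS service near 75% average CPU. It assumes the scalable target was already registered with RegisterScalableTarget; the policy and resource names are placeholders. Re-running the call with the same PolicyName and only the fields to change updates the policy in place.)\n import boto3\n client = boto3.client('application-autoscaling')\n client.put_scaling_policy(\n PolicyName='cpu75-target-tracking',\n ServiceNamespace='ecs',\n ResourceId='service/default/sample-webapp',\n ScalableDimension='ecs:service:DesiredCount',\n PolicyType='TargetTrackingScaling',\n TargetTrackingScalingPolicyConfiguration={\n 'TargetValue': 75.0,\n 'PredefinedMetricSpecification': {\n 'PredefinedMetricType': 'ECSServiceAverageCPUUtilization',\n },\n 'ScaleOutCooldown': 60,\n 'ScaleInCooldown': 60,\n },\n )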
Any parameters that you don't specify are not changed by this update request.\n You can view the scaling policies for a service namespace using DescribeScalingPolicies . If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy .\n See also: AWS API Documentation\n \n Examples\n This example applies a scaling policy to an Amazon ECS service called web-app in the default cluster. The policy increases the desired count of the service by 200%, with a cooldown period of 60 seconds.\n Expected Output:\n This example applies a scaling policy to an Amazon EC2 Spot fleet. The policy increases the target capacity of the spot fleet by 200%, with a cooldown period of 180 seconds.\n Expected Output:\n \n :example: response = client.put_scaling_policy(\n PolicyName='string',\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n PolicyType='StepScaling'|'TargetTrackingScaling',\n StepScalingPolicyConfiguration={\n 'AdjustmentType': 'ChangeInCapacity'|'PercentChangeInCapacity'|'ExactCapacity',\n 'StepAdjustments': [\n {\n 'MetricIntervalLowerBound': 123.0,\n 'MetricIntervalUpperBound': 123.0,\n 'ScalingAdjustment': 123\n },\n ],\n 'MinAdjustmentMagnitude': 123,\n 'Cooldown': 123,\n 'MetricAggregationType': 'Average'|'Minimum'|'Maximum'\n },\n TargetTrackingScalingPolicyConfiguration={\n 'TargetValue': 123.0,\n 'PredefinedMetricSpecification': {\n 'PredefinedMetricType': 'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut'|'SageMakerVariantInvocationsPerInstance'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization',\n 'ResourceLabel': 'string'\n },\n 'CustomizedMetricSpecification': {\n 'MetricName': 'string',\n 'Namespace': 'string',\n 'Dimensions': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',\n 'Unit': 'string'\n },\n 'ScaleOutCooldown': 123,\n 'ScaleInCooldown': 123,\n 'DisableScaleIn': True|False\n }\n )\n \n \n :type PolicyName: string\n :param PolicyName: [REQUIRED]\n The name of the scaling policy.\n \n\n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. 
Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: [REQUIRED]\n The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type PolicyType: string\n :param PolicyType: The policy type. This parameter is required if you are creating a policy.\n For DynamoDB, only TargetTrackingScaling is supported. For Amazon ECS, Spot Fleet, and Amazon RDS, both StepScaling and TargetTrackingScaling are supported. For any other service, only StepScaling is supported.\n \n\n :type StepScalingPolicyConfiguration: dict\n :param StepScalingPolicyConfiguration: A step scaling policy.\n This parameter is required if you are creating a policy and the policy type is StepScaling .\n AdjustmentType (string) --The adjustment type, which specifies how the ScalingAdjustment parameter in a StepAdjustment is interpreted.\n StepAdjustments (list) --A set of adjustments that enable you to scale based on the size of the alarm breach.\n (dict) --Represents a step adjustment for a StepScalingPolicyConfiguration . 
Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.\n For the following examples, suppose that you have an alarm with a breach threshold of 50:\n To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.\n To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.\n There are a few rules for the step adjustments for your step policy:\n The ranges of your step adjustments can't overlap or have a gap.\n At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.\n At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.\n The upper and lower bound can't be null in the same step adjustment.\n MetricIntervalLowerBound (float) --The lower bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the lower bound is inclusive (the metric must be greater than or equal to the threshold plus the lower bound). Otherwise, it is exclusive (the metric must be greater than the threshold plus the lower bound). A null value indicates negative infinity.\n MetricIntervalUpperBound (float) --The upper bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the upper bound is exclusive (the metric must be less than the threshold plus the upper bound). Otherwise, it is inclusive (the metric must be less than or equal to the threshold plus the upper bound). A null value indicates positive infinity.\n The upper bound must be greater than the lower bound.\n ScalingAdjustment (integer) -- [REQUIRED]The amount by which to scale, based on the specified adjustment type. A positive value adds to the current scalable dimension while a negative number removes from the current scalable dimension.\n \n MinAdjustmentMagnitude (integer) --The minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity , the scaling policy changes the scalable dimension of the scalable target by this amount.\n Cooldown (integer) --The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.\n For scale out policies, while the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out. For example, an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a cooldown period of 5 minutes starts. 
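As a hedged sketch only (the policy, cluster, and service names below are hypothetical and not part of this documentation), the scenario just described could be expressed as:\n import boto3\n client = boto3.client('application-autoscaling')\n client.put_scaling_policy(\n PolicyName='web-app-step-scale-out', # hypothetical name\n ServiceNamespace='ecs',\n ResourceId='service/default/web-app', # hypothetical cluster/service\n ScalableDimension='ecs:service:DesiredCount',\n PolicyType='StepScaling',\n StepScalingPolicyConfiguration={\n 'AdjustmentType': 'ChangeInCapacity',\n 'StepAdjustments': [{'MetricIntervalLowerBound': 0.0, 'ScalingAdjustment': 2}],\n 'Cooldown': 300, # the 5-minute cooldown from the example\n 'MetricAggregationType': 'Average'\n }\n )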
During the Cooldown period, if the alarm triggers the same policy again but at a more aggressive step adjustment to scale out the service by 3 tasks, the 2 tasks that were added in the previous scale out event are considered part of that capacity and only 1 additional task is added to the desired count.\n For scale in policies, the cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.\n MetricAggregationType (string) --The aggregation type for the CloudWatch metrics. Valid values are Minimum , Maximum , and Average .\n \n\n :type TargetTrackingScalingPolicyConfiguration: dict\n :param TargetTrackingScalingPolicyConfiguration: A target tracking policy.\n This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling .\n TargetValue (float) -- [REQUIRED]The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).\n PredefinedMetricSpecification (dict) --A predefined metric.\n PredefinedMetricType (string) -- [REQUIRED]The metric type. The ALBRequestCountPerTarget metric type applies only to Spot fleet requests and ECS services.\n ResourceLabel (string) --Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot fleet request or ECS service.\n The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:\n app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN\n targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.\n \n CustomizedMetricSpecification (dict) --A customized metric.\n MetricName (string) -- [REQUIRED]The name of the metric.\n Namespace (string) -- [REQUIRED]The namespace of the metric.\n Dimensions (list) --The dimensions of the metric.\n (dict) --Describes the dimension of a metric.\n Name (string) -- [REQUIRED]The name of the dimension.\n Value (string) -- [REQUIRED]The value of the dimension.\n \n Statistic (string) -- [REQUIRED]The statistic of the metric.\n Unit (string) --The unit of the metric.\n ScaleOutCooldown (integer) --The amount of time, in seconds, after a scale out activity completes before another scale out activity can start.\n While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.\n ScaleInCooldown (integer) --The amount of time, in seconds, after a scale in activity completes before another scale in activity can start.\n The cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.\n DisableScaleIn (boolean) --Indicates whether scale in by the target tracking policy is disabled. 
If the value is true , scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false .\n \n\n :rtype: dict\n :return: {\n 'PolicyARN': 'string',\n 'Alarms': [\n {\n 'AlarmName': 'string',\n 'AlarmARN': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef put_scheduled_action(ServiceNamespace=None, Schedule=None, ScheduledActionName=None, ResourceId=None, ScalableDimension=None, StartTime=None, EndTime=None, ScalableTargetAction=None):\n \"\"\"\n Creates or updates a scheduled action for an Application Auto Scaling scalable target.\n Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you register the scalable target using RegisterScalableTarget .\n To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.\n You can view the scheduled actions using DescribeScheduledActions . If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction .\n See also: AWS API Documentation\n \n \n :example: response = client.put_scheduled_action(\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n Schedule='string',\n ScheduledActionName='string',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n ScalableTargetAction={\n 'MinCapacity': 123,\n 'MaxCapacity': 123\n }\n )\n \n \n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type Schedule: string\n :param Schedule: The schedule for this action. The following formats are supported:\n At expressions - at(yyyy-mm-ddThh:mm:ss)\n Rate expressions - rate(value unit)\n Cron expressions - cron(fields)\n At expressions are useful for one-time schedules. Specify the time, in UTC.\n For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days .\n For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide .\n \n\n :type ScheduledActionName: string\n :param ScheduledActionName: [REQUIRED]\n The name of the scheduled action.\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name.
Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: The scalable dimension. This parameter is required if you are creating a scheduled action. This string consists of the service namespace, resource type, and scaling property.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type StartTime: datetime\n :param StartTime: The date and time for the scheduled action to start.\n\n :type EndTime: datetime\n :param EndTime: The date and time for the scheduled action to end.\n\n :type ScalableTargetAction: dict\n :param ScalableTargetAction: The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. 
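As a hedged sketch (the resource, schedule, and action name below are hypothetical), a scheduled action that raises capacity each evening might look like:\n import boto3\n client = boto3.client('application-autoscaling')\n client.put_scheduled_action(\n ServiceNamespace='ecs',\n Schedule='cron(0 18 * * ? *)', # hypothetical: every day at 18:00 UTC\n ScheduledActionName='evening-scale-up', # hypothetical name\n ResourceId='service/default/web-app', # hypothetical cluster/service\n ScalableDimension='ecs:service:DesiredCount',\n ScalableTargetAction={'MinCapacity': 5, 'MaxCapacity': 10}\n )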
If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.\n MinCapacity (integer) --The minimum capacity.\n MaxCapacity (integer) --The maximum capacity.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef register_scalable_target(ServiceNamespace=None, ResourceId=None, ScalableDimension=None, MinCapacity=None, MaxCapacity=None, RoleARN=None):\n \"\"\"\n Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out or scale in. After you have registered a scalable target, you can use this operation to update the minimum and maximum values for its scalable dimension.\n After you register a scalable target, you can create and apply scaling policies using PutScalingPolicy . You can view the scaling policies for a service namespace using DescribeScalableTargets . If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget .\n See also: AWS API Documentation\n \n Examples\n This example registers a scalable target from an Amazon ECS service called web-app that is running on the default cluster, with a minimum desired count of 1 task and a maximum desired count of 10 tasks.\n Expected Output:\n This example registers a scalable target from an Amazon EC2 Spot fleet with a minimum target capacity of 1 and a maximum of 10.\n Expected Output:\n \n :example: response = client.register_scalable_target(\n ServiceNamespace='ecs'|'elasticmapreduce'|'ec2'|'appstream'|'dynamodb'|'rds'|'sagemaker'|'custom-resource',\n ResourceId='string',\n ScalableDimension='ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'elasticmapreduce:instancegroup:InstanceCount'|'appstream:fleet:DesiredCapacity'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits'|'rds:cluster:ReadReplicaCount'|'sagemaker:variant:DesiredInstanceCount'|'custom-resource:ResourceType:Property',\n MinCapacity=123,\n MaxCapacity=123,\n RoleARN='string'\n )\n \n \n :type ServiceNamespace: string\n :param ServiceNamespace: [REQUIRED]\n The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference .\n \n\n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp .\n Spot fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE .\n EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0 .\n AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet .\n DynamoDB table - The resource type is table and the unique identifier is the resource ID. Example: table/my-table .\n DynamoDB global secondary index - The resource type is index and the unique identifier is the resource ID. 
Example: table/my-table/index/my-table-index .\n Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster .\n Amazon SageMaker endpoint variants - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering .\n Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.\n \n\n :type ScalableDimension: string\n :param ScalableDimension: [REQUIRED]\n The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.\n ecs:service:DesiredCount - The desired task count of an ECS service.\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot fleet request.\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.\n appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.\n \n\n :type MinCapacity: integer\n :param MinCapacity: The minimum value to scale to in response to a scale in event. This parameter is required if you are registering a scalable target.\n\n :type MaxCapacity: integer\n :param MaxCapacity: The maximum value to scale to in response to a scale out event. This parameter is required if you are registering a scalable target.\n\n :type RoleARN: string\n :param RoleARN: Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. 
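As a hedged sketch (the cluster and service names are hypothetical), registering an ECS service while relying on the service-linked role simply omits RoleARN :\n import boto3\n client = boto3.client('application-autoscaling')\n client.register_scalable_target(\n ServiceNamespace='ecs',\n ResourceId='service/default/web-app', # hypothetical cluster/service\n ScalableDimension='ecs:service:DesiredCount',\n MinCapacity=1,\n MaxCapacity=10\n )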
For more information, see Service-Linked Roles for Application Auto Scaling .\n For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6431272029876709, "alphanum_fraction": 0.6583749651908875, "avg_line_length": 56.38407897949219, "blob_id": "5b6f602e9a2b761b61308f1cf958205d3c36060d", "content_id": "ab7ad344646323b23d09c4cc3095b8fd3dac4c1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362611, "license_type": "permissive", "max_line_length": 766, "num_lines": 6319, "path": "/pyboto3/sagemaker.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef add_tags(ResourceArn=None, Tags=None):\n \"\"\"\n Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, models, endpoint configurations, and endpoints.\n Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see For more information, see AWS Tagging Strategies .\n See also: AWS API Documentation\n \n \n :example: response = client.add_tags(\n ResourceArn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource that you want to tag.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n An array of Tag objects. Each tag is a key-value pair. Only the key parameter is required. If you don't specify a value, Amazon SageMaker sets the value to an empty string.\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_algorithm(AlgorithmName=None, AlgorithmDescription=None, TrainingSpecification=None, InferenceSpecification=None, ValidationSpecification=None, CertifyForMarketplace=None):\n \"\"\"\n Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.\n See also: AWS API Documentation\n \n \n :example: response = client.create_algorithm(\n AlgorithmName='string',\n AlgorithmDescription='string',\n TrainingSpecification={\n 'TrainingImage': 'string',\n 'TrainingImageDigest': 'string',\n 'SupportedHyperParameters': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'Integer'|'Continuous'|'Categorical'|'FreeText',\n 'Range': {\n 'IntegerParameterRangeSpecification': {\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n 'ContinuousParameterRangeSpecification': {\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n 'CategoricalParameterRangeSpecification': {\n 'Values': [\n 'string',\n ]\n }\n },\n 'IsTunable': True|False,\n 'IsRequired': True|False,\n 'DefaultValue': 'string'\n },\n ],\n 'SupportedTrainingInstanceTypes': [\n 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n ],\n 'SupportsDistributedTraining': True|False,\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ],\n 'TrainingChannels': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'IsRequired': True|False,\n 'SupportedContentTypes': [\n 'string',\n ],\n 'SupportedCompressionTypes': [\n 'None'|'Gzip',\n ],\n 'SupportedInputModes': [\n 'Pipe'|'File',\n ]\n },\n ],\n 'SupportedTuningJobObjectiveMetrics': [\n {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string'\n },\n ]\n },\n InferenceSpecification={\n 'Containers': [\n {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ImageDigest': 'string',\n 'ModelDataUrl': 'string',\n 'ProductId': 'string'\n },\n ],\n 'SupportedTransformInstanceTypes': [\n 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n ],\n 'SupportedRealtimeInferenceInstanceTypes': [\n 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.large'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.large'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n ],\n 'SupportedContentTypes': [\n 'string',\n ],\n 
'SupportedResponseMIMETypes': [\n 'string',\n ]\n },\n ValidationSpecification={\n 'ValidationRole': 'string',\n 'ValidationProfiles': [\n {\n 'ProfileName': 'string',\n 'TrainingJobDefinition': {\n 'TrainingInputMode': 'Pipe'|'File',\n 'HyperParameters': {\n 'string': 'string'\n },\n 'InputDataConfig': [\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n 'OutputDataConfig': {\n 'KmsKeyId': 'string',\n 'S3OutputPath': 'string'\n },\n 'ResourceConfig': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 'VolumeKmsKeyId': 'string'\n },\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n }\n },\n 'TransformJobDefinition': {\n 'MaxConcurrentTransforms': 123,\n 'MaxPayloadInMB': 123,\n 'BatchStrategy': 'MultiRecord'|'SingleRecord',\n 'Environment': {\n 'string': 'string'\n },\n 'TransformInput': {\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string'\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'SplitType': 'None'|'Line'|'RecordIO'|'TFRecord'\n },\n 'TransformOutput': {\n 'S3OutputPath': 'string',\n 'Accept': 'string',\n 'AssembleWith': 'None'|'Line',\n 'KmsKeyId': 'string'\n },\n 'TransformResources': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n 'InstanceCount': 123,\n 'VolumeKmsKeyId': 'string'\n }\n }\n },\n ]\n },\n CertifyForMarketplace=True|False\n )\n \n \n :type AlgorithmName: string\n :param AlgorithmName: [REQUIRED]\n The name of the algorithm.\n \n\n :type AlgorithmDescription: string\n :param AlgorithmDescription: A description of the algorithm.\n\n :type TrainingSpecification: dict\n :param TrainingSpecification: [REQUIRED]\n Specifies details about training jobs run by this algorithm, including the following:\n The Amazon ECR path of the container and the version digest of the algorithm.\n The hyperparameters that the algorithm supports.\n The instance types that the algorithm supports for training.\n Whether the algorithm supports distributed training.\n The metrics that the algorithm emits to Amazon CloudWatch.\n Which metrics that the algorithm emits can be used as the objective metric for hyperparameter tuning jobs.\n The input channels that the algorithm supports for training data. 
For example, an algorithm might support train , validation , and test channels.\n TrainingImage (string) -- [REQUIRED]The Amazon ECR registry path of the Docker image that contains the training algorithm.\n TrainingImageDigest (string) --An MD5 hash of the training algorithm that identifies the Docker image used for training.\n SupportedHyperParameters (list) --A list of the HyperParameterSpecification objects that define the supported hyperparameters. This is required if the algorithm supports automatic model tuning.\n (dict) --Defines a hyperparameter to be used by an algorithm.\n Name (string) -- [REQUIRED]The name of this hyperparameter. The name must be unique.\n Description (string) --A brief description of the hyperparameter.\n Type (string) -- [REQUIRED]The type of this hyperparameter. The valid types are Integer , Continuous , Categorical , and FreeText .\n Range (dict) --The allowed range for this hyperparameter.\n IntegerParameterRangeSpecification (dict) --An IntegerParameterRangeSpecification object that defines the possible values for an integer hyperparameter.\n MinValue (string) -- [REQUIRED]The minimum integer value allowed.\n MaxValue (string) -- [REQUIRED]The maximum integer value allowed.\n ContinuousParameterRangeSpecification (dict) --A ContinuousParameterRangeSpecification object that defines the possible values for a continuous hyperparameter.\n MinValue (string) -- [REQUIRED]The minimum floating-point value allowed.\n MaxValue (string) -- [REQUIRED]The maximum floating-point value allowed.\n CategoricalParameterRangeSpecification (dict) --A CategoricalParameterRangeSpecification object that defines the possible values for a categorical hyperparameter.\n Values (list) -- [REQUIRED]The allowed categories for the hyperparameter.\n (string) --\n \n IsTunable (boolean) --Indicates whether this hyperparameter is tunable in a hyperparameter tuning job.\n IsRequired (boolean) --Indicates whether this hyperparameter is required.\n DefaultValue (string) --The default value for this hyperparameter. If a default value is specified, a hyperparameter cannot be required.\n \n SupportedTrainingInstanceTypes (list) -- [REQUIRED]A list of the instance types that this algorithm can use for training.\n (string) --\n SupportsDistributedTraining (boolean) --Indicates whether the algorithm supports distributed training. If set to false, buyers can't request more than one instance during training.\n MetricDefinitions (list) --A list of MetricDefinition objects, which are used for parsing metrics generated by the algorithm.\n (dict) --Specifies a metric that the training algorithm writes to stderr or stdout . Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.\n Name (string) -- [REQUIRED]The name of the metric.\n Regex (string) -- [REQUIRED]A regular expression that searches the output of a training job and gets the value of the metric.
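As a hedged illustration (the log format shown is hypothetical), a training container that prints lines such as validation-accuracy=0.92 could expose that value with a metric definition like:\n {'Name': 'validation:accuracy', 'Regex': 'validation-accuracy=([0-9\\.]+)'}\n The value captured by the first group in the regular expression is reported as the metric value.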
For more information about using regular expressions to define metrics, see Defining Objective Metrics .\n \n TrainingChannels (list) -- [REQUIRED]A list of ChannelSpecification objects, which specify the input sources to be used by the algorithm.\n (dict) --Defines a named input source, called a channel, to be used by an algorithm.\n Name (string) -- [REQUIRED]The name of the channel.\n Description (string) --A brief description of the channel.\n IsRequired (boolean) --Indicates whether the channel is required by the algorithm.\n SupportedContentTypes (list) -- [REQUIRED]The supported MIME types for the data.\n (string) --\n SupportedCompressionTypes (list) --The allowed compression types, if data compression is used.\n (string) --\n SupportedInputModes (list) -- [REQUIRED]The allowed input mode, either FILE or PIPE.\n In FILE mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode.\n In PIPE mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.\n (string) --\n \n SupportedTuningJobObjectiveMetrics (list) --A list of the metrics that the algorithm emits that can be used as the objective metric in a hyperparameter tuning job.\n (dict) --Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter.\n Type (string) -- [REQUIRED]Whether to minimize or maximize the objective metric.\n MetricName (string) -- [REQUIRED]The name of the metric to use for the objective metric.\n \n \n\n :type InferenceSpecification: dict\n :param InferenceSpecification: Specifies details about inference jobs that the algorithm runs, including the following:\n The Amazon ECR paths of containers that contain the inference code and model artifacts.\n The instance types that the algorithm supports for transform jobs and real-time endpoints used for inference.\n The input and output content formats that the algorithm supports for inference.\n Containers (list) -- [REQUIRED]The Amazon ECR registry path of the Docker image that contains the inference code.\n (dict) --Describes the Docker container for the model package.\n ContainerHostname (string) --The DNS host name for the Docker container.\n Image (string) -- [REQUIRED]The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.\n If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker .\n ImageDigest (string) --An MD5 hash of the training algorithm that identifies the Docker image used for training.\n ModelDataUrl (string) --The Amazon S3 path where the model artifacts, which result from model training, are stored. 
This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n ProductId (string) --The AWS Marketplace product ID of the model package.\n \n SupportedTransformInstanceTypes (list) -- [REQUIRED]A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.\n (string) --\n SupportedRealtimeInferenceInstanceTypes (list) -- [REQUIRED]A list of the instance types that are used to generate inferences in real-time.\n (string) --\n SupportedContentTypes (list) -- [REQUIRED]The supported MIME types for the input data.\n (string) --\n SupportedResponseMIMETypes (list) -- [REQUIRED]The supported MIME types for the output data.\n (string) --\n \n\n :type ValidationSpecification: dict\n :param ValidationSpecification: Specifies configurations for one or more training jobs that Amazon SageMaker runs to test the algorithm's training code and, optionally, one or more batch transform jobs that Amazon SageMaker runs to test the algorithm's inference code.\n ValidationRole (string) -- [REQUIRED]The IAM roles that Amazon SageMaker uses to run the training jobs.\n ValidationProfiles (list) -- [REQUIRED]An array of AlgorithmValidationProfile objects, each of which specifies a training job and batch transform job that Amazon SageMaker runs to validate your algorithm.\n (dict) --Defines a training job and a batch transform job that Amazon SageMaker runs to validate your algorithm.\n The data provided in the validation profile is made available to your buyers on AWS Marketplace.\n ProfileName (string) -- [REQUIRED]The name of the profile for the algorithm. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).\n TrainingJobDefinition (dict) -- [REQUIRED]The TrainingJobDefinition object that describes the training job that Amazon SageMaker runs to validate your algorithm.\n TrainingInputMode (string) -- [REQUIRED]The input mode used by the algorithm for the training job. For the input modes that Amazon SageMaker algorithms support, see Algorithms .\n If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to a Docker volume for the training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.\n HyperParameters (dict) --The hyperparameters used for the training job.\n (string) --\n (string) --\n \n InputDataConfig (list) -- [REQUIRED]An array of Channel objects, each of which specifies an input source.\n (dict) --A channel is a named input source that training algorithms can consume.\n ChannelName (string) -- [REQUIRED]The name of the channel.\n DataSource (dict) -- [REQUIRED]The location of the channel data.\n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects that match the specified key name prefix for model training.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for model training.\n If you choose AugmentedManifestFile , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training.
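As a hedged illustration (the attribute names source-ref and label are hypothetical and must match the channel's AttributeNames ), one line of such an augmented manifest might read:\n {"source-ref": "s3://mybucket/images/img1.jpg", "label": 0}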
AugmentedManifestFile can only be used if the Channel's input mode is Pipe .\n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {'prefix': 's3://customer_bucket/some/prefix/'}, 'relative/path/to/custdata-1', 'relative/path/custdata-2', ... ] The preceding JSON matches the following s3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n S3DataDistributionType (string) --If you want Amazon SageMaker to replicate the entire dataset on each ML compute instance that is launched for model training, specify FullyReplicated .\n If you want Amazon SageMaker to replicate a subset of data on each ML compute instance that is launched for model training, specify ShardedByS3Key . If there are n ML compute instances launched for a training job, each instance gets approximately 1/n of the number of S3 objects. In this case, model training on each machine uses only the subset of training data.\n Don't choose more ML compute instances for training than available S3 objects. If you do, some nodes won't get any data and you will pay for nodes that aren't getting any training data. This applies in both File and Pipe modes. Keep this in mind when developing algorithms.\n In distributed training, where you use multiple ML compute EC2 instances, you might choose ShardedByS3Key . If the algorithm requires copying training data to the ML storage volume (when TrainingInputMode is set to File ), this copies 1/n of the number of objects.\n AttributeNames (list) --A list of one or more attribute names to use that are found in a specified augmented manifest file.\n (string) --\n \n ContentType (string) --The MIME type of the data.\n CompressionType (string) --If training data is compressed, the compression type. The default value is None . CompressionType is used only in Pipe input mode. In File mode, leave this field unset or set it to None.\n RecordWrapperType (string) --Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format. In this case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO .\n In File mode, leave this field unset or set it to None.\n InputMode (string) --(Optional) The input mode to use for the data channel in a training job. If you don't set a value for InputMode , Amazon SageMaker uses the value set for TrainingInputMode . Use this parameter to override the TrainingInputMode setting in a AlgorithmSpecification request when you have a channel that needs a different input mode from the training job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML storage volume, and mount the directory to a Docker volume, use File input mode. 
To stream data directly from Amazon S3 to the container, choose Pipe input mode.\n To use a model for incremental training, choose File input mode.\n ShuffleConfig (dict) --A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType , this shuffles the results of the S3 key prefix matches. If you use ManifestFile , the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile , the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.\n For Pipe input mode, shuffling is done at the start of every epoch. With large datasets this ensures that the order of the training data is different for each epoch, which helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key , the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.\n Seed (integer) -- [REQUIRED]Determines the shuffling order in ShuffleConfig value.\n \n OutputDataConfig (dict) -- [REQUIRED]The path to the S3 bucket where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.\n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n // KMS Key Alias 'alias/ExampleAlias'\n // Amazon Resource Name (ARN) of a KMS Key Alias 'arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias'\n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n S3OutputPath (string) -- [REQUIRED]Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix .\n ResourceConfig (dict) -- [REQUIRED]The resources, including the ML compute instances and ML storage volumes, to use for model training.\n InstanceType (string) -- [REQUIRED]The ML compute instance type.\n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use. For distributed training, provide a value greater than 1.\n VolumeSizeInGB (integer) -- [REQUIRED]The size of the ML storage volume that you want to provision.\n ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space.
If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.\n You must specify sufficient ML storage for your scenario.\n Note\n Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.\n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n \n StoppingCondition (dict) -- [REQUIRED]Sets a duration for training. Use this parameter to cap model training costs.\n To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.\n MaxRuntimeInSeconds (integer) --The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 5 days.\n \n TransformJobDefinition (dict) --The TransformJobDefinition object that describes the transform job that Amazon SageMaker runs to validate your algorithm.\n MaxConcurrentTransforms (integer) --The maximum number of parallel requests that can be sent to each instance in a transform job. The default value is 1.\n MaxPayloadInMB (integer) --The maximum payload size allowed, in MB. A payload is the data portion of a record (without metadata).\n BatchStrategy (string) --A string that determines the number of records included in a single mini-batch.\n SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records as can fit within the MaxPayloadInMB limit.\n Environment (dict) --The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.\n (string) --\n (string) --\n \n TransformInput (dict) -- [REQUIRED]A description of the input source and the way the transform job consumes it.\n DataSource (dict) -- [REQUIRED]Describes the location of the channel data, meaning the S3 location of the input data that the model can consume.\n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for batch transform.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch transform.\n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {'prefix': 's3://customer_bucket/some/prefix/'}, 'relative/path/to/custdata-1', 'relative/path/custdata-2', ...
] The preceding JSON matches the following S3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uri points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n \n ContentType (string) --The multipurpose internet mail extension (MIME) type of the data. Amazon SageMaker uses the MIME type with each http call to transfer data to the transform job.\n CompressionType (string) --Compressing data helps save on storage space. If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None .\n SplitType (string) --The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None , which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.\n When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy is MultiRecord , Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord , Amazon SageMaker sends individual records in each request.\n Note\n Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord . Padding is not removed if the value of BatchStrategy is set to MultiRecord .\n For more information about the RecordIO data format, see Data Format in the MXNet documentation. For more information about the TFRecord format, see Consuming TFRecord data in the TensorFlow documentation.\n TransformOutput (dict) -- [REQUIRED]Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.\n S3OutputPath (string) -- [REQUIRED]The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix .\n For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, the input data s3://bucket-name/input-name-prefix/dataset01/data.csv will have the transformed data stored at s3://bucket-name/key-name-prefix/dataset01/ , based on the original name, as a series of .part files (.part0001, .part0002, etc.).\n Accept (string) --The MIME type used to specify the output data. Amazon SageMaker uses the MIME type with each http call to transfer data from the transform job.\n AssembleWith (string) --Defines how to assemble the results of the transform job as a single S3 object. You should select a format that is most convenient to you. To concatenate the results in binary format, specify None .
To add a newline character at the end of every transformed record, specify Line .\n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n // KMS Key Alias 'alias/ExampleAlias'\n // Amazon Resource Name (ARN) of a KMS Key Alias 'arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias'\n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n TransformResources (dict) -- [REQUIRED]Identifies the ML compute instances for the transform job.\n InstanceType (string) -- [REQUIRED]The ML compute instance type for the transform job. For using built-in algorithms to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should suffice. There is no default value for InstanceType .\n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1. The default value is 1 .\n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n \n \n \n\n :type CertifyForMarketplace: boolean\n :param CertifyForMarketplace: Whether to certify the algorithm so that it can be listed in AWS Marketplace.\n\n :rtype: dict\n :return: {\n 'AlgorithmArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_code_repository(CodeRepositoryName=None, GitConfig=None):\n \"\"\"\n Create a git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use git source control for the notebooks you create. The git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.\n The repository can be hosted either in AWS CodeCommit or in any other git repository.\n See also: AWS API Documentation\n \n \n :example: response = client.create_code_repository(\n CodeRepositoryName='string',\n GitConfig={\n 'RepositoryUrl': 'string',\n 'Branch': 'string',\n 'SecretArn': 'string'\n }\n )\n \n \n :type CodeRepositoryName: string\n :param CodeRepositoryName: [REQUIRED]\n The name of the git repository. The name must have 1 to 63 characters.
Valid characters are a-z, A-Z, 0-9, and - (hyphen).\n \n\n :type GitConfig: dict\n :param GitConfig: [REQUIRED]\n Specifies details about the repository, including the URL where the repository is located, the default branch, and credentials to use to access the repository.\n RepositoryUrl (string) -- [REQUIRED]The URL where the git repository is located.\n Branch (string) --The default branch for the git repository.\n SecretArn (string) --The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:\n {'username': UserName, 'password': Password}\n \n\n :rtype: dict\n :return: {\n 'CodeRepositoryArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_compilation_job(CompilationJobName=None, RoleArn=None, InputConfig=None, OutputConfig=None, StoppingCondition=None):\n \"\"\"\n Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.\n If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.\n In the request body, you provide the following:\n You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.\n To stop a model compilation job, use StopCompilationJob . To get information about a particular model compilation job, use DescribeCompilationJob . To get information about multiple model compilation jobs, use ListCompilationJobs .\n See also: AWS API Documentation\n \n \n :example: response = client.create_compilation_job(\n CompilationJobName='string',\n RoleArn='string',\n InputConfig={\n 'S3Uri': 'string',\n 'DataInputConfig': 'string',\n 'Framework': 'TENSORFLOW'|'MXNET'|'ONNX'|'PYTORCH'|'XGBOOST'\n },\n OutputConfig={\n 'S3OutputLocation': 'string',\n 'TargetDevice': 'ml_m4'|'ml_m5'|'ml_c4'|'ml_c5'|'ml_p2'|'ml_p3'|'jetson_tx1'|'jetson_tx2'|'rasp3b'|'deeplens'\n },\n StoppingCondition={\n 'MaxRuntimeInSeconds': 123\n }\n )\n \n \n :type CompilationJobName: string\n :param CompilationJobName: [REQUIRED]\n A name for the model compilation job. The name must be unique within the AWS Region and within your AWS account.\n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.\n During model compilation, Amazon SageMaker needs your permission to:\n Read input data from an S3 bucket\n Write model artifacts to an S3 bucket\n Write logs to Amazon CloudWatch Logs\n Publish metrics to Amazon CloudWatch\n You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.\n \n\n :type InputConfig: dict\n :param InputConfig: [REQUIRED]\n Provides information about the location of input model artifacts, the name and shape of the expected data inputs, and the framework in which the model was trained.\n S3Uri (string) -- [REQUIRED]The S3 path where the model artifacts, which result from model training, are stored.
This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n DataInputConfig (string) -- [REQUIRED]Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.\n TensorFlow : You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.\n Examples for one input:\n If using the console, {'input':[1,1024,1024,3]}\n If using the CLI, {\\'input\\':[1,1024,1024,3]}\n Examples for two inputs:\n If using the console, {'data1': [1,28,28,1], 'data2':[1,28,28,1]}\n If using the CLI, {\\'data1\\': [1,28,28,1], \\'data2\\':[1,28,28,1]}\n \n MXNET/ONNX : You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.\n Examples for one input:\n If using the console, {'data':[1,3,1024,1024]}\n If using the CLI, {\\'data\\':[1,3,1024,1024]}\n Examples for two inputs:\n If using the console, {'var1': [1,1,28,28], 'var2':[1,1,28,28]}\n If using the CLI, {\\'var1\\': [1,1,28,28], \\'var2\\':[1,1,28,28]}\n \n PyTorch : You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.\n Examples for one input in dictionary format:\n If using the console, {'input0':[1,3,224,224]}\n If using the CLI, {\\'input0\\':[1,3,224,224]}\n Example for one input in list format: [[1,3,224,224]]\n Examples for two inputs in dictionary format:\n If using the console, {'input0':[1,3,224,224], 'input1':[1,3,224,224]}\n If using the CLI, {\\'input0\\':[1,3,224,224], \\'input1\\':[1,3,224,224]}\n Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]\n XGBOOST : input data name and shape are not needed.\n Framework (string) -- [REQUIRED]Identifies the framework in which the model was trained. For example: TENSORFLOW.\n \n\n :type OutputConfig: dict\n :param OutputConfig: [REQUIRED]\n Provides information about the output location for the compiled model and the target device the model runs on.\n S3OutputLocation (string) -- [REQUIRED]Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.\n TargetDevice (string) -- [REQUIRED]Identifies the device that you want to run your model on after it has been compiled. For example: ml_c5.\n \n\n :type StoppingCondition: dict\n :param StoppingCondition: [REQUIRED]\n The duration allowed for model compilation.\n MaxRuntimeInSeconds (integer) --The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 5 days.\n \n\n :rtype: dict\n :return: {\n 'CompilationJobArn': 'string'\n }\n \n \n :returns: \n CompilationJobName (string) -- [REQUIRED]\n A name for the model compilation job. 
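
Because the console and CLI examples above differ only in shell escaping, it can be less error-prone to build DataInputConfig with json.dumps when calling from Python; a hedged sketch with hypothetical input names:

import json

# Two hypothetical MXNet/ONNX inputs in NCHW order; json.dumps produces the
# JSON string directly, so none of the CLI backslash escaping shown above is needed.
data_input_config = json.dumps({'var1': [1, 1, 28, 28], 'var2': [1, 1, 28, 28]})
# Pass the resulting string as InputConfig={'DataInputConfig': data_input_config, ...}

In the Python request itself, CompilationJobName is likewise passed as a plain string. 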
The name must be unique within the AWS Region and within your AWS account.\n \n RoleArn (string) -- [REQUIRED]\n The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.\n During model compilation, Amazon SageMaker needs your permission to:\n \n Read input data from an S3 bucket\n Write model artifacts to an S3 bucket\n Write logs to Amazon CloudWatch Logs\n Publish metrics to Amazon CloudWatch\n \n You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.\n \n InputConfig (dict) -- [REQUIRED]\n Provides information about the location of input model artifacts, the name and shape of the expected data inputs, and the framework in which the model was trained.\n \n S3Uri (string) -- [REQUIRED]The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n \n DataInputConfig (string) -- [REQUIRED]Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.\n \n TensorFlow : You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.\n Examples for one input:\n If using the console, {"input":[1,1024,1024,3]}\n If using the CLI, {\"input\":[1,1024,1024,3]}\n \n \n Examples for two inputs:\n If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]}\n If using the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}\n \n \n \n \n MXNET/ONNX : You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.\n Examples for one input:\n If using the console, {"data":[1,3,1024,1024]}\n If using the CLI, {\"data\":[1,3,1024,1024]}\n \n \n Examples for two inputs:\n If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]}\n If using the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}\n \n \n \n \n PyTorch : You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.\n Examples for one input in dictionary format:\n If using the console, {"input0":[1,3,224,224]}\n If using the CLI, {\"input0\":[1,3,224,224]}\n \n \n Example for one input in list format: [[1,3,224,224]]\n Examples for two inputs in dictionary format:\n If using the console, {"input0":[1,3,224,224], "input1":[1,3,224,224]}\n If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}\n \n \n Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]\n \n \n XGBOOST : input data name and shape are not needed.\n \n \n Framework (string) -- [REQUIRED]Identifies the framework in which the model was trained. 
For example: TENSORFLOW.\n \n \n \n OutputConfig (dict) -- [REQUIRED]\n Provides information about the output location for the compiled model and the target device the model runs on.\n \n S3OutputLocation (string) -- [REQUIRED]Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.\n \n TargetDevice (string) -- [REQUIRED]Identifies the device that you want to run your model on after it has been compiled. For example: ml_c5.\n \n \n \n StoppingCondition (dict) -- [REQUIRED]\n The duration allowed for model compilation.\n \n MaxRuntimeInSeconds (integer) --The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 5 days.\n \n \n \n \n \"\"\"\n pass\n\ndef create_endpoint(EndpointName=None, EndpointConfigName=None, Tags=None):\n \"\"\"\n Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.\n The endpoint name must be unique within an AWS Region in your AWS account.\n When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.\n When Amazon SageMaker receives the request, it sets the endpoint status to Creating . After it creates the endpoint, it sets the status to InService . Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.\n For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker .\n If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_endpoint(\n EndpointName='string',\n EndpointConfigName='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the endpoint. The name must be unique within an AWS Region in your AWS account.\n \n\n :type EndpointConfigName: string\n :param EndpointConfigName: [REQUIRED]\n The name of an endpoint configuration. For more information, see CreateEndpointConfig .\n \n\n :type Tags: list\n :param Tags: An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :rtype: dict\n :return: {\n 'EndpointArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_endpoint_config(EndpointConfigName=None, ProductionVariants=None, Tags=None, KmsKeyId=None):\n \"\"\"\n Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. 
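
Before the configuration parameters are described, here is a hedged sketch of the endpoint flow covered above; the names are hypothetical, and the endpoint_in_service waiter is assumed to be available on the client:

import boto3

client = boto3.client('sagemaker')

# Hypothetical names; the endpoint configuration must already exist.
client.create_endpoint(
    EndpointName='example-endpoint',
    EndpointConfigName='example-endpoint-config',
)
# Block until the status moves from Creating to InService, then confirm it.
client.get_waiter('endpoint_in_service').wait(EndpointName='example-endpoint')
status = client.describe_endpoint(EndpointName='example-endpoint')['EndpointStatus']

Each of these calls depends on an endpoint configuration. 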
In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.\n In the request, you define one or more ProductionVariant s, each of which identifies a model. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.\n If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.\n See also: AWS API Documentation\n \n \n :example: response = client.create_endpoint_config(\n EndpointConfigName='string',\n ProductionVariants=[\n {\n 'VariantName': 'string',\n 'ModelName': 'string',\n 'InitialInstanceCount': 123,\n 'InstanceType': 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.large'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.large'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InitialVariantWeight': ...,\n 'AcceleratorType': 'ml.eia1.medium'|'ml.eia1.large'|'ml.eia1.xlarge'\n },\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n KmsKeyId='string'\n )\n \n \n :type EndpointConfigName: string\n :param EndpointConfigName: [REQUIRED]\n The name of the endpoint configuration. You specify this name in a CreateEndpoint request.\n \n\n :type ProductionVariants: list\n :param ProductionVariants: [REQUIRED]\n An array of ProductionVariant objects, one for each model that you want to host at this endpoint.\n (dict) --Identifies a model that you want to host and the resources to deploy for hosting it. If you are deploying multiple models, tell Amazon SageMaker how to distribute traffic among the models by specifying variant weights.\n VariantName (string) -- [REQUIRED]The name of the production variant.\n ModelName (string) -- [REQUIRED]The name of the model that you want to host. This is the name that you specified when creating the model.\n InitialInstanceCount (integer) -- [REQUIRED]Number of instances to launch initially.\n InstanceType (string) -- [REQUIRED]The ML compute instance type.\n InitialVariantWeight (float) --Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. The traffic to a production variant is determined by the ratio of the VariantWeight to the sum of all VariantWeight values across all ProductionVariants. If unspecified, it defaults to 1.0.\n AcceleratorType (string) --The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker .\n \n \n\n :type Tags: list\n :param Tags: An array of key-value pairs. 
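
A hedged sketch of the two-model weighting described above (weight 2 for model A and 1 for model B, giving a two-thirds/one-third split); the model and variant names are hypothetical:

production_variants = [
    {
        'VariantName': 'variant-a',
        'ModelName': 'model-a',          # hypothetical, created with create_model
        'InitialInstanceCount': 1,
        'InstanceType': 'ml.m4.xlarge',
        'InitialVariantWeight': 2.0,     # receives 2 / (2 + 1) = two-thirds of traffic
    },
    {
        'VariantName': 'variant-b',
        'ModelName': 'model-b',
        'InitialInstanceCount': 1,
        'InstanceType': 'ml.m4.xlarge',
        'InitialVariantWeight': 1.0,     # receives one-third of traffic
    },
]
# Pass as ProductionVariants=production_variants in create_endpoint_config.

Tags, described next, can be attached in the same call. 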
For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :type KmsKeyId: string\n :param KmsKeyId: The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.\n\n :rtype: dict\n :return: {\n 'EndpointConfigArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_hyper_parameter_tuning_job(HyperParameterTuningJobName=None, HyperParameterTuningJobConfig=None, TrainingJobDefinition=None, WarmStartConfig=None, Tags=None):\n \"\"\"\n Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.\n See also: AWS API Documentation\n \n \n :example: response = client.create_hyper_parameter_tuning_job(\n HyperParameterTuningJobName='string',\n HyperParameterTuningJobConfig={\n 'Strategy': 'Bayesian',\n 'HyperParameterTuningJobObjective': {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string'\n },\n 'ResourceLimits': {\n 'MaxNumberOfTrainingJobs': 123,\n 'MaxParallelTrainingJobs': 123\n },\n 'ParameterRanges': {\n 'IntegerParameterRanges': [\n {\n 'Name': 'string',\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n ],\n 'ContinuousParameterRanges': [\n {\n 'Name': 'string',\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n ],\n 'CategoricalParameterRanges': [\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'TrainingJobEarlyStoppingType': 'Off'|'Auto'\n },\n TrainingJobDefinition={\n 'StaticHyperParameters': {\n 'string': 'string'\n },\n 'AlgorithmSpecification': {\n 'TrainingImage': 'string',\n 'TrainingInputMode': 'Pipe'|'File',\n 'AlgorithmName': 'string',\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ]\n },\n 'RoleArn': 'string',\n 'InputDataConfig': [\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n 'VpcConfig': {\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n 'OutputDataConfig': {\n 'KmsKeyId': 'string',\n 'S3OutputPath': 'string'\n },\n 'ResourceConfig': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 'VolumeKmsKeyId': 'string'\n },\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n },\n 'EnableNetworkIsolation': True|False\n },\n WarmStartConfig={\n 
'ParentHyperParameterTuningJobs': [\n {\n 'HyperParameterTuningJobName': 'string'\n },\n ],\n 'WarmStartType': 'IdenticalDataAndAlgorithm'|'TransferLearning'\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type HyperParameterTuningJobName: string\n :param HyperParameterTuningJobName: [REQUIRED]\n The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same AWS account and AWS Region. The name must have { } to { } characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.\n \n\n :type HyperParameterTuningJobConfig: dict\n :param HyperParameterTuningJobConfig: [REQUIRED]\n The HyperParameterTuningJobConfig object that describes the tuning job, including the search strategy, the objective metric used to evaluate training jobs, ranges of parameters to search, and resource limits for the tuning job. For more information, see automatic-model-tuning\n Strategy (string) -- [REQUIRED]Specifies the search strategy for hyperparameters. Currently, the only valid value is Bayesian .\n HyperParameterTuningJobObjective (dict) -- [REQUIRED]The HyperParameterTuningJobObjective object that specifies the objective metric for this tuning job.\n Type (string) -- [REQUIRED]Whether to minimize or maximize the objective metric.\n MetricName (string) -- [REQUIRED]The name of the metric to use for the objective metric.\n ResourceLimits (dict) -- [REQUIRED]The ResourceLimits object that specifies the maximum number of training jobs and parallel training jobs for this tuning job.\n MaxNumberOfTrainingJobs (integer) -- [REQUIRED]The maximum number of training jobs that a hyperparameter tuning job can launch.\n MaxParallelTrainingJobs (integer) -- [REQUIRED]The maximum number of concurrent training jobs that a hyperparameter tuning job can launch.\n ParameterRanges (dict) -- [REQUIRED]The ParameterRanges object that specifies the ranges of hyperparameters that this tuning job searches.\n IntegerParameterRanges (list) --The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that a hyperparameter tuning job searches.\n (dict) --For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches.\n Name (string) -- [REQUIRED]The name of the hyperparameter to search.\n MinValue (string) -- [REQUIRED]The minimum value of the hyperparameter to search.\n MaxValue (string) -- [REQUIRED]The maximum value of the hyperparameter to search.\n \n ContinuousParameterRanges (list) --The array of ContinuousParameterRange objects that specify ranges of continuous hyperparameters that a hyperparameter tuning job searches.\n (dict) --A list of continuous hyperparameters to tune.\n Name (string) -- [REQUIRED]The name of the continuous hyperparameter to tune.\n MinValue (string) -- [REQUIRED]The minimum value for the hyperparameter. The tuning job uses floating-point values between this value and MaxValue for tuning.\n MaxValue (string) -- [REQUIRED]The maximum value for the hyperparameter. 
The tuning job uses floating-point values between the MinValue value and this value for tuning.\n \n CategoricalParameterRanges (list) --The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that a hyperparameter tuning job searches.\n (dict) --A list of categorical hyperparameters to tune.\n Name (string) -- [REQUIRED]The name of the categorical hyperparameter to tune.\n Values (list) -- [REQUIRED]A list of the categories for the hyperparameter.\n (string) --\n \n \n TrainingJobEarlyStoppingType (string) --Specifies whether to use early stopping for training jobs launched by the hyperparameter tuning job. This can be one of the following values (the default value is OFF ):\n OFF\n Training jobs launched by the hyperparameter tuning job do not use early stopping.\n AUTO\n Amazon SageMaker stops training jobs launched by the hyperparameter tuning job when they are unlikely to perform better than previously completed training jobs. For more information, see Stop Training Jobs Early .\n \n\n :type TrainingJobDefinition: dict\n :param TrainingJobDefinition: [REQUIRED]\n The HyperParameterTrainingJobDefinition object that describes the training jobs that this tuning job launches, including static hyperparameters, input data configuration, output data configuration, resource configuration, and stopping condition.\n StaticHyperParameters (dict) --Specifies the values of hyperparameters that do not change for the tuning job.\n (string) --\n (string) --\n \n AlgorithmSpecification (dict) -- [REQUIRED]The HyperParameterAlgorithmSpecification object that specifies the resource algorithm to use for the training jobs that the tuning job launches.\n TrainingImage (string) --The registry path of the Docker image that contains the training algorithm. For information about Docker registry paths for built-in algorithms, see Algorithms Provided by Amazon SageMaker: Common Parameters .\n TrainingInputMode (string) -- [REQUIRED]The input mode that the algorithm supports: File or Pipe. In File input mode, Amazon SageMaker downloads the training data from Amazon S3 to the storage volume that is attached to the training instance and mounts the directory to the Docker volume for the training container. In Pipe input mode, Amazon SageMaker streams data directly from Amazon S3 to the container.\n If you specify File mode, make sure that you provision the storage volume that is attached to the training instance with enough capacity to accommodate the training data downloaded from Amazon S3, the model artifacts, and intermediate information.\n For more information about input modes, see Algorithms .\n AlgorithmName (string) --The name of the resource algorithm to use for the hyperparameter tuning job. If you specify a value for this parameter, do not specify a value for TrainingImage .\n MetricDefinitions (list) --An array of MetricDefinition objects that specify the metrics that the algorithm emits.\n (dict) --Specifies a metric that the training algorithm writes to stderr or stdout . Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.\n Name (string) -- [REQUIRED]The name of the metric.\n Regex (string) -- [REQUIRED]A regular expression that searches the output of a training job and gets the value of the metric. 
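
For instance, a definition of this kind might look like the following hedged sketch; the metric name and the log format it matches are hypothetical, not taken from any particular algorithm:

# Assumes the algorithm prints lines such as "validation-accuracy=0.92" to stdout/stderr.
metric_definitions = [
    {
        'Name': 'validation-accuracy',
        'Regex': 'validation-accuracy=([0-9\\.]+)',  # first capture group is the metric value
    },
]
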
For more information about using regular expressions to define metrics, see Defining Objective Metrics .\n \n RoleArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role associated with the training jobs that the tuning job launches.\n InputDataConfig (list) --An array of Channel objects that specify the input for the training jobs that the tuning job launches.\n (dict) --A channel is a named input source that training algorithms can consume.\n ChannelName (string) -- [REQUIRED]The name of the channel.\n DataSource (dict) -- [REQUIRED]The location of the channel data.\n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects that match the specified key name prefix for model training.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for model training.\n If you choose AugmentedManifestFile , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe .\n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {'prefix': 's3://customer_bucket/some/prefix/'}, 'relative/path/to/custdata-1', 'relative/path/custdata-2', ... ] The preceding JSON matches the following s3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n S3DataDistributionType (string) --If you want Amazon SageMaker to replicate the entire dataset on each ML compute instance that is launched for model training, specify FullyReplicated .\n If you want Amazon SageMaker to replicate a subset of data on each ML compute instance that is launched for model training, specify ShardedByS3Key . If there are n ML compute instances launched for a training job, each instance gets approximately 1/n of the number of S3 objects. In this case, model training on each machine uses only the subset of training data.\n Don't choose more ML compute instances for training than available S3 objects. If you do, some nodes won't get any data and you will pay for nodes that aren't getting any training data. This applies in both File and Pipe modes. Keep this in mind when developing algorithms.\n In distributed training, where you use multiple ML compute EC2 instances, you might choose ShardedByS3Key . 
If the algorithm requires copying training data to the ML storage volume (when TrainingInputMode is set to File ), this copies 1/n of the number of objects.\n AttributeNames (list) --A list of one or more attribute names to use that are found in a specified augmented manifest file.\n (string) --\n \n ContentType (string) --The MIME type of the data.\n CompressionType (string) --If training data is compressed, the compression type. The default value is None . CompressionType is used only in Pipe input mode. In File mode, leave this field unset or set it to None.\n RecordWrapperType (string) --Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format. In this case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO .\n In File mode, leave this field unset or set it to None.\n InputMode (string) --(Optional) The input mode to use for the data channel in a training job. If you don't set a value for InputMode , Amazon SageMaker uses the value set for TrainingInputMode . Use this parameter to override the TrainingInputMode setting in an AlgorithmSpecification request when you have a channel that needs a different input mode from the training job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML storage volume, and mount the directory to a Docker volume, use File input mode. To stream data directly from Amazon S3 to the container, choose Pipe input mode.\n To use a model for incremental training, choose File input mode.\n ShuffleConfig (dict) --A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType , this shuffles the results of the S3 key prefix matches. If you use ManifestFile , the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile , the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.\n For Pipe input mode, shuffling is done at the start of every epoch. With large datasets this ensures that the order of the training data is different for each epoch, which helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key , the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.\n Seed (integer) -- [REQUIRED]Determines the shuffling order in ShuffleConfig value.\n \n VpcConfig (dict) --The VpcConfig object that specifies the VPC that you want the training jobs that this hyperparameter tuning job launches to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud .\n SecurityGroupIds (list) -- [REQUIRED]The VPC security group IDs, in the form sg-xxxxxxxx. 
Specify the security groups for the VPC that is specified in the Subnets field.\n (string) --\n Subnets (list) -- [REQUIRED]The ID of the subnets in the VPC to which you want to connect your training job or model.\n (string) --\n \n OutputDataConfig (dict) -- [REQUIRED]Specifies the path to the Amazon S3 bucket where you store model artifacts from the training jobs that the tuning job launches.\n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n // KMS Key Alias 'alias/ExampleAlias'\n // Amazon Resource Name (ARN) of a KMS Key Alias 'arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias'\n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n S3OutputPath (string) -- [REQUIRED]Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix .\n ResourceConfig (dict) -- [REQUIRED]The resources, including the compute instances and storage volumes, to use for the training jobs that the tuning job launches.\n Storage volumes store model artifacts and incremental states. Training algorithms might also use storage volumes for scratch space. If you want Amazon SageMaker to use the storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1.\n InstanceType (string) -- [REQUIRED]The ML compute instance type.\n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use. For distributed training, provide a value greater than 1.\n VolumeSizeInGB (integer) -- [REQUIRED]The size of the ML storage volume that you want to provision.\n ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.\n You must specify sufficient ML storage for your scenario.\n Note\n Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.\n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n \n StoppingCondition (dict) -- [REQUIRED]Sets a maximum duration for the training jobs that the tuning job launches. Use this parameter to limit model training costs.\n To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. 
This delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.\n When Amazon SageMaker terminates a job because the stopping condition has been met, training algorithms provided by Amazon SageMaker save the intermediate results of the job.\n MaxRuntimeInSeconds (integer) --The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 5 days.\n EnableNetworkIsolation (boolean) --Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.\n Note\n The Semantic Segmentation built-in algorithm does not support network isolation.\n \n\n :type WarmStartConfig: dict\n :param WarmStartConfig: Specifies configuration for starting the hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job.\n All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType for the warm start configuration, the training job that performs the best in the new tuning job is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job.\n Note\n All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job.\n ParentHyperParameterTuningJobs (list) -- [REQUIRED]An array of hyperparameter tuning jobs that are used as the starting point for the new hyperparameter tuning job. For more information about warm starting a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job as a Starting Point .\n Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent jobs for warm start tuning jobs.\n (dict) --A previously completed or stopped hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job.\n HyperParameterTuningJobName (string) --The name of the hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job.\n \n WarmStartType (string) -- [REQUIRED]Specifies one of the following:\n IDENTICAL_DATA_AND_ALGORITHM\n The new hyperparameter tuning job uses the same input data and training image as the parent tuning jobs. You can change the hyperparameter ranges to search and the maximum number of training jobs that the hyperparameter tuning job launches. You cannot use a new version of the training algorithm, unless the changes in the new version do not affect the algorithm itself. For example, changes that improve logging or adding support for a different data format are allowed. You can also change hyperparameters from tunable to static, and from static to tunable, but the total number of static plus tunable hyperparameters must remain the same as it is in all parent jobs. 
The objective metric for the new tuning job must be the same as for all parent jobs.\n TRANSFER_LEARNING\n The new hyperparameter tuning job can include input data, hyperparameter ranges, maximum number of concurrent training jobs, and maximum number of training jobs that are different than those of its parent hyperparameter tuning jobs. The training image can also be a different version from the version used in the parent hyperparameter tuning job. You can also change hyperparameters from tunable to static, and from static to tunable, but the total number of static plus tunable hyperparameters must remain the same as it is in all parent jobs. The objective metric for the new tuning job must be the same as for all parent jobs.\n \n\n :type Tags: list\n :param Tags: An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see AWS Tagging Strategies .\n Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :rtype: dict\n :return: {\n 'HyperParameterTuningJobArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_labeling_job(LabelingJobName=None, LabelAttributeName=None, InputConfig=None, OutputConfig=None, RoleArn=None, LabelCategoryConfigS3Uri=None, StoppingConditions=None, LabelingJobAlgorithmsConfig=None, HumanTaskConfig=None, Tags=None):\n \"\"\"\n Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.\n You can select your workforce from one of three providers:\n You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling .\n The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. 
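
An input manifest of this kind is typically a JSON Lines file in which each line points at one data object; the following is a hedged sketch of writing one. The bucket and keys are hypothetical, and the 'source-ref' attribute name is the common Ground Truth convention rather than something defined in this reference:

import json

# One JSON object per line; each 'source-ref' is the S3 location of a data object.
objects = [
    's3://example-bucket/images/0001.jpg',
    's3://example-bucket/images/0002.jpg',
]
with open('dataset.manifest', 'w') as f:
    for uri in objects:
        f.write(json.dumps({'source-ref': uri}) + '\n')
# Upload dataset.manifest to S3 and reference it via InputConfig's ManifestS3Uri.
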
For more information, see Using Input and Output Data .\n The output can be used as the manifest file for another labeling job or as training data for your machine learning models.\n See also: AWS API Documentation\n \n \n :example: response = client.create_labeling_job(\n LabelingJobName='string',\n LabelAttributeName='string',\n InputConfig={\n 'DataSource': {\n 'S3DataSource': {\n 'ManifestS3Uri': 'string'\n }\n },\n 'DataAttributes': {\n 'ContentClassifiers': [\n 'FreeOfPersonallyIdentifiableInformation'|'FreeOfAdultContent',\n ]\n }\n },\n OutputConfig={\n 'S3OutputPath': 'string',\n 'KmsKeyId': 'string'\n },\n RoleArn='string',\n LabelCategoryConfigS3Uri='string',\n StoppingConditions={\n 'MaxHumanLabeledObjectCount': 123,\n 'MaxPercentageOfInputDatasetLabeled': 123\n },\n LabelingJobAlgorithmsConfig={\n 'LabelingJobAlgorithmSpecificationArn': 'string',\n 'InitialActiveLearningModelArn': 'string',\n 'LabelingJobResourceConfig': {\n 'VolumeKmsKeyId': 'string'\n }\n },\n HumanTaskConfig={\n 'WorkteamArn': 'string',\n 'UiConfig': {\n 'UiTemplateS3Uri': 'string'\n },\n 'PreHumanTaskLambdaArn': 'string',\n 'TaskKeywords': [\n 'string',\n ],\n 'TaskTitle': 'string',\n 'TaskDescription': 'string',\n 'NumberOfHumanWorkersPerDataObject': 123,\n 'TaskTimeLimitInSeconds': 123,\n 'TaskAvailabilityLifetimeInSeconds': 123,\n 'MaxConcurrentTaskCount': 123,\n 'AnnotationConsolidationConfig': {\n 'AnnotationConsolidationLambdaArn': 'string'\n },\n 'PublicWorkforceTaskPrice': {\n 'AmountInUsd': {\n 'Dollars': 123,\n 'Cents': 123,\n 'TenthFractionsOfACent': 123\n }\n }\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type LabelingJobName: string\n :param LabelingJobName: [REQUIRED]\n The name of the labeling job. This name is used to identify the job in a list of labeling jobs.\n \n\n :type LabelAttributeName: string\n :param LabelAttributeName: [REQUIRED]\n The attribute name to use for the label in the output manifest file. This is the key for the key/value pair formed with the label that a worker assigns to the object. The name can't end with '-metadata'. If you are running a semantic segmentation labeling job, the attribute name must end with '-ref'. If you are running any other kind of labeling job, the attribute name must not end with '-ref'.\n \n\n :type InputConfig: dict\n :param InputConfig: [REQUIRED]\n Input data for the labeling job, such as the Amazon S3 location of the data objects and the location of the manifest file that describes the data objects.\n DataSource (dict) -- [REQUIRED]The location of the input data.\n S3DataSource (dict) -- [REQUIRED]The Amazon S3 location of the input data objects.\n ManifestS3Uri (string) -- [REQUIRED]The Amazon S3 location of the manifest file that describes the input data objects.\n \n DataAttributes (dict) --Attributes of the data specified by the customer.\n ContentClassifiers (list) --Declares that your content is free of personally identifiable information or adult content. 
Amazon SageMaker may restrict the Amazon Mechanical Turk workers that can view your task based on this information.\n (string) --\n \n \n\n :type OutputConfig: dict\n :param OutputConfig: [REQUIRED]\n The location of the output data and the AWS Key Management Service key ID for the key used to encrypt the output data, if any.\n S3OutputPath (string) -- [REQUIRED]The Amazon S3 location to write output data.\n KmsKeyId (string) --The AWS Key Management Service ID of the key used to encrypt the output data, if any.\n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during data labeling. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete data labeling.\n \n\n :type LabelCategoryConfigS3Uri: string\n :param LabelCategoryConfigS3Uri: The S3 URL of the file that defines the categories used to label the data objects.\n The file is a JSON structure in the following format:\n {'document-version': '2018-11-28'\n 'labels': [\n {\n 'label': '*label 1* '\n },\n {\n 'label': '*label 2* '\n },\n ...\n {\n 'label': '*label n* '\n }\n ]\n }\n \n\n :type StoppingConditions: dict\n :param StoppingConditions: A set of conditions for stopping the labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.\n MaxHumanLabeledObjectCount (integer) --The maximum number of objects that can be labeled by human workers.\n MaxPercentageOfInputDatasetLabeled (integer) --The maximum number of input data objects that should be labeled.\n \n\n :type LabelingJobAlgorithmsConfig: dict\n :param LabelingJobAlgorithmsConfig: Configures the information required to perform automated data labeling.\n LabelingJobAlgorithmSpecificationArn (string) -- [REQUIRED]Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You must select one of the following ARNs:\n Image classification arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/image-classification\n Text classification arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/text-classification\n Object detection arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/object-detection\n InitialActiveLearningModelArn (string) --At the end of an auto-label job, Amazon SageMaker Ground Truth sends the Amazon Resource Name (ARN) of the final model used for auto-labeling. You can use this model as the starting point for subsequent similar jobs by providing the ARN of the model here.\n LabelingJobResourceConfig (dict) --Provides configuration information for a labeling job.\n VolumeKmsKeyId (string) --The AWS Key Management Service key ID for the key used to encrypt the output data, if any.\n \n \n\n :type HumanTaskConfig: dict\n :param HumanTaskConfig: [REQUIRED]\n Configures the information required for human workers to complete a labeling task.\n WorkteamArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the work team assigned to complete the tasks.\n UiConfig (dict) -- [REQUIRED]Information about the user interface that workers use to complete the labeling task.\n UiTemplateS3Uri (string) -- [REQUIRED]The Amazon S3 bucket location of the UI template. 
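
The label category file described above can be generated with json.dumps rather than written by hand; a hedged sketch with hypothetical labels and bucket:

import json
import boto3

label_config = {
    'document-version': '2018-11-28',
    'labels': [{'label': 'cat'}, {'label': 'dog'}],  # hypothetical categories
}
# Upload to the S3 URL that will be passed as LabelCategoryConfigS3Uri.
boto3.client('s3').put_object(
    Bucket='example-bucket',
    Key='labels/categories.json',
    Body=json.dumps(label_config),
)

The UI template referenced by UiTemplateS3Uri is uploaded to S3 in the same way. 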
For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template .\n PreHumanTaskLambdaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.\n For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:\n US East (Northern Virginia) (us-east-1):\n arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox\n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass\n arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation\n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass\n US East (Ohio) (us-east-2):\n arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox\n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass\n arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation\n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass\n US West (Oregon) (us-west-2):\n arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox\n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass\n arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation\n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass\n EU (Ireland) (eu-west-1):\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass\n Asia Pacific (Tokyo) (ap-northeast-1):\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass\n TaskKeywords (list) --Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task.\n (string) --\n TaskTitle (string) -- [REQUIRED]A title for the task for your human workers.\n TaskDescription (string) -- [REQUIRED]A description of the task for your human workers.\n NumberOfHumanWorkersPerDataObject (integer) -- [REQUIRED]The number of human workers that will label an object.\n TaskTimeLimitInSeconds (integer) -- [REQUIRED]The amount of time that a worker has to complete a task.\n TaskAvailabilityLifetimeInSeconds (integer) --The length of time that a task remains available for labeling by human workers.\n MaxConcurrentTaskCount (integer) --Defines the maximum number of data objects that can be labeled by human workers at the same time. Each object may have more than one worker at one time.\n AnnotationConsolidationConfig (dict) -- [REQUIRED]Configures how labels are consolidated across human workers.\n AnnotationConsolidationLambdaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation.\n For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:\n Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox\n Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass\n Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as 'votes' for the correct label. arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation\n Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass\n For more information, see Annotation Consolidation .\n PublicWorkforceTaskPrice (dict) --The price that you pay for each task performed by a public worker.\n AmountInUsd (dict) --Defines the amount of money paid to a worker in United States dollars.\n Dollars (integer) --The whole number of dollars in the amount.\n Cents (integer) --The fractional portion, in cents, of the amount.\n TenthFractionsOfACent (integer) --Fractions of a cent, in tenths.\n \n \n\n :type Tags: list\n :param Tags: An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :rtype: dict\n :return: {\n 'LabelingJobArn': 'string'\n }\n \n \n :returns: \n LabelingJobName (string) -- [REQUIRED]\n The name of the labeling job. This name is used to identify the job in a list of labeling jobs.\n \n LabelAttributeName (string) -- [REQUIRED]\n The attribute name to use for the label in the output manifest file. This is the key for the key/value pair formed with the label that a worker assigns to the object. The name can't end with \"-metadata\". If you are running a semantic segmentation labeling job, the attribute name must end with \"-ref\". 
If you are running any other kind of labeling job, the attribute name must not end with "-ref".\n \n InputConfig (dict) -- [REQUIRED]\n Input data for the labeling job, such as the Amazon S3 location of the data objects and the location of the manifest file that describes the data objects.\n \n DataSource (dict) -- [REQUIRED]The location of the input data.\n \n S3DataSource (dict) -- [REQUIRED]The Amazon S3 location of the input data objects.\n \n ManifestS3Uri (string) -- [REQUIRED]The Amazon S3 location of the manifest file that describes the input data objects.\n \n \n \n \n \n DataAttributes (dict) --Attributes of the data specified by the customer.\n \n ContentClassifiers (list) --Declares that your content is free of personally identifiable information or adult content. Amazon SageMaker may restrict the Amazon Mechanical Turk workers that can view your task based on this information.\n \n (string) --\n \n \n \n \n \n \n OutputConfig (dict) -- [REQUIRED]\n The location of the output data and the AWS Key Management Service key ID for the key used to encrypt the output data, if any.\n \n S3OutputPath (string) -- [REQUIRED]The Amazon S3 location to write output data.\n \n KmsKeyId (string) --The AWS Key Management Service ID of the key used to encrypt the output data, if any.\n \n \n \n RoleArn (string) -- [REQUIRED]\n The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during data labeling. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete data labeling.\n \n LabelCategoryConfigS3Uri (string) -- The S3 URL of the file that defines the categories used to label the data objects.\n The file is a JSON structure in the following format:\n \n {"document-version": "2018-11-28"\n "labels": [\n {\n "label": "*label 1* "\n },\n {\n "label": "*label 2* "\n },\n ...\n {\n "label": "*label n* "\n }\n ]\n }\n \n \n StoppingConditions (dict) -- A set of conditions for stopping the labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.\n \n MaxHumanLabeledObjectCount (integer) --The maximum number of objects that can be labeled by human workers.\n \n MaxPercentageOfInputDatasetLabeled (integer) --The maximum number of input data objects that should be labeled.\n \n \n \n LabelingJobAlgorithmsConfig (dict) -- Configures the information required to perform automated data labeling.\n \n LabelingJobAlgorithmSpecificationArn (string) -- [REQUIRED]Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You must select one of the following ARNs:\n \n Image classification arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/image-classification\n Text classification arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/text-classification\n Object detection arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/object-detection\n \n \n InitialActiveLearningModelArn (string) --At the end of an auto-label job, Amazon SageMaker Ground Truth sends the Amazon Resource Name (ARN) of the final model used for auto-labeling. 
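
A hedged sketch of assembling LabelingJobAlgorithmsConfig from the region-parameterized ARNs listed above; the region value is hypothetical:

region = 'us-east-1'  # hypothetical; substitute your own region
labeling_algorithms_config = {
    'LabelingJobAlgorithmSpecificationArn': (
        'arn:aws:sagemaker:%s:027400017018:'
        'labeling-job-algorithm-specification/image-classification' % region
    ),
    # Optionally warm-start the auto-labeling from an earlier job:
    # 'InitialActiveLearningModelArn': <the final model ARN from a previous job>,
}

The commented line is where the ARN of a previous job's final auto-labeling model would be supplied. 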
You can use this model as the starting point for subsequent similar jobs by providing the ARN of the model here.\n \n LabelingJobResourceConfig (dict) --Provides configuration information for a labeling job.\n \n VolumeKmsKeyId (string) --The AWS Key Management Service key ID for the key used to encrypt the output data, if any.\n \n \n \n \n \n HumanTaskConfig (dict) -- [REQUIRED]\n Configures the information required for human workers to complete a labeling task.\n \n WorkteamArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the work team assigned to complete the tasks.\n \n UiConfig (dict) -- [REQUIRED]Information about the user interface that workers use to complete the labeling task.\n \n UiTemplateS3Uri (string) -- [REQUIRED]The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template .\n \n \n \n PreHumanTaskLambdaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.\n For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:\n \n US East (Northern Virginia) (us-east-1):\n \n arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox\n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass\n arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation\n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass\n \n \n US East (Ohio) (us-east-2):\n \n arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox\n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass\n arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation\n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass\n \n \n US West (Oregon) (us-west-2):\n \n arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox\n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass\n arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation\n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass\n \n \n EU (Ireland) (eu-west-1):\n \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass\n \n \n Asia Pacific (Tokyo) (ap-northeast-1):\n \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass\n \n \n TaskKeywords (list) --Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task.\n \n (string) --\n \n \n TaskTitle (string) -- [REQUIRED]A title for the task for your human workers.\n \n TaskDescription (string) -- [REQUIRED]A description of the task for your human workers.\n \n NumberOfHumanWorkersPerDataObject (integer) -- [REQUIRED]The number of human workers that will label an object.\n \n TaskTimeLimitInSeconds (integer) -- [REQUIRED]The amount of time that a worker has to complete a task.\n \n TaskAvailabilityLifetimeInSeconds (integer) --The length of 
time that a task remains available for labeling by human workers.\n \n MaxConcurrentTaskCount (integer) --Defines the maximum number of data objects that can be labeled by human workers at the same time. Each object may have more than one worker at one time.\n \n AnnotationConsolidationConfig (dict) -- [REQUIRED]Configures how labels are consolidated across human workers.\n \n AnnotationConsolidationLambdaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation.\n For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:\n \n Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes. arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox\n Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass\n Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label. arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation\n Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass\n \n For more information, see Annotation Consolidation .\n \n \n \n PublicWorkforceTaskPrice (dict) --The price that you pay for each task performed by a public worker.\n \n AmountInUsd (dict) --Defines the amount of money paid to a worker in United States dollars.\n \n Dollars (integer) --The whole number of dollars in the amount.\n \n Cents (integer) --The fractional portion, in cents, of the amount.\n \n TenthFractionsOfACent (integer) --Fractions of a cent, in tenths.\n \n \n \n \n \n \n \n Tags (list) -- An array of key/value pairs. 
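For example (key and value are illustrative, not defaults): Tags=[{'Key': 'project', 'Value': 'street-scene-labeling'}] . 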
For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n \n (dict) --Describes a tag.\n \n Key (string) -- [REQUIRED]The tag key.\n \n Value (string) -- [REQUIRED]The tag value.\n \n \n \n \n \n \n \"\"\"\n pass\n\ndef create_model(ModelName=None, PrimaryContainer=None, Containers=None, ExecutionRoleArn=None, Tags=None, VpcConfig=None, EnableNetworkIsolation=None):\n \"\"\"\n Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the docker image containing inference code, artifacts (from prior training), and custom environment map that the inference code uses when you deploy the model for predictions.\n Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.\n To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.\n To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.\n In the CreateModel request, you must define a container with the PrimaryContainer parameter.\n In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code accesses any other AWS resources, you grant the necessary permissions via this role.\n See also: AWS API Documentation\n \n \n :example: response = client.create_model(\n ModelName='string',\n PrimaryContainer={\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ModelDataUrl': 'string',\n 'Environment': {\n 'string': 'string'\n },\n 'ModelPackageName': 'string'\n },\n Containers=[\n {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ModelDataUrl': 'string',\n 'Environment': {\n 'string': 'string'\n },\n 'ModelPackageName': 'string'\n },\n ],\n ExecutionRoleArn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n VpcConfig={\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n EnableNetworkIsolation=True|False\n )\n \n \n :type ModelName: string\n :param ModelName: [REQUIRED]\n The name of the new model.\n \n\n :type PrimaryContainer: dict\n :param PrimaryContainer: The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.\n ContainerHostname (string) --The DNS host name for the container after Amazon SageMaker deploys it.\n Image (string) --The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker\n ModelDataUrl (string) --The S3 path where the model artifacts, which result from model training, are stored. 
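An illustrative value (bucket and key are hypothetical): ModelDataUrl='s3://my-bucket/training-output/model.tar.gz' . 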
This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide .\n Environment (dict) --The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map.\n (string) --\n (string) --\n \n ModelPackageName (string) --The name of the model package to use to create the model.\n \n\n :type Containers: list\n :param Containers: Specifies the containers in the inference pipeline.\n (dict) --Describes the container, as part of model definition.\n ContainerHostname (string) --The DNS host name for the container after Amazon SageMaker deploys it.\n Image (string) --The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker\n ModelDataUrl (string) --The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide .\n Environment (dict) --The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map.\n (string) --\n (string) --\n \n ModelPackageName (string) --The name of the model package to use to create the model.\n \n \n\n :type ExecutionRoleArn: string\n :param ExecutionRoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles .\n Note\n To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.\n \n\n :type Tags: list\n :param Tags: An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :type VpcConfig: dict\n :param VpcConfig: A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. 
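A minimal sketch with placeholder IDs: VpcConfig={'SecurityGroupIds': ['sg-0123456789abcdef0'], 'Subnets': ['subnet-0123456789abcdef0']} . 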
VpcConfig is used in hosting services and in batch transform. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud .\n SecurityGroupIds (list) -- [REQUIRED]The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.\n (string) --\n Subnets (list) -- [REQUIRED]The ID of the subnets in the VPC to which you want to connect your training job or model.\n (string) --\n \n\n :type EnableNetworkIsolation: boolean\n :param EnableNetworkIsolation: Isolates the model container. No inbound or outbound network calls can be made to or from the model container.\n Note\n The Semantic Segmentation built-in algorithm does not support network isolation.\n \n\n :rtype: dict\n :return: {\n 'ModelArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_model_package(ModelPackageName=None, ModelPackageDescription=None, InferenceSpecification=None, ValidationSpecification=None, SourceAlgorithmSpecification=None, CertifyForMarketplace=None):\n \"\"\"\n Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.\n To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification . To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification .\n See also: AWS API Documentation\n \n \n :example: response = client.create_model_package(\n ModelPackageName='string',\n ModelPackageDescription='string',\n InferenceSpecification={\n 'Containers': [\n {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ImageDigest': 'string',\n 'ModelDataUrl': 'string',\n 'ProductId': 'string'\n },\n ],\n 'SupportedTransformInstanceTypes': [\n 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n ],\n 'SupportedRealtimeInferenceInstanceTypes': [\n 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.large'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.large'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n ],\n 'SupportedContentTypes': [\n 'string',\n ],\n 'SupportedResponseMIMETypes': [\n 'string',\n ]\n },\n ValidationSpecification={\n 'ValidationRole': 'string',\n 'ValidationProfiles': [\n {\n 'ProfileName': 'string',\n 'TransformJobDefinition': {\n 'MaxConcurrentTransforms': 123,\n 'MaxPayloadInMB': 123,\n 'BatchStrategy': 'MultiRecord'|'SingleRecord',\n 'Environment': {\n 'string': 'string'\n },\n 'TransformInput': {\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 
'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string'\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'SplitType': 'None'|'Line'|'RecordIO'|'TFRecord'\n },\n 'TransformOutput': {\n 'S3OutputPath': 'string',\n 'Accept': 'string',\n 'AssembleWith': 'None'|'Line',\n 'KmsKeyId': 'string'\n },\n 'TransformResources': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n 'InstanceCount': 123,\n 'VolumeKmsKeyId': 'string'\n }\n }\n },\n ]\n },\n SourceAlgorithmSpecification={\n 'SourceAlgorithms': [\n {\n 'ModelDataUrl': 'string',\n 'AlgorithmName': 'string'\n },\n ]\n },\n CertifyForMarketplace=True|False\n )\n \n \n :type ModelPackageName: string\n :param ModelPackageName: [REQUIRED]\n The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).\n \n\n :type ModelPackageDescription: string\n :param ModelPackageDescription: A description of the model package.\n\n :type InferenceSpecification: dict\n :param InferenceSpecification: Specifies details about inference jobs that can be run with models based on this model package, including the following:\n The Amazon ECR paths of containers that contain the inference code and model artifacts.\n The instance types that the model package supports for transform jobs and real-time endpoints used for inference.\n The input and output content formats that the model package supports for inference.\n Containers (list) -- [REQUIRED]The Amazon ECR registry path of the Docker image that contains the inference code.\n (dict) --Describes the Docker container for the model package.\n ContainerHostname (string) --The DNS host name for the Docker container.\n Image (string) -- [REQUIRED]The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.\n If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker .\n ImageDigest (string) --An MD5 hash of the training algorithm that identifies the Docker image used for training.\n ModelDataUrl (string) --The Amazon S3 path where the model artifacts, which result from model training, are stored. 
This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n ProductId (string) --The AWS Marketplace product ID of the model package.\n \n SupportedTransformInstanceTypes (list) -- [REQUIRED]A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.\n (string) --\n SupportedRealtimeInferenceInstanceTypes (list) -- [REQUIRED]A list of the instance types that are used to generate inferences in real-time.\n (string) --\n SupportedContentTypes (list) -- [REQUIRED]The supported MIME types for the input data.\n (string) --\n SupportedResponseMIMETypes (list) -- [REQUIRED]The supported MIME types for the output data.\n (string) --\n \n\n :type ValidationSpecification: dict\n :param ValidationSpecification: Specifies configurations for one or more transform jobs that Amazon SageMaker runs to test the model package.\n ValidationRole (string) -- [REQUIRED]The IAM roles to be used for the validation of the model package.\n ValidationProfiles (list) -- [REQUIRED]An array of ModelPackageValidationProfile objects, each of which specifies a batch transform job that Amazon SageMaker runs to validate your model package.\n (dict) --Contains data, such as the inputs and targeted instance types that are used in the process of validating the model package.\n The data provided in the validation profile is made available to your buyers on AWS Marketplace.\n ProfileName (string) -- [REQUIRED]The name of the profile for the model package.\n TransformJobDefinition (dict) -- [REQUIRED]The TransformJobDefinition object that describes the transform job used for the validation of the model package.\n MaxConcurrentTransforms (integer) --The maximum number of parallel requests that can be sent to each instance in a transform job. The default value is 1.\n MaxPayloadInMB (integer) --The maximum payload size allowed, in MB. A payload is the data portion of a record (without metadata).\n BatchStrategy (string) --A string that determines the number of records included in a single mini-batch.\n SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB limit.\n Environment (dict) --The environment variables to set in the Docker container. We support up to 16 key and values entries in the map.\n (string) --\n (string) --\n \n TransformInput (dict) -- [REQUIRED]A description of the input source and the way the transform job consumes it.\n DataSource (dict) -- [REQUIRED]Describes the location of the channel data, meaning the S3 location of the input data that the model can consume.\n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for batch transform.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch transform.\n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. 
For example:\n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {'prefix': 's3://customer_bucket/some/prefix/'}, 'relative/path/to/custdata-1', 'relative/path/custdata-2', ... ] The preceding JSON matches the following S3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n \n ContentType (string) --The multipurpose internet mail extension (MIME) type of the data. Amazon SageMaker uses the MIME type with each http call to transfer data to the transform job.\n CompressionType (string) --Compressing data helps save on storage space. If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None .\n SplitType (string) --The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None , which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.\n When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy is MultiRecord , Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord , Amazon SageMaker sends individual records in each request.\n Note\n Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord . Padding is not removed if the value of BatchStrategy is set to MultiRecord .\n For more information about the RecordIO data format, see Data Format in the MXNet documentation. For more information about the TFRecord format, see Consuming TFRecord data in the TensorFlow documentation.\n TransformOutput (dict) -- [REQUIRED]Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.\n S3OutputPath (string) -- [REQUIRED]The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix .\n For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, the input data s3://bucket-name/input-name-prefix/dataset01/data.csv will have the transformed data stored at s3://bucket-name/key-name-prefix/dataset01/ , based on the original name, as a series of .part files (.part0001, .part0002, etc.).\n Accept (string) --The MIME type used to specify the output data. 
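For example, Accept='text/csv' (an illustrative value). 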
Amazon SageMaker uses the MIME type with each http call to transfer data from the transform job.\n AssembleWith (string) --Defines how to assemble the results of the transform job as a single S3 object. You should select a format that is most convenient to you. To concatenate the results in binary format, specify None . To add a newline character at the end of every transformed record, specify Line .\n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n // KMS Key Alias 'alias/ExampleAlias'\n // Amazon Resource Name (ARN) of a KMS Key Alias 'arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias'\n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n TransformResources (dict) -- [REQUIRED]Identifies the ML compute instances for the transform job.\n InstanceType (string) -- [REQUIRED]The ML compute instance type for the transform job. For using built-in algorithms to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should suffice. There is no default value for InstanceType .\n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1. The default value is 1 .\n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n \n \n \n\n :type SourceAlgorithmSpecification: dict\n :param SourceAlgorithmSpecification: Details about the algorithm that was used to create the model package.\n SourceAlgorithms (list) -- [REQUIRED]A list of the algorithms that were used to create a model package.\n (dict) --Specifies an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your Amazon SageMaker account or an algorithm in AWS Marketplace that you are subscribed to.\n ModelDataUrl (string) --The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).\n AlgorithmName (string) -- [REQUIRED]The name of an algorithm that was used to create the model package. 
The algorithm must be either an algorithm resource in your Amazon SageMaker account or an algorithm in AWS Marketplace that you are subscribed to.\n \n \n\n :type CertifyForMarketplace: boolean\n :param CertifyForMarketplace: Whether to certify the model package for listing on AWS Marketplace.\n\n :rtype: dict\n :return: {\n 'ModelPackageArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_notebook_instance(NotebookInstanceName=None, InstanceType=None, SubnetId=None, SecurityGroupIds=None, RoleArn=None, KmsKeyId=None, Tags=None, LifecycleConfigName=None, DirectInternetAccess=None, VolumeSizeInGB=None, AcceleratorTypes=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None):\n \"\"\"\n Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running the Jupyter Notebook App.\n In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.\n Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.\n After receiving the request, Amazon SageMaker does the following:\n After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).\n After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.\n For more information, see How It Works .\n See also: AWS API Documentation\n \n \n :example: response = client.create_notebook_instance(\n NotebookInstanceName='string',\n InstanceType='ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.t3.medium'|'ml.t3.large'|'ml.t3.xlarge'|'ml.t3.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.c5d.xlarge'|'ml.c5d.2xlarge'|'ml.c5d.4xlarge'|'ml.c5d.9xlarge'|'ml.c5d.18xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge',\n SubnetId='string',\n SecurityGroupIds=[\n 'string',\n ],\n RoleArn='string',\n KmsKeyId='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n LifecycleConfigName='string',\n DirectInternetAccess='Enabled'|'Disabled',\n VolumeSizeInGB=123,\n AcceleratorTypes=[\n 'ml.eia1.medium'|'ml.eia1.large'|'ml.eia1.xlarge',\n ],\n DefaultCodeRepository='string',\n AdditionalCodeRepositories=[\n 'string',\n ]\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the new notebook instance.\n \n\n :type InstanceType: string\n :param InstanceType: [REQUIRED]\n The type of ML compute instance to launch for the notebook instance.\n \n\n :type SubnetId: string\n :param SubnetId: The ID of the subnet in a VPC to which you would like to have connectivity from your ML compute instance.\n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: The VPC security group IDs, in the form sg-xxxxxxxx. 
The security groups must be for the same VPC as specified in the subnet.\n (string) --\n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles .\n Note\n To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.\n \n\n :type KmsKeyId: string\n :param KmsKeyId: If you provide an AWS KMS key ID, Amazon SageMaker uses it to encrypt data at rest on the ML storage volume that is attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide .\n\n :type Tags: list\n :param Tags: A list of tags to associate with the notebook instance. You can add tags later by using the CreateTags API.\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :type LifecycleConfigName: string\n :param LifecycleConfigName: The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n\n :type DirectInternetAccess: string\n :param DirectInternetAccess: Sets whether Amazon SageMaker provides internet access to the notebook instance. If you set this to Disabled , this notebook instance will be able to access resources only in your VPC, and will not be able to connect to Amazon SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.\n For more information, see Notebook Instances Are Internet-Enabled by Default . You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter.\n \n\n :type VolumeSizeInGB: integer\n :param VolumeSizeInGB: The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB.\n\n :type AcceleratorTypes: list\n :param AcceleratorTypes: A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker .\n (string) --\n \n\n :type DefaultCodeRepository: string\n :param DefaultCodeRepository: A git repository to associate with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances .\n\n :type AdditionalCodeRepositories: list\n :param AdditionalCodeRepositories: An array of up to 3 git repositories to associate with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository. These repositories are cloned at the same level as the default repository of your notebook instance. 
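An illustrative value (the repository URL below is hypothetical): AdditionalCodeRepositories=['https://git-codecommit.us-east-1.amazonaws.com/v1/repos/shared-utils'] . 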
For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances .\n (string) --\n \n\n :rtype: dict\n :return: {\n 'NotebookInstanceArn': 'string'\n }\n \n \n :returns: \n NotebookInstanceName (string) -- [REQUIRED]\n The name of the new notebook instance.\n \n InstanceType (string) -- [REQUIRED]\n The type of ML compute instance to launch for the notebook instance.\n \n SubnetId (string) -- The ID of the subnet in a VPC to which you would like to have connectivity from your ML compute instance.\n SecurityGroupIds (list) -- The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.\n \n (string) --\n \n \n RoleArn (string) -- [REQUIRED]\n When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles .\n \n Note\n To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.\n \n \n KmsKeyId (string) -- If you provide an AWS KMS key ID, Amazon SageMaker uses it to encrypt data at rest on the ML storage volume that is attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide .\n Tags (list) -- A list of tags to associate with the notebook instance. You can add tags later by using the CreateTags API.\n \n (dict) --Describes a tag.\n \n Key (string) -- [REQUIRED]The tag key.\n \n Value (string) -- [REQUIRED]The tag value.\n \n \n \n \n \n LifecycleConfigName (string) -- The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n DirectInternetAccess (string) -- Sets whether Amazon SageMaker provides internet access to the notebook instance. If you set this to Disabled , this notebook instance will be able to access resources only in your VPC, and will not be able to connect to Amazon SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.\n For more information, see Notebook Instances Are Internet-Enabled by Default . You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter.\n \n VolumeSizeInGB (integer) -- The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB.\n AcceleratorTypes (list) -- A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker .\n \n (string) --\n \n \n DefaultCodeRepository (string) -- A git repository to associate with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. 
For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances .\n AdditionalCodeRepositories (list) -- An array of up to 3 git repositories to associate with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances .\n \n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_notebook_instance_lifecycle_config(NotebookInstanceLifecycleConfigName=None, OnCreate=None, OnStart=None):\n \"\"\"\n Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.\n Each lifecycle configuration script has a limit of 16384 characters.\n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin .\n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook] .\n Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.\n For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n See also: AWS API Documentation\n \n \n :example: response = client.create_notebook_instance_lifecycle_config(\n NotebookInstanceLifecycleConfigName='string',\n OnCreate=[\n {\n 'Content': 'string'\n },\n ],\n OnStart=[\n {\n 'Content': 'string'\n },\n ]\n )\n \n \n :type NotebookInstanceLifecycleConfigName: string\n :param NotebookInstanceLifecycleConfigName: [REQUIRED]\n The name of the lifecycle configuration.\n \n\n :type OnCreate: list\n :param OnCreate: A shell script that runs only once, when you create a notebook instance. The shell script must be a base64-encoded string.\n (dict) --Contains the notebook instance lifecycle configuration script.\n Each lifecycle configuration script has a limit of 16384 characters.\n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin .\n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook] .\n Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.\n For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n Content (string) --A base64-encoded string that contains a shell script for a notebook instance lifecycle configuration.\n \n \n\n :type OnStart: list\n :param OnStart: A shell script that runs every time you start a notebook instance, including when you create the notebook instance. 
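A minimal sketch, assuming import base64 and an arbitrary script body: OnStart=[{'Content': base64.b64encode(b'echo starting').decode('utf-8')}] . 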
The shell script must be a base64-encoded string.\n (dict) --Contains the notebook instance lifecycle configuration script.\n Each lifecycle configuration script has a limit of 16384 characters.\n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin .\n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook] .\n Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.\n For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n Content (string) --A base64-encoded string that contains a shell script for a notebook instance lifecycle configuration.\n \n \n\n :rtype: dict\n :return: {\n 'NotebookInstanceLifecycleConfigArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_presigned_notebook_instance_url(NotebookInstanceName=None, SessionExpirationDurationInSeconds=None):\n \"\"\"\n Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.\n You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. To restrict access, attach an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address .\n See also: AWS API Documentation\n \n \n :example: response = client.create_presigned_notebook_instance_url(\n NotebookInstanceName='string',\n SessionExpirationDurationInSeconds=123\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the notebook instance.\n \n\n :type SessionExpirationDurationInSeconds: integer\n :param SessionExpirationDurationInSeconds: The duration of the session, in seconds. The default is 12 hours.\n\n :rtype: dict\n :return: {\n 'AuthorizedUrl': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_training_job(TrainingJobName=None, HyperParameters=None, AlgorithmSpecification=None, RoleArn=None, InputDataConfig=None, OutputDataConfig=None, ResourceConfig=None, VpcConfig=None, StoppingCondition=None, Tags=None, EnableNetworkIsolation=None):\n \"\"\"\n Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.\n If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. 
You can also use the artifacts in a deep learning service other than Amazon SageMaker, provided that you know how to use them for inferences.\n In the request body, you provide the following:\n For more information about Amazon SageMaker, see How It Works .\n See also: AWS API Documentation\n \n \n :example: response = client.create_training_job(\n TrainingJobName='string',\n HyperParameters={\n 'string': 'string'\n },\n AlgorithmSpecification={\n 'TrainingImage': 'string',\n 'AlgorithmName': 'string',\n 'TrainingInputMode': 'Pipe'|'File',\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ]\n },\n RoleArn='string',\n InputDataConfig=[\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n OutputDataConfig={\n 'KmsKeyId': 'string',\n 'S3OutputPath': 'string'\n },\n ResourceConfig={\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 'VolumeKmsKeyId': 'string'\n },\n VpcConfig={\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n StoppingCondition={\n 'MaxRuntimeInSeconds': 123\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n EnableNetworkIsolation=True|False\n )\n \n \n :type TrainingJobName: string\n :param TrainingJobName: [REQUIRED]\n The name of the training job. The name must be unique within an AWS Region in an AWS account.\n \n\n :type HyperParameters: dict\n :param HyperParameters: Algorithm-specific parameters that influence the quality of the model. You set hyperparameters before you start the learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms .\n You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint .\n (string) --\n (string) --\n \n\n :type AlgorithmSpecification: dict\n :param AlgorithmSpecification: [REQUIRED]\n The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by Amazon SageMaker, see Algorithms . For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker .\n TrainingImage (string) --The registry path of the Docker image that contains the training algorithm. For information about docker registry paths for built-in algorithms, see Algorithms Provided by Amazon SageMaker: Common Parameters .\n AlgorithmName (string) --The name of the algorithm resource to use for the training job. This must be an algorithm resource that you created or subscribe to on AWS Marketplace. 
If you specify a value for this parameter, you can't specify a value for TrainingImage .\n TrainingInputMode (string) -- [REQUIRED]The input mode that the algorithm supports. For the input modes that Amazon SageMaker algorithms support, see Algorithms . If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to a Docker volume for the training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.\n In File mode, make sure you provision the ML storage volume with sufficient capacity to accommodate the data download from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container also uses the ML storage volume to store intermediate information, if any.\n For distributed algorithms using File mode, training data is distributed uniformly, and your training duration is predictable if the input data object sizes are approximately the same. Amazon SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed, where one host in a training cluster is overloaded, thus becoming a bottleneck in training.\n MetricDefinitions (list) --A list of metric definition objects. Each object specifies the metric name and regular expressions used to parse algorithm logs. Amazon SageMaker publishes each metric to Amazon CloudWatch.\n (dict) --Specifies a metric that the training algorithm writes to stderr or stdout . Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.\n Name (string) -- [REQUIRED]The name of the metric.\n Regex (string) -- [REQUIRED]A regular expression that searches the output of a training job and gets the value of the metric. For more information about using regular expressions to define metrics, see Defining Objective Metrics .\n \n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.\n During model training, Amazon SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see Amazon SageMaker Roles .\n Note\n To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.\n \n\n :type InputDataConfig: list\n :param InputDataConfig: An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.\n Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data . The configuration for each channel provides the S3 location where the input data is stored. 
It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.\n Depending on the input mode that the algorithm supports, Amazon SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams.\n (dict) --A channel is a named input source that training algorithms can consume.\n ChannelName (string) -- [REQUIRED]The name of the channel.\n DataSource (dict) -- [REQUIRED]The location of the channel data.\n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects that match the specified key name prefix for model training.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for model training.\n If you choose AugmentedManifestFile , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe .\n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {'prefix': 's3://customer_bucket/some/prefix/'}, 'relative/path/to/custdata-1', 'relative/path/custdata-2', ... ] The preceding JSON matches the following s3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n S3DataDistributionType (string) --If you want Amazon SageMaker to replicate the entire dataset on each ML compute instance that is launched for model training, specify FullyReplicated .\n If you want Amazon SageMaker to replicate a subset of data on each ML compute instance that is launched for model training, specify ShardedByS3Key . If there are n ML compute instances launched for a training job, each instance gets approximately 1/n of the number of S3 objects. In this case, model training on each machine uses only the subset of training data.\n Don't choose more ML compute instances for training than available S3 objects. If you do, some nodes won't get any data and you will pay for nodes that aren't getting any training data. This applies in both File and Pipe modes. Keep this in mind when developing algorithms.\n In distributed training, where you use multiple ML compute EC2 instances, you might choose ShardedByS3Key . 
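For example, with 4 training instances and 100 input objects, each instance receives approximately 25 of the objects. 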
If the algorithm requires copying training data to the ML storage volume (when TrainingInputMode is set to File ), this copies 1/n of the number of objects.\n AttributeNames (list) --A list of one or more attribute names to use that are found in a specified augmented manifest file.\n (string) --\n \n ContentType (string) --The MIME type of the data.\n CompressionType (string) --If training data is compressed, the compression type. The default value is None . CompressionType is used only in Pipe input mode. In File mode, leave this field unset or set it to None.\n RecordWrapperType (string) --Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format. In this case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO .\n In File mode, leave this field unset or set it to None.\n InputMode (string) --(Optional) The input mode to use for the data channel in a training job. If you don't set a value for InputMode , Amazon SageMaker uses the value set for TrainingInputMode . Use this parameter to override the TrainingInputMode setting in a AlgorithmSpecification request when you have a channel that needs a different input mode from the training job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML storage volume, and mount the directory to a Docker volume, use File input mode. To stream data directly from Amazon S3 to the container, choose Pipe input mode.\n To use a model for incremental training, choose File input mode.\n ShuffleConfig (dict) --A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType , this shuffles the results of the S3 key prefix matches. If you use ManifestFile , the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile , the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.\n For Pipe input mode, shuffling is done at the start of every epoch. With large datasets this ensures that the order of the training data is different for each epoch, which helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key , the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.\n Seed (integer) -- [REQUIRED]Determines the shuffling order in ShuffleConfig value.\n \n \n\n :type OutputDataConfig: dict\n :param OutputDataConfig: [REQUIRED]\n Specifies the path to the S3 bucket where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.\n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. 
The KmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n // KMS Key Alias 'alias/ExampleAlias'\n // Amazon Resource Name (ARN) of a KMS Key Alias 'arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias'\n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n S3OutputPath (string) -- [REQUIRED]Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix .\n \n\n :type ResourceConfig: dict\n :param ResourceConfig: [REQUIRED]\n The resources, including the ML compute instances and ML storage volumes, to use for model training.\n ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want Amazon SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1.\n InstanceType (string) -- [REQUIRED]The ML compute instance type.\n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use. For distributed training, provide a value greater than 1.\n VolumeSizeInGB (integer) -- [REQUIRED]The size of the ML storage volume that you want to provision.\n ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.\n You must specify sufficient ML storage for your scenario.\n Note\n Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.\n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n \n\n :type VpcConfig: dict\n :param VpcConfig: A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud .\n SecurityGroupIds (list) -- [REQUIRED]The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.\n (string) --\n Subnets (list) -- [REQUIRED]The ID of the subnets in the VPC to which you want to connect your training job or model.\n (string) --\n \n\n :type StoppingCondition: dict\n :param StoppingCondition: [REQUIRED]\n Sets a duration for training. Use this parameter to cap model training costs. 
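For example, StoppingCondition={'MaxRuntimeInSeconds': 86400} stops the job after one day (86,400 seconds); the value shown is illustrative, not a default. 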
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.\n When Amazon SageMaker terminates a job because the stopping condition has been met, training algorithms provided by Amazon SageMaker save the intermediate results of the job. This intermediate data is a valid model artifact. You can use it to create a model using the CreateModel API.\n MaxRuntimeInSeconds (integer) --The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If a value is not specified, the default value is 1 day. The maximum value is 5 days.\n \n\n :type Tags: list\n :param Tags: An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :type EnableNetworkIsolation: boolean\n :param EnableNetworkIsolation: Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.\n Note\n The Semantic Segmentation built-in algorithm does not support network isolation.\n \n\n :rtype: dict\n :return: {\n 'TrainingJobArn': 'string'\n }\n \n \n :returns: \n TrainingJobName (string) -- [REQUIRED]\n The name of the training job. The name must be unique within an AWS Region in an AWS account.\n \n HyperParameters (dict) -- Algorithm-specific parameters that influence the quality of the model. You set hyperparameters before you start the learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms .\n You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint .\n \n (string) --\n (string) --\n \n \n \n \n AlgorithmSpecification (dict) -- [REQUIRED]\n The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by Amazon SageMaker, see Algorithms . For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker .\n \n TrainingImage (string) --The registry path of the Docker image that contains the training algorithm. For information about Docker registry paths for built-in algorithms, see Algorithms Provided by Amazon SageMaker: Common Parameters .\n \n AlgorithmName (string) --The name of the algorithm resource to use for the training job. This must be an algorithm resource that you created or subscribed to on AWS Marketplace. If you specify a value for this parameter, you can't specify a value for TrainingImage .\n \n TrainingInputMode (string) -- [REQUIRED]The input mode that the algorithm supports. For the input modes that Amazon SageMaker algorithms support, see Algorithms .
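\n For example, a minimal AlgorithmSpecification for a File-mode algorithm might look like the following sketch (the TrainingImage URI below is a placeholder registry path, not a real image):\n AlgorithmSpecification={\n 'TrainingImage': '123456789012.dkr.ecr.us-west-2.amazonaws.com/example-algorithm:latest',\n 'TrainingInputMode': 'File'\n }\n 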
If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to a Docker volume for the training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.\n In File mode, make sure you provision an ML storage volume with sufficient capacity to accommodate the data download from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container also uses the ML storage volume to store intermediate information, if any.\n For distributed algorithms using File mode, training data is distributed uniformly, and your training duration is predictable if the input data objects are approximately the same size. Amazon SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal because the data distribution is also skewed: one host in the training cluster is overloaded, becoming a bottleneck in training.\n \n MetricDefinitions (list) --A list of metric definition objects. Each object specifies the metric name and regular expressions used to parse algorithm logs. Amazon SageMaker publishes each metric to Amazon CloudWatch.\n \n (dict) --Specifies a metric that the training algorithm writes to stderr or stdout . Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.\n \n Name (string) -- [REQUIRED]The name of the metric.\n \n Regex (string) -- [REQUIRED]A regular expression that searches the output of a training job and gets the value of the metric. For more information about using regular expressions to define metrics, see Defining Objective Metrics .\n \n \n \n \n \n \n \n RoleArn (string) -- [REQUIRED]\n The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.\n During model training, Amazon SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see Amazon SageMaker Roles .\n \n Note\n To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.\n \n \n InputDataConfig (list) -- An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.\n Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data . The configuration for each channel provides the S3 location where the input data is stored.
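\n For example, a sketch of a single training channel that reads CSV objects under an S3 prefix (the bucket and prefix names below are placeholders):\n InputDataConfig=[\n {\n 'ChannelName': 'train',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'S3Prefix',\n 'S3Uri': 's3://example-bucket/train/',\n 'S3DataDistributionType': 'FullyReplicated'\n }\n },\n 'ContentType': 'text/csv',\n 'CompressionType': 'None'\n }\n ]\n 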
It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.\n Depending on the input mode that the algorithm supports, Amazon SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams.\n \n (dict) --A channel is a named input source that training algorithms can consume.\n \n ChannelName (string) -- [REQUIRED]The name of the channel.\n \n DataSource (dict) -- [REQUIRED]The location of the channel data.\n \n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n \n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects that match the specified key name prefix for model training.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for model training.\n If you choose AugmentedManifestFile , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe .\n \n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n \n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"}, \"relative/path/to/custdata-1\", \"relative/path/custdata-2\", ... ] The preceding JSON matches the following s3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n \n \n S3DataDistributionType (string) --If you want Amazon SageMaker to replicate the entire dataset on each ML compute instance that is launched for model training, specify FullyReplicated .\n If you want Amazon SageMaker to replicate a subset of data on each ML compute instance that is launched for model training, specify ShardedByS3Key . If there are n ML compute instances launched for a training job, each instance gets approximately 1/n of the number of S3 objects. In this case, model training on each machine uses only the subset of training data.\n Don't choose more ML compute instances for training than available S3 objects. If you do, some nodes won't get any data and you will pay for nodes that aren't getting any training data. This applies in both File and Pipe modes. Keep this in mind when developing algorithms.\n In distributed training, where you use multiple ML compute EC2 instances, you might choose ShardedByS3Key . 
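\n For example, a sketch of the data source fields for a sharded multi-instance job (the bucket name is a placeholder):\n 'S3DataSource': {\n 'S3DataType': 'S3Prefix',\n 'S3Uri': 's3://example-bucket/train/',\n 'S3DataDistributionType': 'ShardedByS3Key'\n }\n 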
If the algorithm requires copying training data to the ML storage volume (when TrainingInputMode is set to File ), this copies 1/n of the number of objects.\n \n AttributeNames (list) --A list of one or more attribute names to use that are found in a specified augmented manifest file.\n \n (string) --\n \n \n \n \n \n \n ContentType (string) --The MIME type of the data.\n \n CompressionType (string) --If training data is compressed, the compression type. The default value is None . CompressionType is used only in Pipe input mode. In File mode, leave this field unset or set it to None.\n \n RecordWrapperType (string) --Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format. In this case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO .\n In File mode, leave this field unset or set it to None.\n \n InputMode (string) --(Optional) The input mode to use for the data channel in a training job. If you don't set a value for InputMode , Amazon SageMaker uses the value set for TrainingInputMode . Use this parameter to override the TrainingInputMode setting in an AlgorithmSpecification request when you have a channel that needs a different input mode from the training job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML storage volume, and mount the directory to a Docker volume, use File input mode. To stream data directly from Amazon S3 to the container, choose Pipe input mode.\n To use a model for incremental training, choose File input mode.\n \n ShuffleConfig (dict) --A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType , this shuffles the results of the S3 key prefix matches. If you use ManifestFile , the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile , the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.\n For Pipe input mode, shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, which helps reduce bias and possible overfitting. In a multi-node training job, when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key , the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.\n \n Seed (integer) -- [REQUIRED]Determines the shuffling order in the ShuffleConfig value.\n \n \n \n \n \n \n \n OutputDataConfig (dict) -- [REQUIRED]\n Specifies the path to the S3 bucket where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.\n \n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
The KmsKeyId can be any of the following formats:\n \n // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab"\n // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"\n // KMS Key Alias "alias/ExampleAlias"\n // Amazon Resource Name (ARN) of a KMS Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"\n \n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n \n S3OutputPath (string) -- [REQUIRED]Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix .\n \n \n \n ResourceConfig (dict) -- [REQUIRED]\n The resources, including the ML compute instances and ML storage volumes, to use for model training.\n ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want Amazon SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1.\n \n InstanceType (string) -- [REQUIRED]The ML compute instance type.\n \n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use. For distributed training, provide a value greater than 1.\n \n VolumeSizeInGB (integer) -- [REQUIRED]The size of the ML storage volume that you want to provision.\n ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.\n You must specify sufficient ML storage for your scenario.\n \n Note\n Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.\n \n \n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:\n \n // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab"\n // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"\n \n \n \n \n VpcConfig (dict) -- A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud .\n \n SecurityGroupIds (list) -- [REQUIRED]The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.\n \n (string) --\n \n \n Subnets (list) -- [REQUIRED]The IDs of the subnets in the VPC to which you want to connect your training job or model.\n \n (string) --\n \n \n \n \n StoppingCondition (dict) -- [REQUIRED]\n Sets a duration for training. Use this parameter to cap model training costs.
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.\n When Amazon SageMaker terminates a job because the stopping condition has been met, training algorithms provided by Amazon SageMaker save the intermediate results of the job. This intermediate data is a valid model artifact. You can use it to create a model using the CreateModel API.\n \n MaxRuntimeInSeconds (integer) --The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If a value is not specified, the default value is 1 day. The maximum value is 5 days.\n \n \n \n Tags (list) -- An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n \n (dict) --Describes a tag.\n \n Key (string) -- [REQUIRED]The tag key.\n \n Value (string) -- [REQUIRED]The tag value.\n \n \n \n \n \n EnableNetworkIsolation (boolean) -- Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.\n \n Note\n The Semantic Segmentation built-in algorithm does not support network isolation.\n \n \n \n """\n pass\n\ndef create_transform_job(TransformJobName=None, ModelName=None, MaxConcurrentTransforms=None, MaxPayloadInMB=None, BatchStrategy=None, Environment=None, TransformInput=None, TransformOutput=None, TransformResources=None, Tags=None):\n """\n Starts a transform job.
A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.\n To perform batch transformations, you create a transform job and use the data that you have readily available.\n In the request body, you provide the following:\n For more information about how batch transformation works in Amazon SageMaker, see How It Works .\n See also: AWS API Documentation\n \n \n :example: response = client.create_transform_job(\n TransformJobName='string',\n ModelName='string',\n MaxConcurrentTransforms=123,\n MaxPayloadInMB=123,\n BatchStrategy='MultiRecord'|'SingleRecord',\n Environment={\n 'string': 'string'\n },\n TransformInput={\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string'\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'SplitType': 'None'|'Line'|'RecordIO'|'TFRecord'\n },\n TransformOutput={\n 'S3OutputPath': 'string',\n 'Accept': 'string',\n 'AssembleWith': 'None'|'Line',\n 'KmsKeyId': 'string'\n },\n TransformResources={\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n 'InstanceCount': 123,\n 'VolumeKmsKeyId': 'string'\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type TransformJobName: string\n :param TransformJobName: [REQUIRED]\n The name of the transform job. The name must be unique within an AWS Region in an AWS account.\n \n\n :type ModelName: string\n :param ModelName: [REQUIRED]\n The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an AWS Region in an AWS account.\n \n\n :type MaxConcurrentTransforms: integer\n :param MaxConcurrentTransforms: The maximum number of parallel requests that can be sent to each instance in a transform job. This is good for algorithms that implement multiple workers on larger instances . The default value is 1 . To allow Amazon SageMaker to determine the appropriate number for MaxConcurrentTransforms , set the value to 0 .\n\n :type MaxPayloadInMB: integer\n :param MaxPayloadInMB: The maximum payload size allowed, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than or equal to the size of a single record. You can approximate the size of a record by dividing the size of your dataset by the number of records. Then multiply this value by the number of records you want in a mini-batch. We recommend entering a slightly larger value than this to ensure the records fit within the maximum payload size. The default value is 6 MB.\n For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0 . This feature only works in supported algorithms. Currently, Amazon SageMaker built-in algorithms do not support this feature.\n \n\n :type BatchStrategy: string\n :param BatchStrategy: Determines the number of records included in a single mini-batch. SingleRecord means only one record is used per mini-batch.
MultiRecord means a mini-batch is set to contain as many records as can fit within the MaxPayloadInMB limit.\n Batch transform will automatically split your input data into whatever payload size is specified if you set SplitType to Line and BatchStrategy to MultiRecord . There's no need to split the dataset into smaller files or to use larger payload sizes unless the records in your dataset are very large.\n \n\n :type Environment: dict\n :param Environment: The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.\n (string) --\n (string) --\n \n\n :type TransformInput: dict\n :param TransformInput: [REQUIRED]\n Describes the input source and the way the transform job consumes it.\n DataSource (dict) -- [REQUIRED]Describes the location of the channel data, meaning the S3 location of the input data that the model can consume.\n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for batch transform.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch transform.\n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {'prefix': 's3://customer_bucket/some/prefix/'}, 'relative/path/to/custdata-1', 'relative/path/custdata-2', ... ] The preceding JSON matches the following S3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n \n ContentType (string) --The multipurpose internet mail extension (MIME) type of the data. Amazon SageMaker uses the MIME type with each http call to transfer data to the transform job.\n CompressionType (string) --Compressing data helps save on storage space. If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None .\n SplitType (string) --The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None , which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.\n When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters.
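\n For example, a sketch of the fields that stream newline-delimited records in multi-record batches (the S3 URI below is a placeholder):\n BatchStrategy='MultiRecord',\n MaxPayloadInMB=6,\n TransformInput={\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'S3Prefix',\n 'S3Uri': 's3://example-bucket/input/'\n }\n },\n 'ContentType': 'text/csv',\n 'SplitType': 'Line'\n }\n 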
When the value of BatchStrategy is MultiRecord , Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord , Amazon SageMaker sends individual records in each request.\n Note\n Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord . Padding is not removed if the value of BatchStrategy is set to MultiRecord .\n For more information about the RecordIO data format, see Data Format in the MXNet documentation. For more information about the TFRecord format, see Consuming TFRecord data in the TensorFlow documentation.\n \n\n :type TransformOutput: dict\n :param TransformOutput: [REQUIRED]\n Describes the results of the transform job.\n S3OutputPath (string) -- [REQUIRED]The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix .\n For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, the input data s3://bucket-name/input-name-prefix/dataset01/data.csv will have the transformed data stored at s3://bucket-name/key-name-prefix/dataset01/ , based on the original name, as a series of .part files (.part0001, .part0002, etc.).\n Accept (string) --The MIME type used to specify the output data. Amazon SageMaker uses the MIME type with each http call to transfer data from the transform job.\n AssembleWith (string) --Defines how to assemble the results of the transform job as a single S3 object. You should select a format that is most convenient to you. To concatenate the results in binary format, specify None . To add a newline character at the end of every transformed record, specify Line .\n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n // KMS Key Alias 'alias/ExampleAlias'\n // Amazon Resource Name (ARN) of a KMS Key Alias 'arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias'\n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n \n\n :type TransformResources: dict\n :param TransformResources: [REQUIRED]\n Describes the resources, including ML instance types and ML instance count, to use for the transform job.\n InstanceType (string) -- [REQUIRED]The ML compute instance type for the transform job. For using built-in algorithms to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should suffice. There is no default value for InstanceType .\n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1.
The default value is 1 .\n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:\n // KMS Key ID '1234abcd-12ab-34cd-56ef-1234567890ab'\n // Amazon Resource Name (ARN) of a KMS Key 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'\n \n\n :type Tags: list\n :param Tags: An array of key-value pairs. Adding tags is optional. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :rtype: dict\n :return: {\n 'TransformJobArn': 'string'\n }\n \n \n :returns: \n TransformJobName (string) -- [REQUIRED]\n The name of the transform job. The name must be unique within an AWS Region in an AWS account.\n \n ModelName (string) -- [REQUIRED]\n The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an AWS Region in an AWS account.\n \n MaxConcurrentTransforms (integer) -- The maximum number of parallel requests that can be sent to each instance in a transform job. This is good for algorithms that implement multiple workers on larger instances . The default value is 1 . To allow Amazon SageMaker to determine the appropriate number for MaxConcurrentTransforms , set the value to 0 .\n MaxPayloadInMB (integer) -- The maximum payload size allowed, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than or equal to the size of a single record. You can approximate the size of a record by dividing the size of your dataset by the number of records. Then multiply this value by the number of records you want in a mini-batch. We recommend entering a slightly larger value than this to ensure the records fit within the maximum payload size. The default value is 6 MB.\n For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0 . This feature only works in supported algorithms. Currently, Amazon SageMaker built-in algorithms do not support this feature.\n \n BatchStrategy (string) -- Determines the number of records included in a single mini-batch. SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records as can fit within the MaxPayloadInMB limit.\n Batch transform will automatically split your input data into whatever payload size is specified if you set SplitType to Line and BatchStrategy to MultiRecord . There's no need to split the dataset into smaller files or to use larger payload sizes unless the records in your dataset are very large.\n \n Environment (dict) -- The environment variables to set in the Docker container.
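\n For example, a one-entry sketch (the variable name below is illustrative, not one that Amazon SageMaker itself reads): Environment={'EXAMPLE_LOG_LEVEL': 'debug'} .\n 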
We support up to 16 key-value entries in the map.\n \n (string) --\n (string) --\n \n \n \n \n TransformInput (dict) -- [REQUIRED]\n Describes the input source and the way the transform job consumes it.\n \n DataSource (dict) -- [REQUIRED]Describes the location of the channel data, meaning the S3 location of the input data that the model can consume.\n \n S3DataSource (dict) -- [REQUIRED]The S3 location of the data source that is associated with a channel.\n \n S3DataType (string) -- [REQUIRED]If you choose S3Prefix , S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for batch transform.\n If you choose ManifestFile , S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch transform.\n \n S3Uri (string) -- [REQUIRED]Depending on the value specified for the S3DataType , identifies either a key name prefix or a manifest. For example:\n \n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", "relative/path/custdata-2", ... ] The preceding JSON matches the following S3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n \n \n \n \n \n \n ContentType (string) --The multipurpose internet mail extension (MIME) type of the data. Amazon SageMaker uses the MIME type with each http call to transfer data to the transform job.\n \n CompressionType (string) --Compressing data helps save on storage space. If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None .\n \n SplitType (string) --The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None , which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.\n When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy is MultiRecord , Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord , Amazon SageMaker sends individual records in each request.\n \n Note\n Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord .
Padding is not removed if the value of BatchStrategy is set to MultiRecord .\n \n For more information about the RecordIO data format, see Data Format in the MXNet documentation. For more information about the TFRecord format, see Consuming TFRecord data in the TensorFlow documentation.\n \n \n \n TransformOutput (dict) -- [REQUIRED]\n Describes the results of the transform job.\n \n S3OutputPath (string) -- [REQUIRED]The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix .\n For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, the input data s3://bucket-name/input-name-prefix/dataset01/data.csv will have the transformed data stored at s3://bucket-name/key-name-prefix/dataset01/ , based on the original name, as a series of .part files (.part0001, .part0002, etc.).\n \n Accept (string) --The MIME type used to specify the output data. Amazon SageMaker uses the MIME type with each http call to transfer data from the transform job.\n \n AssembleWith (string) --Defines how to assemble the results of the transform job as a single S3 object. You should select a format that is most convenient to you. To concatenate the results in binary format, specify None . To add a newline character at the end of every transformed record, specify Line .\n \n KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:\n \n // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab"\n // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"\n // KMS Key Alias "alias/ExampleAlias"\n // Amazon Resource Name (ARN) of a KMS Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"\n \n If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.\n The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide .\n \n \n \n TransformResources (dict) -- [REQUIRED]\n Describes the resources, including ML instance types and ML instance count, to use for the transform job.\n \n InstanceType (string) -- [REQUIRED]The ML compute instance type for the transform job. For using built-in algorithms to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should suffice. There is no default value for InstanceType .\n \n InstanceCount (integer) -- [REQUIRED]The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1. The default value is 1 .\n \n VolumeKmsKeyId (string) --The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job.
The VolumeKmsKeyId can be any of the following formats:\n \n // KMS Key ID \"1234abcd-12ab-34cd-56ef-1234567890ab\"\n // Amazon Resource Name (ARN) of a KMS Key \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\n \n \n \n \n Tags (list) -- An array of key-value pairs. Adding tags is optional. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide .\n \n (dict) --Describes a tag.\n \n Key (string) -- [REQUIRED]The tag key.\n \n Value (string) -- [REQUIRED]The tag value.\n \n \n \n \n \n \n \"\"\"\n pass\n\ndef create_workteam(WorkteamName=None, MemberDefinitions=None, Description=None, Tags=None):\n \"\"\"\n Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.\n You cannot create more than 25 work teams in an account and region.\n See also: AWS API Documentation\n \n \n :example: response = client.create_workteam(\n WorkteamName='string',\n MemberDefinitions=[\n {\n 'CognitoMemberDefinition': {\n 'UserPool': 'string',\n 'UserGroup': 'string',\n 'ClientId': 'string'\n }\n },\n ],\n Description='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type WorkteamName: string\n :param WorkteamName: [REQUIRED]\n The name of the work team. Use this name to identify the work team.\n \n\n :type MemberDefinitions: list\n :param MemberDefinitions: [REQUIRED]\n A list of MemberDefinition objects that contains objects that identify the Amazon Cognito user pool that makes up the work team. For more information, see Amazon Cognito User Pools .\n All of the CognitoMemberDefinition objects that make up the member definition must have the same ClientId and UserPool values.\n (dict) --Defines the Amazon Cognito user group that is part of a work team.\n CognitoMemberDefinition (dict) --The Amazon Cognito user group that is part of the work team.\n UserPool (string) -- [REQUIRED]An identifier for a user pool. The user pool must be in the same region as the service that you are calling.\n UserGroup (string) -- [REQUIRED]An identifier for a user group.\n ClientId (string) -- [REQUIRED]An identifier for an application client. You must create the app client ID using Amazon Cognito.\n \n \n\n :type Description: string\n :param Description: [REQUIRED]\n A description of the work team.\n \n\n :type Tags: list\n :param Tags: \n (dict) --Describes a tag.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The tag value.\n \n \n\n :rtype: dict\n :return: {\n 'WorkteamArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_algorithm(AlgorithmName=None):\n \"\"\"\n Removes the specified algorithm from your account.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_algorithm(\n AlgorithmName='string'\n )\n \n \n :type AlgorithmName: string\n :param AlgorithmName: [REQUIRED]\n The name of the algorithm to delete.\n \n\n \"\"\"\n pass\n\ndef delete_code_repository(CodeRepositoryName=None):\n \"\"\"\n Deletes the specified git repository from your account.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_code_repository(\n CodeRepositoryName='string'\n )\n \n \n :type CodeRepositoryName: string\n :param CodeRepositoryName: [REQUIRED]\n The name of the git repository to delete.\n \n\n \"\"\"\n pass\n\ndef delete_endpoint(EndpointName=None):\n \"\"\"\n Deletes an endpoint. 
Amazon SageMaker frees up all of the resources that were deployed when the endpoint was created.\n Amazon SageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't need to use the RevokeGrant API call.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_endpoint(\n EndpointName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the endpoint that you want to delete.\n \n\n \"\"\"\n pass\n\ndef delete_endpoint_config(EndpointConfigName=None):\n \"\"\"\n Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_endpoint_config(\n EndpointConfigName='string'\n )\n \n \n :type EndpointConfigName: string\n :param EndpointConfigName: [REQUIRED]\n The name of the endpoint configuration that you want to delete.\n \n\n \"\"\"\n pass\n\ndef delete_model(ModelName=None):\n \"\"\"\n Deletes a model. The DeleteModel API deletes only the model entry that was created in Amazon SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_model(\n ModelName='string'\n )\n \n \n :type ModelName: string\n :param ModelName: [REQUIRED]\n The name of the model to delete.\n \n\n \"\"\"\n pass\n\ndef delete_model_package(ModelPackageName=None):\n \"\"\"\n Deletes a model package.\n A model package is used to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_model_package(\n ModelPackageName='string'\n )\n \n \n :type ModelPackageName: string\n :param ModelPackageName: [REQUIRED]\n The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).\n \n\n \"\"\"\n pass\n\ndef delete_notebook_instance(NotebookInstanceName=None):\n \"\"\"\n Deletes an Amazon SageMaker notebook instance. 
Before you can delete a notebook instance, you must call the StopNotebookInstance API.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_notebook_instance(\n NotebookInstanceName='string'\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the Amazon SageMaker notebook instance to delete.\n \n\n """\n pass\n\ndef delete_notebook_instance_lifecycle_config(NotebookInstanceLifecycleConfigName=None):\n """\n Deletes a notebook instance lifecycle configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_notebook_instance_lifecycle_config(\n NotebookInstanceLifecycleConfigName='string'\n )\n \n \n :type NotebookInstanceLifecycleConfigName: string\n :param NotebookInstanceLifecycleConfigName: [REQUIRED]\n The name of the lifecycle configuration to delete.\n \n\n """\n pass\n\ndef delete_tags(ResourceArn=None, TagKeys=None):\n """\n Deletes the specified tags from an Amazon SageMaker resource.\n To list a resource's tags, use the ListTags API.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_tags(\n ResourceArn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource whose tags you want to delete.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n An array of one or more tag keys to delete.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n """\n pass\n\ndef delete_workteam(WorkteamName=None):\n """\n Deletes an existing work team. This operation can't be undone.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_workteam(\n WorkteamName='string'\n )\n \n \n :type WorkteamName: string\n :param WorkteamName: [REQUIRED]\n The name of the work team to delete.\n \n\n :rtype: dict\n :return: {\n 'Success': True|False\n }\n \n \n """\n pass\n\ndef describe_algorithm(AlgorithmName=None):\n """\n Returns a description of the specified algorithm that is in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_algorithm(\n AlgorithmName='string'\n )\n \n \n :type AlgorithmName: string\n :param AlgorithmName: [REQUIRED]\n The name of the algorithm to describe.\n \n\n :rtype: dict\n :return: {\n 'AlgorithmName': 'string',\n 'AlgorithmArn': 'string',\n 'AlgorithmDescription': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingSpecification': {\n 'TrainingImage': 'string',\n 'TrainingImageDigest': 'string',\n 'SupportedHyperParameters': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'Integer'|'Continuous'|'Categorical'|'FreeText',\n 'Range': {\n 'IntegerParameterRangeSpecification': {\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n 'ContinuousParameterRangeSpecification': {\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n 'CategoricalParameterRangeSpecification': {\n 'Values': [\n 'string',\n ]\n }\n },\n 'IsTunable': True|False,\n 'IsRequired': True|False,\n 'DefaultValue': 'string'\n },\n ],\n 'SupportedTrainingInstanceTypes': [\n
'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n ],\n 'SupportsDistributedTraining': True|False,\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ],\n 'TrainingChannels': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'IsRequired': True|False,\n 'SupportedContentTypes': [\n 'string',\n ],\n 'SupportedCompressionTypes': [\n 'None'|'Gzip',\n ],\n 'SupportedInputModes': [\n 'Pipe'|'File',\n ]\n },\n ],\n 'SupportedTuningJobObjectiveMetrics': [\n {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string'\n },\n ]\n },\n 'InferenceSpecification': {\n 'Containers': [\n {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ImageDigest': 'string',\n 'ModelDataUrl': 'string',\n 'ProductId': 'string'\n },\n ],\n 'SupportedTransformInstanceTypes': [\n 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n ],\n 'SupportedRealtimeInferenceInstanceTypes': [\n 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.large'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.large'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n ],\n 'SupportedContentTypes': [\n 'string',\n ],\n 'SupportedResponseMIMETypes': [\n 'string',\n ]\n },\n 'ValidationSpecification': {\n 'ValidationRole': 'string',\n 'ValidationProfiles': [\n {\n 'ProfileName': 'string',\n 'TrainingJobDefinition': {\n 'TrainingInputMode': 'Pipe'|'File',\n 'HyperParameters': {\n 'string': 'string'\n },\n 'InputDataConfig': [\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n 'OutputDataConfig': {\n 'KmsKeyId': 'string',\n 'S3OutputPath': 'string'\n },\n 'ResourceConfig': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 
'VolumeKmsKeyId': 'string'\n },\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n }\n },\n 'TransformJobDefinition': {\n 'MaxConcurrentTransforms': 123,\n 'MaxPayloadInMB': 123,\n 'BatchStrategy': 'MultiRecord'|'SingleRecord',\n 'Environment': {\n 'string': 'string'\n },\n 'TransformInput': {\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string'\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'SplitType': 'None'|'Line'|'RecordIO'|'TFRecord'\n },\n 'TransformOutput': {\n 'S3OutputPath': 'string',\n 'Accept': 'string',\n 'AssembleWith': 'None'|'Line',\n 'KmsKeyId': 'string'\n },\n 'TransformResources': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n 'InstanceCount': 123,\n 'VolumeKmsKeyId': 'string'\n }\n }\n },\n ]\n },\n 'AlgorithmStatus': 'Pending'|'InProgress'|'Completed'|'Failed'|'Deleting',\n 'AlgorithmStatusDetails': {\n 'ValidationStatuses': [\n {\n 'Name': 'string',\n 'Status': 'NotStarted'|'InProgress'|'Completed'|'Failed',\n 'FailureReason': 'string'\n },\n ],\n 'ImageScanStatuses': [\n {\n 'Name': 'string',\n 'Status': 'NotStarted'|'InProgress'|'Completed'|'Failed',\n 'FailureReason': 'string'\n },\n ]\n },\n 'ProductId': 'string',\n 'CertifyForMarketplace': True|False\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_code_repository(CodeRepositoryName=None):\n \"\"\"\n Gets details about the specified git repository.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_code_repository(\n CodeRepositoryName='string'\n )\n \n \n :type CodeRepositoryName: string\n :param CodeRepositoryName: [REQUIRED]\n The name of the git repository to describe.\n \n\n :rtype: dict\n :return: {\n 'CodeRepositoryName': 'string',\n 'CodeRepositoryArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'GitConfig': {\n 'RepositoryUrl': 'string',\n 'Branch': 'string',\n 'SecretArn': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_compilation_job(CompilationJobName=None):\n \"\"\"\n Returns information about a model compilation job.\n To create a model compilation job, use CreateCompilationJob . 
To get information about multiple model compilation jobs, use ListCompilationJobs .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_compilation_job(\n CompilationJobName='string'\n )\n \n \n :type CompilationJobName: string\n :param CompilationJobName: [REQUIRED]\n The name of the model compilation job that you want information about.\n \n\n :rtype: dict\n :return: {\n 'CompilationJobName': 'string',\n 'CompilationJobArn': 'string',\n 'CompilationJobStatus': 'INPROGRESS'|'COMPLETED'|'FAILED'|'STARTING'|'STOPPING'|'STOPPED',\n 'CompilationStartTime': datetime(2015, 1, 1),\n 'CompilationEndTime': datetime(2015, 1, 1),\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'FailureReason': 'string',\n 'ModelArtifacts': {\n 'S3ModelArtifacts': 'string'\n },\n 'RoleArn': 'string',\n 'InputConfig': {\n 'S3Uri': 'string',\n 'DataInputConfig': 'string',\n 'Framework': 'TENSORFLOW'|'MXNET'|'ONNX'|'PYTORCH'|'XGBOOST'\n },\n 'OutputConfig': {\n 'S3OutputLocation': 'string',\n 'TargetDevice': 'ml_m4'|'ml_m5'|'ml_c4'|'ml_c5'|'ml_p2'|'ml_p3'|'jetson_tx1'|'jetson_tx2'|'rasp3b'|'deeplens'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_endpoint(EndpointName=None):\n \"\"\"\n Returns the description of an endpoint.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_endpoint(\n EndpointName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the endpoint.\n \n\n :rtype: dict\n :return: {\n 'EndpointName': 'string',\n 'EndpointArn': 'string',\n 'EndpointConfigName': 'string',\n 'ProductionVariants': [\n {\n 'VariantName': 'string',\n 'DeployedImages': [\n {\n 'SpecifiedImage': 'string',\n 'ResolvedImage': 'string',\n 'ResolutionTime': datetime(2015, 1, 1)\n },\n ],\n 'CurrentWeight': ...,\n 'DesiredWeight': ...,\n 'CurrentInstanceCount': 123,\n 'DesiredInstanceCount': 123\n },\n ],\n 'EndpointStatus': 'OutOfService'|'Creating'|'Updating'|'SystemUpdating'|'RollingBack'|'InService'|'Deleting'|'Failed',\n 'FailureReason': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_endpoint_config(EndpointConfigName=None):\n \"\"\"\n Returns the description of an endpoint configuration created using the CreateEndpointConfig API.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_endpoint_config(\n EndpointConfigName='string'\n )\n \n \n :type EndpointConfigName: string\n :param EndpointConfigName: [REQUIRED]\n The name of the endpoint configuration.\n \n\n :rtype: dict\n :return: {\n 'EndpointConfigName': 'string',\n 'EndpointConfigArn': 'string',\n 'ProductionVariants': [\n {\n 'VariantName': 'string',\n 'ModelName': 'string',\n 'InitialInstanceCount': 123,\n 'InstanceType': 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.large'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.large'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InitialVariantWeight': ...,\n 'AcceleratorType': 'ml.eia1.medium'|'ml.eia1.large'|'ml.eia1.xlarge'\n },\n ],\n 'KmsKeyId': 'string',\n 'CreationTime': 
datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=None):\n \"\"\"\n Gets a description of a hyperparameter tuning job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName='string'\n )\n \n \n :type HyperParameterTuningJobName: string\n :param HyperParameterTuningJobName: [REQUIRED]\n The name of the tuning job to describe.\n \n\n :rtype: dict\n :return: {\n 'HyperParameterTuningJobName': 'string',\n 'HyperParameterTuningJobArn': 'string',\n 'HyperParameterTuningJobConfig': {\n 'Strategy': 'Bayesian',\n 'HyperParameterTuningJobObjective': {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string'\n },\n 'ResourceLimits': {\n 'MaxNumberOfTrainingJobs': 123,\n 'MaxParallelTrainingJobs': 123\n },\n 'ParameterRanges': {\n 'IntegerParameterRanges': [\n {\n 'Name': 'string',\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n ],\n 'ContinuousParameterRanges': [\n {\n 'Name': 'string',\n 'MinValue': 'string',\n 'MaxValue': 'string'\n },\n ],\n 'CategoricalParameterRanges': [\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n },\n 'TrainingJobEarlyStoppingType': 'Off'|'Auto'\n },\n 'TrainingJobDefinition': {\n 'StaticHyperParameters': {\n 'string': 'string'\n },\n 'AlgorithmSpecification': {\n 'TrainingImage': 'string',\n 'TrainingInputMode': 'Pipe'|'File',\n 'AlgorithmName': 'string',\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ]\n },\n 'RoleArn': 'string',\n 'InputDataConfig': [\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n 'VpcConfig': {\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n 'OutputDataConfig': {\n 'KmsKeyId': 'string',\n 'S3OutputPath': 'string'\n },\n 'ResourceConfig': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 'VolumeKmsKeyId': 'string'\n },\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n },\n 'EnableNetworkIsolation': True|False\n },\n 'HyperParameterTuningJobStatus': 'Completed'|'InProgress'|'Failed'|'Stopped'|'Stopping',\n 'CreationTime': datetime(2015, 1, 1),\n 'HyperParameterTuningEndTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'TrainingJobStatusCounters': {\n 'Completed': 123,\n 'InProgress': 123,\n 'RetryableError': 123,\n 'NonRetryableError': 123,\n 'Stopped': 123\n },\n 'ObjectiveStatusCounters': {\n 'Succeeded': 123,\n 'Pending': 123,\n 'Failed': 123\n },\n 'BestTrainingJob': {\n 'TrainingJobName': 'string',\n 'TrainingJobArn': 'string',\n 'TuningJobName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': 
datetime(2015, 1, 1),\n 'TrainingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'TunedHyperParameters': {\n 'string': 'string'\n },\n 'FailureReason': 'string',\n 'FinalHyperParameterTuningJobObjectiveMetric': {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string',\n 'Value': ...\n },\n 'ObjectiveStatus': 'Succeeded'|'Pending'|'Failed'\n },\n 'OverallBestTrainingJob': {\n 'TrainingJobName': 'string',\n 'TrainingJobArn': 'string',\n 'TuningJobName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'TrainingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'TunedHyperParameters': {\n 'string': 'string'\n },\n 'FailureReason': 'string',\n 'FinalHyperParameterTuningJobObjectiveMetric': {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string',\n 'Value': ...\n },\n 'ObjectiveStatus': 'Succeeded'|'Pending'|'Failed'\n },\n 'WarmStartConfig': {\n 'ParentHyperParameterTuningJobs': [\n {\n 'HyperParameterTuningJobName': 'string'\n },\n ],\n 'WarmStartType': 'IdenticalDataAndAlgorithm'|'TransferLearning'\n },\n 'FailureReason': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_labeling_job(LabelingJobName=None):\n \"\"\"\n Gets information about a labeling job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_labeling_job(\n LabelingJobName='string'\n )\n \n \n :type LabelingJobName: string\n :param LabelingJobName: [REQUIRED]\n The name of the labeling job to return information for.\n \n\n :rtype: dict\n :return: {\n 'LabelingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'LabelCounters': {\n 'TotalLabeled': 123,\n 'HumanLabeled': 123,\n 'MachineLabeled': 123,\n 'FailedNonRetryableError': 123,\n 'Unlabeled': 123\n },\n 'FailureReason': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'JobReferenceCode': 'string',\n 'LabelingJobName': 'string',\n 'LabelingJobArn': 'string',\n 'LabelAttributeName': 'string',\n 'InputConfig': {\n 'DataSource': {\n 'S3DataSource': {\n 'ManifestS3Uri': 'string'\n }\n },\n 'DataAttributes': {\n 'ContentClassifiers': [\n 'FreeOfPersonallyIdentifiableInformation'|'FreeOfAdultContent',\n ]\n }\n },\n 'OutputConfig': {\n 'S3OutputPath': 'string',\n 'KmsKeyId': 'string'\n },\n 'RoleArn': 'string',\n 'LabelCategoryConfigS3Uri': 'string',\n 'StoppingConditions': {\n 'MaxHumanLabeledObjectCount': 123,\n 'MaxPercentageOfInputDatasetLabeled': 123\n },\n 'LabelingJobAlgorithmsConfig': {\n 'LabelingJobAlgorithmSpecificationArn': 'string',\n 'InitialActiveLearningModelArn': 'string',\n 'LabelingJobResourceConfig': {\n 'VolumeKmsKeyId': 'string'\n }\n },\n 'HumanTaskConfig': {\n 'WorkteamArn': 'string',\n 'UiConfig': {\n 'UiTemplateS3Uri': 'string'\n },\n 'PreHumanTaskLambdaArn': 'string',\n 'TaskKeywords': [\n 'string',\n ],\n 'TaskTitle': 'string',\n 'TaskDescription': 'string',\n 'NumberOfHumanWorkersPerDataObject': 123,\n 'TaskTimeLimitInSeconds': 123,\n 'TaskAvailabilityLifetimeInSeconds': 123,\n 'MaxConcurrentTaskCount': 123,\n 'AnnotationConsolidationConfig': {\n 'AnnotationConsolidationLambdaArn': 'string'\n },\n 'PublicWorkforceTaskPrice': {\n 'AmountInUsd': {\n 'Dollars': 123,\n 'Cents': 123,\n 'TenthFractionsOfACent': 123\n }\n }\n },\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'LabelingJobOutput': {\n 'OutputDatasetS3Uri': 'string',\n 
'FinalActiveLearningModelArn': 'string'\n }\n }\n \n \n :returns: \n Image classification arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/image-classification\n Text classification arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/text-classification\n Object detection arn:aws:sagemaker:*region* :027400017018:labeling-job-algorithm-specification/object-detection\n \n \"\"\"\n pass\n\ndef describe_model(ModelName=None):\n \"\"\"\n Describes a model that you created using the CreateModel API.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_model(\n ModelName='string'\n )\n \n \n :type ModelName: string\n :param ModelName: [REQUIRED]\n The name of the model.\n \n\n :rtype: dict\n :return: {\n 'ModelName': 'string',\n 'PrimaryContainer': {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ModelDataUrl': 'string',\n 'Environment': {\n 'string': 'string'\n },\n 'ModelPackageName': 'string'\n },\n 'Containers': [\n {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ModelDataUrl': 'string',\n 'Environment': {\n 'string': 'string'\n },\n 'ModelPackageName': 'string'\n },\n ],\n 'ExecutionRoleArn': 'string',\n 'VpcConfig': {\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'ModelArn': 'string',\n 'EnableNetworkIsolation': True|False\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_model_package(ModelPackageName=None):\n \"\"\"\n Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on AWS Marketplace.\n To create models in Amazon SageMaker, buyers can subscribe to model packages listed on AWS Marketplace.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_model_package(\n ModelPackageName='string'\n )\n \n \n :type ModelPackageName: string\n :param ModelPackageName: [REQUIRED]\n The name of the model package to describe.\n \n\n :rtype: dict\n :return: {\n 'ModelPackageName': 'string',\n 'ModelPackageArn': 'string',\n 'ModelPackageDescription': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'InferenceSpecification': {\n 'Containers': [\n {\n 'ContainerHostname': 'string',\n 'Image': 'string',\n 'ImageDigest': 'string',\n 'ModelDataUrl': 'string',\n 'ProductId': 'string'\n },\n ],\n 'SupportedTransformInstanceTypes': [\n 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n ],\n 'SupportedRealtimeInferenceInstanceTypes': [\n 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.large'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.large'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n ],\n 'SupportedContentTypes': [\n 'string',\n ],\n 'SupportedResponseMIMETypes': [\n 'string',\n ]\n },\n 
'SourceAlgorithmSpecification': {\n 'SourceAlgorithms': [\n {\n 'ModelDataUrl': 'string',\n 'AlgorithmName': 'string'\n },\n ]\n },\n 'ValidationSpecification': {\n 'ValidationRole': 'string',\n 'ValidationProfiles': [\n {\n 'ProfileName': 'string',\n 'TransformJobDefinition': {\n 'MaxConcurrentTransforms': 123,\n 'MaxPayloadInMB': 123,\n 'BatchStrategy': 'MultiRecord'|'SingleRecord',\n 'Environment': {\n 'string': 'string'\n },\n 'TransformInput': {\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string'\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'SplitType': 'None'|'Line'|'RecordIO'|'TFRecord'\n },\n 'TransformOutput': {\n 'S3OutputPath': 'string',\n 'Accept': 'string',\n 'AssembleWith': 'None'|'Line',\n 'KmsKeyId': 'string'\n },\n 'TransformResources': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n 'InstanceCount': 123,\n 'VolumeKmsKeyId': 'string'\n }\n }\n },\n ]\n },\n 'ModelPackageStatus': 'Pending'|'InProgress'|'Completed'|'Failed'|'Deleting',\n 'ModelPackageStatusDetails': {\n 'ValidationStatuses': [\n {\n 'Name': 'string',\n 'Status': 'NotStarted'|'InProgress'|'Completed'|'Failed',\n 'FailureReason': 'string'\n },\n ],\n 'ImageScanStatuses': [\n {\n 'Name': 'string',\n 'Status': 'NotStarted'|'InProgress'|'Completed'|'Failed',\n 'FailureReason': 'string'\n },\n ]\n },\n 'CertifyForMarketplace': True|False\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_notebook_instance(NotebookInstanceName=None):\n \"\"\"\n Returns information about a notebook instance.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_notebook_instance(\n NotebookInstanceName='string'\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the notebook instance that you want information about.\n \n\n :rtype: dict\n :return: {\n 'NotebookInstanceArn': 'string',\n 'NotebookInstanceName': 'string',\n 'NotebookInstanceStatus': 'Pending'|'InService'|'Stopping'|'Stopped'|'Failed'|'Deleting'|'Updating',\n 'FailureReason': 'string',\n 'Url': 'string',\n 'InstanceType': 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.t3.medium'|'ml.t3.large'|'ml.t3.xlarge'|'ml.t3.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.c5d.xlarge'|'ml.c5d.2xlarge'|'ml.c5d.4xlarge'|'ml.c5d.9xlarge'|'ml.c5d.18xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge',\n 'SubnetId': 'string',\n 'SecurityGroups': [\n 'string',\n ],\n 'RoleArn': 'string',\n 'KmsKeyId': 'string',\n 'NetworkInterfaceId': 'string',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'CreationTime': datetime(2015, 1, 1),\n 'NotebookInstanceLifecycleConfigName': 'string',\n 'DirectInternetAccess': 'Enabled'|'Disabled',\n 'VolumeSizeInGB': 123,\n 'AcceleratorTypes': [\n 
'ml.eia1.medium'|'ml.eia1.large'|'ml.eia1.xlarge',\n ],\n 'DefaultCodeRepository': 'string',\n 'AdditionalCodeRepositories': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_notebook_instance_lifecycle_config(NotebookInstanceLifecycleConfigName=None):\n \"\"\"\n Returns a description of a notebook instance lifecycle configuration.\n For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_notebook_instance_lifecycle_config(\n NotebookInstanceLifecycleConfigName='string'\n )\n \n \n :type NotebookInstanceLifecycleConfigName: string\n :param NotebookInstanceLifecycleConfigName: [REQUIRED]\n The name of the lifecycle configuration to describe.\n \n\n :rtype: dict\n :return: {\n 'NotebookInstanceLifecycleConfigArn': 'string',\n 'NotebookInstanceLifecycleConfigName': 'string',\n 'OnCreate': [\n {\n 'Content': 'string'\n },\n ],\n 'OnStart': [\n {\n 'Content': 'string'\n },\n ],\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'CreationTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_subscribed_workteam(WorkteamArn=None):\n \"\"\"\n Gets information about a work team provided by a vendor. It returns details about the subscription with a vendor in the AWS Marketplace.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_subscribed_workteam(\n WorkteamArn='string'\n )\n \n \n :type WorkteamArn: string\n :param WorkteamArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the subscribed work team to describe.\n \n\n :rtype: dict\n :return: {\n 'SubscribedWorkteam': {\n 'WorkteamArn': 'string',\n 'MarketplaceTitle': 'string',\n 'SellerName': 'string',\n 'MarketplaceDescription': 'string',\n 'ListingId': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef describe_training_job(TrainingJobName=None):\n \"\"\"\n Returns information about a training job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_training_job(\n TrainingJobName='string'\n )\n \n \n :type TrainingJobName: string\n :param TrainingJobName: [REQUIRED]\n The name of the training job.\n \n\n :rtype: dict\n :return: {\n 'TrainingJobName': 'string',\n 'TrainingJobArn': 'string',\n 'TuningJobArn': 'string',\n 'LabelingJobArn': 'string',\n 'ModelArtifacts': {\n 'S3ModelArtifacts': 'string'\n },\n 'TrainingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'SecondaryStatus': 'Starting'|'LaunchingMLInstances'|'PreparingTrainingStack'|'Downloading'|'DownloadingTrainingImage'|'Training'|'Uploading'|'Stopping'|'Stopped'|'MaxRuntimeExceeded'|'Completed'|'Failed',\n 'FailureReason': 'string',\n 'HyperParameters': {\n 'string': 'string'\n },\n 'AlgorithmSpecification': {\n 'TrainingImage': 'string',\n 'AlgorithmName': 'string',\n 'TrainingInputMode': 'Pipe'|'File',\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ]\n },\n 'RoleArn': 'string',\n 'InputDataConfig': [\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n 'OutputDataConfig': {\n 'KmsKeyId': 
'string',\n 'S3OutputPath': 'string'\n },\n 'ResourceConfig': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 'VolumeKmsKeyId': 'string'\n },\n 'VpcConfig': {\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'SecondaryStatusTransitions': [\n {\n 'Status': 'Starting'|'LaunchingMLInstances'|'PreparingTrainingStack'|'Downloading'|'DownloadingTrainingImage'|'Training'|'Uploading'|'Stopping'|'Stopped'|'MaxRuntimeExceeded'|'Completed'|'Failed',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusMessage': 'string'\n },\n ],\n 'FinalMetricDataList': [\n {\n 'MetricName': 'string',\n 'Value': ...,\n 'Timestamp': datetime(2015, 1, 1)\n },\n ],\n 'EnableNetworkIsolation': True|False\n }\n \n \n :returns: \n LaunchingMLInstances\n PreparingTrainingStack\n DownloadingTrainingImage\n \n \"\"\"\n pass\n\ndef describe_transform_job(TransformJobName=None):\n \"\"\"\n Returns information about a transform job.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_transform_job(\n TransformJobName='string'\n )\n \n \n :type TransformJobName: string\n :param TransformJobName: [REQUIRED]\n The name of the transform job that you want to view details of.\n \n\n :rtype: dict\n :return: {\n 'TransformJobName': 'string',\n 'TransformJobArn': 'string',\n 'TransformJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'FailureReason': 'string',\n 'ModelName': 'string',\n 'MaxConcurrentTransforms': 123,\n 'MaxPayloadInMB': 123,\n 'BatchStrategy': 'MultiRecord'|'SingleRecord',\n 'Environment': {\n 'string': 'string'\n },\n 'TransformInput': {\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string'\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'SplitType': 'None'|'Line'|'RecordIO'|'TFRecord'\n },\n 'TransformOutput': {\n 'S3OutputPath': 'string',\n 'Accept': 'string',\n 'AssembleWith': 'None'|'Line',\n 'KmsKeyId': 'string'\n },\n 'TransformResources': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge',\n 'InstanceCount': 123,\n 'VolumeKmsKeyId': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'TransformStartTime': datetime(2015, 1, 1),\n 'TransformEndTime': datetime(2015, 1, 1),\n 'LabelingJobArn': 'string'\n }\n \n \n :returns: \n A key name prefix might look like this: s3://bucketname/exampleprefix .\n A manifest might look like this: s3://bucketname/example.manifest The manifest 
is an S3 object which is a JSON file with the following format: [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"}, \"relative/path/to/custdata-1\", \"relative/path/custdata-2\", ... ] The preceding JSON matches the following S3Uris : s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.\n \n \"\"\"\n pass\n\ndef describe_workteam(WorkteamName=None):\n \"\"\"\n Gets information about a specific work team. You can see information such as the create date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN).\n See also: AWS API Documentation\n \n \n :example: response = client.describe_workteam(\n WorkteamName='string'\n )\n \n \n :type WorkteamName: string\n :param WorkteamName: [REQUIRED]\n The name of the work team to return a description of.\n \n\n :rtype: dict\n :return: {\n 'Workteam': {\n 'WorkteamName': 'string',\n 'MemberDefinitions': [\n {\n 'CognitoMemberDefinition': {\n 'UserPool': 'string',\n 'UserGroup': 'string',\n 'ClientId': 'string'\n }\n },\n ],\n 'WorkteamArn': 'string',\n 'ProductListingIds': [\n 'string',\n ],\n 'Description': 'string',\n 'SubDomain': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'LastUpdatedDate': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_search_suggestions(Resource=None, SuggestionQuery=None):\n \"\"\"\n An auto-complete API for the search functionality in the Amazon SageMaker console. It returns suggestions of possible matches for the property name to use in Search queries. Provides suggestions for HyperParameters , Tags , and Metrics .\n See also: AWS API Documentation\n \n \n :example: response = client.get_search_suggestions(\n Resource='TrainingJob',\n SuggestionQuery={\n 'PropertyNameQuery': {\n 'PropertyNameHint': 'string'\n }\n }\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n The name of the Amazon SageMaker resource to Search for. 
The only valid Resource value is TrainingJob .\n \n\n :type SuggestionQuery: dict\n :param SuggestionQuery: Limits the property names that are included in the response.\n PropertyNameQuery (dict) --A type of SuggestionQuery . Defines a property name hint. Only property names that match the specified hint are included in the response.\n PropertyNameHint (string) -- [REQUIRED]Text that is part of a property's name. The response includes only the names of hyperparameters, metrics, and tag keys that begin with the text specified in PropertyNameHint .\n \n \n\n :rtype: dict\n :return: {\n 'PropertyNameSuggestions': [\n {\n 'PropertyName': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_algorithms(CreationTimeAfter=None, CreationTimeBefore=None, MaxResults=None, NameContains=None, NextToken=None, SortBy=None, SortOrder=None):\n \"\"\"\n Lists the machine learning algorithms that have been created.\n See also: AWS API Documentation\n \n \n :example: response = client.list_algorithms(\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n MaxResults=123,\n NameContains='string',\n NextToken='string',\n SortBy='Name'|'CreationTime',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only algorithms created after the specified time (timestamp).\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only algorithms created before the specified time (timestamp).\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of algorithms to return in the response.\n\n :type NameContains: string\n :param NameContains: A string in the algorithm name. This filter returns only algorithms whose name contains the specified string.\n\n :type NextToken: string\n :param NextToken: If the response to a previous ListAlgorithms request was truncated, the response includes a NextToken . To retrieve the next set of algorithms, use the token in the next request.\n\n :type SortBy: string\n :param SortBy: The parameter by which to sort the results. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for the results. 
The default is Ascending .\n\n :rtype: dict\n :return: {\n 'AlgorithmSummaryList': [\n {\n 'AlgorithmName': 'string',\n 'AlgorithmArn': 'string',\n 'AlgorithmDescription': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'AlgorithmStatus': 'Pending'|'InProgress'|'Completed'|'Failed'|'Deleting'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_code_repositories(CreationTimeAfter=None, CreationTimeBefore=None, LastModifiedTimeAfter=None, LastModifiedTimeBefore=None, MaxResults=None, NameContains=None, NextToken=None, SortBy=None, SortOrder=None):\n \"\"\"\n Gets a list of the git repositories in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_code_repositories(\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n MaxResults=123,\n NameContains='string',\n NextToken='string',\n SortBy='Name'|'CreationTime'|'LastModifiedTime',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only git repositories that were created after the specified time.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only git repositories that were created before the specified time.\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only git repositories that were last modified after the specified time.\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only git repositories that were last modified before the specified time.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of git repositories to return in the response.\n\n :type NameContains: string\n :param NameContains: A string in the git repository name. This filter returns only repositories whose name contains the specified string.\n\n :type NextToken: string\n :param NextToken: If the result of a ListCodeRepositoriesOutput request was truncated, the response includes a NextToken . To get the next set of git repositories, use the token in the next request.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is Name .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :rtype: dict\n :return: {\n 'CodeRepositorySummaryList': [\n {\n 'CodeRepositoryName': 'string',\n 'CodeRepositoryArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'GitConfig': {\n 'RepositoryUrl': 'string',\n 'Branch': 'string',\n 'SecretArn': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Name\n Amazon Resource Name (ARN)\n Creation time\n Last modified time\n Configuration information, including the URL location of the repository and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.\n \n \n \"\"\"\n pass\n\ndef list_compilation_jobs(NextToken=None, MaxResults=None, CreationTimeAfter=None, CreationTimeBefore=None, LastModifiedTimeAfter=None, LastModifiedTimeBefore=None, NameContains=None, StatusEquals=None, SortBy=None, SortOrder=None):\n \"\"\"\n Lists model compilation jobs that satisfy various filters.\n To create a model compilation job, use CreateCompilationJob . 
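A minimal NextToken pagination sketch (hedged; sm is an assumed boto3 SageMaker client and the MaxResults value is illustrative) shows how the token threads through repeated calls:\n token = None\n while True:\n kwargs = {'MaxResults': 50}\n if token:\n kwargs['NextToken'] = token\n page = sm.list_compilation_jobs(**kwargs)\n for summary in page['CompilationJobSummaries']:\n print(summary['CompilationJobName'], summary['CompilationJobStatus'])\n token = page.get('NextToken')\n if not token:\n break\n 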
To get information about a particular model compilation job you have created, use DescribeCompilationJob .\n See also: AWS API Documentation\n \n \n :example: response = client.list_compilation_jobs(\n NextToken='string',\n MaxResults=123,\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n NameContains='string',\n StatusEquals='INPROGRESS'|'COMPLETED'|'FAILED'|'STARTING'|'STOPPING'|'STOPPED',\n SortBy='Name'|'CreationTime'|'Status',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type NextToken: string\n :param NextToken: If the result of the previous ListCompilationJobs request was truncated, the response includes a NextToken . To retrieve the next set of model compilation jobs, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of model compilation jobs to return in the response.\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns the model compilation jobs that were created after a specified time.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns the model compilation jobs that were created before a specified time.\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns the model compilation jobs that were modified after a specified time.\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns the model compilation jobs that were modified before a specified time.\n\n :type NameContains: string\n :param NameContains: A filter that returns the model compilation jobs whose name contains a specified string.\n\n :type StatusEquals: string\n :param StatusEquals: A filter that retrieves model compilation jobs with a specific DescribeCompilationJobResponse$CompilationJobStatus status.\n\n :type SortBy: string\n :param SortBy: The field by which to sort results. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :rtype: dict\n :return: {\n 'CompilationJobSummaries': [\n {\n 'CompilationJobName': 'string',\n 'CompilationJobArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'CompilationStartTime': datetime(2015, 1, 1),\n 'CompilationEndTime': datetime(2015, 1, 1),\n 'CompilationTargetDevice': 'ml_m4'|'ml_m5'|'ml_c4'|'ml_c5'|'ml_p2'|'ml_p3'|'jetson_tx1'|'jetson_tx2'|'rasp3b'|'deeplens',\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'CompilationJobStatus': 'INPROGRESS'|'COMPLETED'|'FAILED'|'STARTING'|'STOPPING'|'STOPPED'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_endpoint_configs(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n \"\"\"\n Lists endpoint configurations.\n See also: AWS API Documentation\n \n \n :example: response = client.list_endpoint_configs(\n SortBy='Name'|'CreationTime',\n SortOrder='Ascending'|'Descending',\n NextToken='string',\n MaxResults=123,\n NameContains='string',\n CreationTimeBefore=datetime(2015, 1, 1),\n CreationTimeAfter=datetime(2015, 1, 1)\n )\n \n \n :type SortBy: string\n :param SortBy: The field to sort results by. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. 
The default is Ascending .\n\n :type NextToken: string\n :param NextToken: If the result of the previous ListEndpointConfig request was truncated, the response includes a NextToken . To retrieve the next set of endpoint configurations, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of endpoint configurations to return in the response.\n\n :type NameContains: string\n :param NameContains: A string in the endpoint configuration name. This filter returns only endpoint configurations whose name contains the specified string.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only endpoint configurations created before the specified time (timestamp).\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only endpoint configurations created after the specified time (timestamp).\n\n :rtype: dict\n :return: {\n 'EndpointConfigs': [\n {\n 'EndpointConfigName': 'string',\n 'EndpointConfigArn': 'string',\n 'CreationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_endpoints(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None):\n \"\"\"\n Lists endpoints.\n See also: AWS API Documentation\n \n \n :example: response = client.list_endpoints(\n SortBy='Name'|'CreationTime'|'Status',\n SortOrder='Ascending'|'Descending',\n NextToken='string',\n MaxResults=123,\n NameContains='string',\n CreationTimeBefore=datetime(2015, 1, 1),\n CreationTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n StatusEquals='OutOfService'|'Creating'|'Updating'|'SystemUpdating'|'RollingBack'|'InService'|'Deleting'|'Failed'\n )\n \n \n :type SortBy: string\n :param SortBy: Sorts the list of results. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :type NextToken: string\n :param NextToken: If the result of a ListEndpoints request was truncated, the response includes a NextToken . To retrieve the next set of endpoints, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of endpoints to return in the response.\n\n :type NameContains: string\n :param NameContains: A string in endpoint names. 
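For example, NameContains='prod' (an illustrative value) matches endpoint names such as 'prod-recommender' and 'preprod-ranker'. 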
This filter returns only endpoints whose name contains the specified string.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only endpoints that were created before the specified time (timestamp).\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only endpoints that were created after the specified time (timestamp).\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only endpoints that were modified before the specified timestamp.\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only endpoints that were modified after the specified timestamp.\n\n :type StatusEquals: string\n :param StatusEquals: A filter that returns only endpoints with the specified status.\n\n :rtype: dict\n :return: {\n 'Endpoints': [\n {\n 'EndpointName': 'string',\n 'EndpointArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'EndpointStatus': 'OutOfService'|'Creating'|'Updating'|'SystemUpdating'|'RollingBack'|'InService'|'Deleting'|'Failed'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n OutOfService : Endpoint is not available to take incoming requests.\n Creating : CreateEndpoint is executing.\n Updating : UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.\n SystemUpdating : Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count.\n RollingBack : Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly.\n InService : Endpoint is available to process incoming requests.\n Deleting : DeleteEndpoint is executing.\n Failed : Endpoint could not be created, updated, or re-scaled. Use DescribeEndpointOutput$FailureReason for information about the failure. 
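(A hedged inspection sketch: sm.describe_endpoint(EndpointName='my-endpoint')['FailureReason'] , where sm is an assumed boto3 SageMaker client and the endpoint name is illustrative.) 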
DeleteEndpoint is the only operation that can be performed on a failed endpoint.\n \n \"\"\"\n pass\n\ndef list_hyper_parameter_tuning_jobs(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeAfter=None, CreationTimeBefore=None, LastModifiedTimeAfter=None, LastModifiedTimeBefore=None, StatusEquals=None):\n \"\"\"\n Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_hyper_parameter_tuning_jobs(\n NextToken='string',\n MaxResults=123,\n SortBy='Name'|'Status'|'CreationTime',\n SortOrder='Ascending'|'Descending',\n NameContains='string',\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n StatusEquals='Completed'|'InProgress'|'Failed'|'Stopped'|'Stopping'\n )\n \n \n :type NextToken: string\n :param NextToken: If the result of the previous ListHyperParameterTuningJobs request was truncated, the response includes a NextToken . To retrieve the next set of tuning jobs, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of tuning jobs to return. The default value is 10.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is Name .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :type NameContains: string\n :param NameContains: A string in the tuning job name. This filter returns only tuning jobs whose name contains the specified string.\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only tuning jobs that were created after the specified time.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only tuning jobs that were created before the specified time.\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only tuning jobs that were modified after the specified time.\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only tuning jobs that were modified before the specified time.\n\n :type StatusEquals: string\n :param StatusEquals: A filter that returns only tuning jobs with the specified status.\n\n :rtype: dict\n :return: {\n 'HyperParameterTuningJobSummaries': [\n {\n 'HyperParameterTuningJobName': 'string',\n 'HyperParameterTuningJobArn': 'string',\n 'HyperParameterTuningJobStatus': 'Completed'|'InProgress'|'Failed'|'Stopped'|'Stopping',\n 'Strategy': 'Bayesian',\n 'CreationTime': datetime(2015, 1, 1),\n 'HyperParameterTuningEndTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'TrainingJobStatusCounters': {\n 'Completed': 123,\n 'InProgress': 123,\n 'RetryableError': 123,\n 'NonRetryableError': 123,\n 'Stopped': 123\n },\n 'ObjectiveStatusCounters': {\n 'Succeeded': 123,\n 'Pending': 123,\n 'Failed': 123\n },\n 'ResourceLimits': {\n 'MaxNumberOfTrainingJobs': 123,\n 'MaxParallelTrainingJobs': 123\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_labeling_jobs(CreationTimeAfter=None, CreationTimeBefore=None, LastModifiedTimeAfter=None, LastModifiedTimeBefore=None, MaxResults=None, NextToken=None, NameContains=None, SortBy=None, SortOrder=None, StatusEquals=None):\n \"\"\"\n Gets a list of labeling jobs.\n See also: AWS API 
Documentation\n \n \n :example: response = client.list_labeling_jobs(\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n MaxResults=123,\n NextToken='string',\n NameContains='string',\n SortBy='Name'|'CreationTime'|'Status',\n SortOrder='Ascending'|'Descending',\n StatusEquals='InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped'\n )\n \n \n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only labeling jobs created after the specified time (timestamp).\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only labeling jobs created before the specified time (timestamp).\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only labeling jobs modified after the specified time (timestamp).\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only labeling jobs modified before the specified time (timestamp).\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of labeling jobs to return in each page of the response.\n\n :type NextToken: string\n :param NextToken: If the result of the previous ListLabelingJobs request was truncated, the response includes a NextToken . To retrieve the next set of labeling jobs, use the token in the next request.\n\n :type NameContains: string\n :param NameContains: A string in the labeling job name. This filter returns only labeling jobs whose name contains the specified string.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. 
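For example, SortOrder='Descending' together with SortBy='CreationTime' (that is, client.list_labeling_jobs(SortBy='CreationTime', SortOrder='Descending') ) lists the most recently created labeling jobs first. 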
The default is Ascending .\n\n :type StatusEquals: string\n :param StatusEquals: A filter that retrieves only labeling jobs with a specific status.\n\n :rtype: dict\n :return: {\n 'LabelingJobSummaryList': [\n {\n 'LabelingJobName': 'string',\n 'LabelingJobArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'LabelingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'LabelCounters': {\n 'TotalLabeled': 123,\n 'HumanLabeled': 123,\n 'MachineLabeled': 123,\n 'FailedNonRetryableError': 123,\n 'Unlabeled': 123\n },\n 'WorkteamArn': 'string',\n 'PreHumanTaskLambdaArn': 'string',\n 'AnnotationConsolidationLambdaArn': 'string',\n 'FailureReason': 'string',\n 'LabelingJobOutput': {\n 'OutputDatasetS3Uri': 'string',\n 'FinalActiveLearningModelArn': 'string'\n },\n 'InputConfig': {\n 'DataSource': {\n 'S3DataSource': {\n 'ManifestS3Uri': 'string'\n }\n },\n 'DataAttributes': {\n 'ContentClassifiers': [\n 'FreeOfPersonallyIdentifiableInformation'|'FreeOfAdultContent',\n ]\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_labeling_jobs_for_workteam(WorkteamArn=None, MaxResults=None, NextToken=None, CreationTimeAfter=None, CreationTimeBefore=None, JobReferenceCodeContains=None, SortBy=None, SortOrder=None):\n \"\"\"\n Gets a list of labeling jobs assigned to a specified work team.\n See also: AWS API Documentation\n \n \n :example: response = client.list_labeling_jobs_for_workteam(\n WorkteamArn='string',\n MaxResults=123,\n NextToken='string',\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n JobReferenceCodeContains='string',\n SortBy='CreationTime',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type WorkteamArn: string\n :param WorkteamArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the work team for which you want to see labeling jobs.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of labeling jobs to return in each page of the response.\n\n :type NextToken: string\n :param NextToken: If the result of the previous ListLabelingJobsForWorkteam request was truncated, the response includes a NextToken . To retrieve the next set of labeling jobs, use the token in the next request.\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only labeling jobs created after the specified time (timestamp).\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only labeling jobs created before the specified time (timestamp).\n\n :type JobReferenceCodeContains: string\n :param JobReferenceCodeContains: A filter that limits jobs to only the ones whose job reference code contains the specified string.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. 
The default is Ascending .\n\n :rtype: dict\n :return: {\n 'LabelingJobSummaryList': [\n {\n 'LabelingJobName': 'string',\n 'JobReferenceCode': 'string',\n 'WorkRequesterAccountId': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LabelCounters': {\n 'HumanLabeled': 123,\n 'PendingHuman': 123,\n 'Total': 123\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_model_packages(CreationTimeAfter=None, CreationTimeBefore=None, MaxResults=None, NameContains=None, NextToken=None, SortBy=None, SortOrder=None):\n \"\"\"\n Lists the model packages that have been created.\n See also: AWS API Documentation\n \n \n :example: response = client.list_model_packages(\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n MaxResults=123,\n NameContains='string',\n NextToken='string',\n SortBy='Name'|'CreationTime',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only model packages created after the specified time (timestamp).\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only model packages created before the specified time (timestamp).\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of model packages to return in the response.\n\n :type NameContains: string\n :param NameContains: A string in the model package name. This filter returns only model packages whose name contains the specified string.\n\n :type NextToken: string\n :param NextToken: If the response to a previous ListModelPackages request was truncated, the response includes a NextToken . To retrieve the next set of model packages, use the token in the next request.\n\n :type SortBy: string\n :param SortBy: The parameter by which to sort the results. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for the results. The default is Ascending .\n\n :rtype: dict\n :return: {\n 'ModelPackageSummaryList': [\n {\n 'ModelPackageName': 'string',\n 'ModelPackageArn': 'string',\n 'ModelPackageDescription': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'ModelPackageStatus': 'Pending'|'InProgress'|'Completed'|'Failed'|'Deleting'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n \"\"\"\n Lists models created with the CreateModel API.\n See also: AWS API Documentation\n \n \n :example: response = client.list_models(\n SortBy='Name'|'CreationTime',\n SortOrder='Ascending'|'Descending',\n NextToken='string',\n MaxResults=123,\n NameContains='string',\n CreationTimeBefore=datetime(2015, 1, 1),\n CreationTimeAfter=datetime(2015, 1, 1)\n )\n \n \n :type SortBy: string\n :param SortBy: Sorts the list of results. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :type NextToken: string\n :param NextToken: If the response to a previous ListModels request was truncated, the response includes a NextToken . To retrieve the next set of models, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of models to return in the response.\n\n :type NameContains: string\n :param NameContains: A string in the model name. 
This filter returns only models whose name contains the specified string.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only models created before the specified time (timestamp).\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only models created after the specified time (timestamp).\n\n :rtype: dict\n :return: {\n 'Models': [\n {\n 'ModelName': 'string',\n 'ModelArn': 'string',\n 'CreationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_notebook_instance_lifecycle_configs(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None):\n \"\"\"\n Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API.\n See also: AWS API Documentation\n \n \n :example: response = client.list_notebook_instance_lifecycle_configs(\n NextToken='string',\n MaxResults=123,\n SortBy='Name'|'CreationTime'|'LastModifiedTime',\n SortOrder='Ascending'|'Descending',\n NameContains='string',\n CreationTimeBefore=datetime(2015, 1, 1),\n CreationTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1)\n )\n \n \n :type NextToken: string\n :param NextToken: If the result of a ListNotebookInstanceLifecycleConfigs request was truncated, the response includes a NextToken . To get the next set of lifecycle configurations, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of lifecycle configurations to return in the response.\n\n :type SortBy: string\n :param SortBy: Sorts the list of results. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results.\n\n :type NameContains: string\n :param NameContains: A string in the lifecycle configuration name. 
This filter returns only lifecycle configurations whose name contains the specified string.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only lifecycle configurations that were created before the specified time (timestamp).\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only lifecycle configurations that were created after the specified time (timestamp).\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only lifecycle configurations that were modified before the specified time (timestamp).\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only lifecycle configurations that were modified after the specified time (timestamp).\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'NotebookInstanceLifecycleConfigs': [\n {\n 'NotebookInstanceLifecycleConfigName': 'string',\n 'NotebookInstanceLifecycleConfigArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1)\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n \"\"\"\n Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_notebook_instances(\n NextToken='string',\n MaxResults=123,\n SortBy='Name'|'CreationTime'|'Status',\n SortOrder='Ascending'|'Descending',\n NameContains='string',\n CreationTimeBefore=datetime(2015, 1, 1),\n CreationTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n StatusEquals='Pending'|'InService'|'Stopping'|'Stopped'|'Failed'|'Deleting'|'Updating',\n NotebookInstanceLifecycleConfigNameContains='string',\n DefaultCodeRepositoryContains='string',\n AdditionalCodeRepositoryEquals='string'\n )\n \n \n :type NextToken: string\n :param NextToken: If the previous call to the ListNotebookInstances is truncated, the response includes a NextToken . You can use this token in your subsequent ListNotebookInstances request to fetch the next set of notebook instances.\n Note\n You might specify a filter or a sort order in your request. When response is truncated, you must use the same values for the filter and sort order in the next request.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of notebook instances to return.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is Name .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results.\n\n :type NameContains: string\n :param NameContains: A string in the notebook instances' name. 
This filter returns only notebook instances whose name contains the specified string.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only notebook instances that were created before the specified time (timestamp).\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only notebook instances that were created after the specified time (timestamp).\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only notebook instances that were modified before the specified time (timestamp).\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only notebook instances that were modified after the specified time (timestamp).\n\n :type StatusEquals: string\n :param StatusEquals: A filter that returns only notebook instances with the specified status.\n\n :type NotebookInstanceLifecycleConfigNameContains: string\n :param NotebookInstanceLifecycleConfigNameContains: A string in the name of a notebook instance lifecycle configuration associated with this notebook instance. This filter returns only notebook instances associated with a lifecycle configuration with a name that contains the specified string.\n\n :type DefaultCodeRepositoryContains: string\n :param DefaultCodeRepositoryContains: A string in the name or URL of a git repository associated with this notebook instance. This filter returns only notebook instances associated with a git repository with a name that contains the specified string.\n\n :type AdditionalCodeRepositoryEquals: string\n :param AdditionalCodeRepositoryEquals: A filter that returns only notebook instances associated with the specified git repository.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'NotebookInstances': [\n {\n 'NotebookInstanceName': 'string',\n 'NotebookInstanceArn': 'string',\n 'NotebookInstanceStatus': 'Pending'|'InService'|'Stopping'|'Stopped'|'Failed'|'Deleting'|'Updating',\n 'Url': 'string',\n 'InstanceType': 'ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.t3.medium'|'ml.t3.large'|'ml.t3.xlarge'|'ml.t3.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.c5d.xlarge'|'ml.c5d.2xlarge'|'ml.c5d.4xlarge'|'ml.c5d.9xlarge'|'ml.c5d.18xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'NotebookInstanceLifecycleConfigName': 'string',\n 'DefaultCodeRepository': 'string',\n 'AdditionalCodeRepositories': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_subscribed_workteams(NameContains=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the work teams that you are subscribed to in the AWS Marketplace. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.\n See also: AWS API Documentation\n \n \n :example: response = client.list_subscribed_workteams(\n NameContains='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NameContains: string\n :param NameContains: A string in the work team name. 
def list_subscribed_workteams(NameContains=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of the work teams that you are subscribed to in the AWS Marketplace. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.\n See also: AWS API Documentation\n \n \n :example: response = client.list_subscribed_workteams(\n NameContains='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NameContains: string\n :param NameContains: A string in the work team name. This filter returns only work teams whose name contains the specified string.\n\n :type NextToken: string\n :param NextToken: If the result of the previous ListSubscribedWorkteams request was truncated, the response includes a NextToken . To retrieve the next set of work teams, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of work teams to return in each page of the response.\n\n :rtype: dict\n :return: {\n 'SubscribedWorkteams': [\n {\n 'WorkteamArn': 'string',\n 'MarketplaceTitle': 'string',\n 'SellerName': 'string',\n 'MarketplaceDescription': 'string',\n 'ListingId': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags(ResourceArn=None, NextToken=None, MaxResults=None):\n \"\"\"\n Returns the tags for the specified Amazon SageMaker resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags(\n ResourceArn='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.\n \n\n :type NextToken: string\n :param NextToken: If the response to the previous ListTags request is truncated, Amazon SageMaker returns this token. To retrieve the next set of tags, use it in the subsequent request.\n\n :type MaxResults: integer\n :param MaxResults: Maximum number of tags to return.\n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_training_jobs(NextToken=None, MaxResults=None, CreationTimeAfter=None, CreationTimeBefore=None, LastModifiedTimeAfter=None, LastModifiedTimeBefore=None, NameContains=None, StatusEquals=None, SortBy=None, SortOrder=None):\n \"\"\"\n Lists training jobs.\n See also: AWS API Documentation\n \n \n :example: response = client.list_training_jobs(\n NextToken='string',\n MaxResults=123,\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n NameContains='string',\n StatusEquals='InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n SortBy='Name'|'CreationTime'|'Status',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type NextToken: string\n :param NextToken: If the result of the previous ListTrainingJobs request was truncated, the response includes a NextToken . To retrieve the next set of training jobs, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of training jobs to return in the response.\n\n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only training jobs created after the specified time (timestamp).\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only training jobs created before the specified time (timestamp).\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only training jobs modified after the specified time (timestamp).\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only training jobs modified before the specified time (timestamp).\n\n :type NameContains: string\n :param NameContains: A string in the training job name. 
This filter returns only training jobs whose name contains the specified string.\n\n :type StatusEquals: string\n :param StatusEquals: A filter that retrieves only training jobs with a specific status.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :rtype: dict\n :return: {\n 'TrainingJobSummaries': [\n {\n 'TrainingJobName': 'string',\n 'TrainingJobArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'TrainingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_training_jobs_for_hyper_parameter_tuning_job(HyperParameterTuningJobName=None, NextToken=None, MaxResults=None, StatusEquals=None, SortBy=None, SortOrder=None):\n \"\"\"\n Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.\n See also: AWS API Documentation\n \n \n :example: response = client.list_training_jobs_for_hyper_parameter_tuning_job(\n HyperParameterTuningJobName='string',\n NextToken='string',\n MaxResults=123,\n StatusEquals='InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n SortBy='Name'|'CreationTime'|'Status'|'FinalObjectiveMetricValue',\n SortOrder='Ascending'|'Descending'\n )\n \n \n :type HyperParameterTuningJobName: string\n :param HyperParameterTuningJobName: [REQUIRED]\n The name of the tuning job whose training jobs you want to list.\n \n\n :type NextToken: string\n :param NextToken: If the result of the previous ListTrainingJobsForHyperParameterTuningJob request was truncated, the response includes a NextToken . To retrieve the next set of training jobs, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of training jobs to return. The default value is 10.\n\n :type StatusEquals: string\n :param StatusEquals: A filter that returns only training jobs with the specified status.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is Name .\n If the value of this field is FinalObjectiveMetricValue , any training jobs that did not return an objective metric are not listed.\n \n\n :type SortOrder: string\n :param SortOrder: The sort order for results. 
The default is Ascending .\n\n :rtype: dict\n :return: {\n 'TrainingJobSummaries': [\n {\n 'TrainingJobName': 'string',\n 'TrainingJobArn': 'string',\n 'TuningJobName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 'TrainingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'TunedHyperParameters': {\n 'string': 'string'\n },\n 'FailureReason': 'string',\n 'FinalHyperParameterTuningJobObjectiveMetric': {\n 'Type': 'Maximize'|'Minimize',\n 'MetricName': 'string',\n 'Value': ...\n },\n 'ObjectiveStatus': 'Succeeded'|'Pending'|'Failed'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef list_transform_jobs(CreationTimeAfter=None, CreationTimeBefore=None, LastModifiedTimeAfter=None, LastModifiedTimeBefore=None, NameContains=None, StatusEquals=None, SortBy=None, SortOrder=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists transform jobs.\n See also: AWS API Documentation\n \n \n :example: response = client.list_transform_jobs(\n CreationTimeAfter=datetime(2015, 1, 1),\n CreationTimeBefore=datetime(2015, 1, 1),\n LastModifiedTimeAfter=datetime(2015, 1, 1),\n LastModifiedTimeBefore=datetime(2015, 1, 1),\n NameContains='string',\n StatusEquals='InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n SortBy='Name'|'CreationTime'|'Status',\n SortOrder='Ascending'|'Descending',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CreationTimeAfter: datetime\n :param CreationTimeAfter: A filter that returns only transform jobs created after the specified time.\n\n :type CreationTimeBefore: datetime\n :param CreationTimeBefore: A filter that returns only transform jobs created before the specified time.\n\n :type LastModifiedTimeAfter: datetime\n :param LastModifiedTimeAfter: A filter that returns only transform jobs modified after the specified time.\n\n :type LastModifiedTimeBefore: datetime\n :param LastModifiedTimeBefore: A filter that returns only transform jobs modified before the specified time.\n\n :type NameContains: string\n :param NameContains: A string in the transform job name. This filter returns only transform jobs whose name contains the specified string.\n\n :type StatusEquals: string\n :param StatusEquals: A filter that retrieves only transform jobs with a specific status.\n\n :type SortBy: string\n :param SortBy: The field to sort results by. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Descending .\n\n :type NextToken: string\n :param NextToken: If the result of the previous ListTransformJobs request was truncated, the response includes a NextToken . To retrieve the next set of transform jobs, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of transform jobs to return in the response. 
The default value is 10 .\n\n :rtype: dict\n :return: {\n 'TransformJobSummaries': [\n {\n 'TransformJobName': 'string',\n 'TransformJobArn': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'TransformEndTime': datetime(2015, 1, 1),\n 'LastModifiedTime': datetime(2015, 1, 1),\n 'TransformJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'FailureReason': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_workteams(SortBy=None, SortOrder=None, NameContains=None, NextToken=None, MaxResults=None):\n \"\"\"\n Gets a list of work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.\n See also: AWS API Documentation\n \n \n :example: response = client.list_workteams(\n SortBy='Name'|'CreateDate',\n SortOrder='Ascending'|'Descending',\n NameContains='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type SortBy: string\n :param SortBy: The field to sort results by. The default is CreationTime .\n\n :type SortOrder: string\n :param SortOrder: The sort order for results. The default is Ascending .\n\n :type NameContains: string\n :param NameContains: A string in the work team's name. This filter returns only work teams whose name contains the specified string.\n\n :type NextToken: string\n :param NextToken: If the result of the previous ListWorkteams request was truncated, the response includes a NextToken . To retrieve the next set of work teams, use the token in the next request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of work teams to return in each page of the response.\n\n :rtype: dict\n :return: {\n 'Workteams': [\n {\n 'WorkteamName': 'string',\n 'MemberDefinitions': [\n {\n 'CognitoMemberDefinition': {\n 'UserPool': 'string',\n 'UserGroup': 'string',\n 'ClientId': 'string'\n }\n },\n ],\n 'WorkteamArn': 'string',\n 'ProductListingIds': [\n 'string',\n ],\n 'Description': 'string',\n 'SubDomain': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'LastUpdatedDate': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef render_ui_template(UiTemplate=None, Task=None, RoleArn=None):\n \"\"\"\n Renders the UI template so that you can preview the worker's experience.\n See also: AWS API Documentation\n \n \n :example: response = client.render_ui_template(\n UiTemplate={\n 'Content': 'string'\n },\n Task={\n 'Input': 'string'\n },\n RoleArn='string'\n )\n \n \n :type UiTemplate: dict\n :param UiTemplate: [REQUIRED]\n A Template object containing the worker UI template to render.\n Content (string) -- [REQUIRED]The content of the Liquid template for the worker user interface.\n \n\n :type Task: dict\n :param Task: [REQUIRED]\n A RenderableTask object containing a representative task to render.\n Input (string) -- [REQUIRED]A JSON object that contains values for the variables defined in the template. It is made available to the template under the substitution variable task.input . For example, if you define a variable task.input.text in your template, you can supply the variable in the JSON object as 'text': 'sample text' .\n \n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\n The Amazon Resource Name (ARN) that has access to the S3 objects that are used by the template.\n \n\n :rtype: dict\n :return: {\n 'RenderedContent': 'string',\n 'Errors': [\n {\n 'Code': 'string',\n 'Message': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n
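# Illustrative sketch (not part of the generated stubs): composing a request for the search API defined below. It uses the documented 'Metrics.<name>' filter form with the 'GreaterThan' operator from the stub's enum; 'client' is an assumed boto3 SageMaker client.\ndef _example_search_accurate_training_jobs(client):\n    return client.search(\n        Resource='TrainingJob',\n        SearchExpression={\n            'Filters': [\n                {'Name': 'Metrics.accuracy', 'Operator': 'GreaterThan', 'Value': '0.9'},\n            ],\n        },\n    )\n\n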
def search(Resource=None, SearchExpression=None, SortBy=None, SortOrder=None, NextToken=None, MaxResults=None):\n \"\"\"\n Finds Amazon SageMaker resources that match a search query. Matching resource objects are returned as a list of SearchResult objects in the response. You can sort the search results by any resource property in an ascending or descending order.\n You can query against the following value types: numerical, text, Booleans, and timestamps.\n See also: AWS API Documentation\n \n \n :example: response = client.search(\n Resource='TrainingJob',\n SearchExpression={\n 'Filters': [\n {\n 'Name': 'string',\n 'Operator': 'Equals'|'NotEquals'|'GreaterThan'|'GreaterThanOrEqualTo'|'LessThan'|'LessThanOrEqualTo'|'Contains',\n 'Value': 'string'\n },\n ],\n 'NestedFilters': [\n {\n 'NestedPropertyName': 'string',\n 'Filters': [\n {\n 'Name': 'string',\n 'Operator': 'Equals'|'NotEquals'|'GreaterThan'|'GreaterThanOrEqualTo'|'LessThan'|'LessThanOrEqualTo'|'Contains',\n 'Value': 'string'\n },\n ]\n },\n ],\n 'SubExpressions': [\n {'... recursive ...'},\n ],\n 'Operator': 'And'|'Or'\n },\n SortBy='string',\n SortOrder='Ascending'|'Descending',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Resource: string\n :param Resource: [REQUIRED]\n The name of the Amazon SageMaker resource to search for. Currently, the only valid Resource value is TrainingJob .\n \n\n :type SearchExpression: dict\n :param SearchExpression: A Boolean conditional statement. Resource objects must satisfy this condition to be included in search results. You must provide at least one subexpression, filter, or nested filter. The maximum number of recursive SubExpressions , NestedFilters , and Filters that can be included in a SearchExpression object is 50.\n Filters (list) --A list of filter objects.\n (dict) --A conditional statement for a search expression that includes a Boolean operator, a resource property, and a value.\n If you don't specify an Operator and a Value , the filter searches for only the specified property. For example, defining a Filter for the FailureReason for the TrainingJob Resource searches for training job objects that have a value in the FailureReason field.\n If you specify a Value , but not an Operator , Amazon SageMaker uses the equals operator as the default.\n In search, there are several property types:\n Metrics\n To define a metric filter, enter a value using the form 'Metrics.<name>' , where <name> is a metric name. For example, the following filter searches for training jobs with an 'accuracy' metric greater than '0.9' :\n {'Name': 'Metrics.accuracy',\n 'Operator': 'GREATER_THAN',\n 'Value': '0.9'\n }\n HyperParameters\n To define a hyperparameter filter, enter a value with the form 'HyperParameters.<name>' . Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. 
For example, the following filter is satisfied by training jobs with a 'learning_rate' hyperparameter that is less than '0.5' :\n {'Name': 'HyperParameters.learning_rate',\n 'Operator': 'LESS_THAN',\n 'Value': '0.5'\n }\n Tags\n To define a tag filter, enter a value with the form 'Tags.<key>' .\n Name (string) -- [REQUIRED]A property name. For example, TrainingJobName . For the list of valid property names returned in a search result for each supported resource, see TrainingJob properties. You must specify a valid property name for the resource.\n Operator (string) --A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:\n Equals\n The specified resource in Name equals the specified Value .\n NotEquals\n The specified resource in Name does not equal the specified Value .\n GreaterThan\n The specified resource in Name is greater than the specified Value . Not supported for text-based properties.\n GreaterThanOrEqualTo\n The specified resource in Name is greater than or equal to the specified Value . Not supported for text-based properties.\n LessThan\n The specified resource in Name is less than the specified Value . Not supported for text-based properties.\n LessThanOrEqualTo\n The specified resource in Name is less than or equal to the specified Value . Not supported for text-based properties.\n Contains\n Only supported for text-based properties. The word-list of the property contains the specified Value .\n If you have specified a filter Value , the default is Equals .\n Value (string) --A value used with Resource and Operator to determine if objects satisfy the filter's condition. For numerical properties, Value must be an integer or floating-point decimal. For timestamp properties, Value must be an ISO 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS .\n \n NestedFilters (list) --A list of nested filter objects.\n (dict) --Defines a list of NestedFilter objects. To satisfy the conditions specified in the NestedFilters call, a resource must satisfy the conditions of all of the filters.\n For example, a NestedFilters could be defined using the training job's InputDataConfig property, this would be defined as a list of Channel objects.\n A NestedFilters object contains multiple filters. For example, to find all training jobs whose name contains train and that have cat/data in their S3Uri (specified in InputDataConfig ), you need to create a NestedFilters object that specifies the InputDataConfig property with the following Filter objects:\n '{Name:'InputDataConfig.ChannelName', 'Operator':'EQUALS', 'Value':'train'}',\n '{Name:'InputDataConfig.DataSource.S3DataSource.S3Uri', 'Operator':'CONTAINS', 'Value':'cat/data'}'\n NestedPropertyName (string) -- [REQUIRED]The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig .\n Filters (list) -- [REQUIRED]A list of filters. Each filter acts on a property. Filters must contain at least one Filters value. For example, a NestedFilters call might include a filter on the PropertyName parameter of the InputDataConfig property: InputDataConfig.DataSource.S3DataSource.S3Uri .\n (dict) --A conditional statement for a search expression that includes a Boolean operator, a resource property, and a value.\n If you don't specify an Operator and a Value , the filter searches for only the specified property. 
For example, defining a Filter for the FailureReason for the TrainingJob Resource searches for training job objects that have a value in the FailureReason field.\n If you specify a Value , but not an Operator , Amazon SageMaker uses the equals operator as the default.\n In search, there are several property types:\n Metrics\n To define a metric filter, enter a value using the form 'Metrics.<name>' , where <name> is a metric name. For example, the following filter searches for training jobs with an 'accuracy' metric greater than '0.9' :\n {'Name': 'Metrics.accuracy',\n 'Operator': 'GREATER_THAN',\n 'Value': '0.9'\n }\n HyperParameters\n To define a hyperparameter filter, enter a value with the form 'HyperParameters.<name>' . Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a 'learning_rate' hyperparameter that is less than '0.5' :\n {'Name': 'HyperParameters.learning_rate',\n 'Operator': 'LESS_THAN',\n 'Value': '0.5'\n }\n Tags\n To define a tag filter, enter a value with the form 'Tags.<key>' .\n Name (string) -- [REQUIRED]A property name. For example, TrainingJobName . For the list of valid property names returned in a search result for each supported resource, see TrainingJob properties. You must specify a valid property name for the resource.\n Operator (string) --A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:\n Equals\n The specified resource in Name equals the specified Value .\n NotEquals\n The specified resource in Name does not equal the specified Value .\n GreaterThan\n The specified resource in Name is greater than the specified Value . Not supported for text-based properties.\n GreaterThanOrEqualTo\n The specified resource in Name is greater than or equal to the specified Value . Not supported for text-based properties.\n LessThan\n The specified resource in Name is less than the specified Value . Not supported for text-based properties.\n LessThanOrEqualTo\n The specified resource in Name is less than or equal to the specified Value . Not supported for text-based properties.\n Contains\n Only supported for text-based properties. The word-list of the property contains the specified Value .\n If you have specified a filter Value , the default is Equals .\n Value (string) --A value used with Resource and Operator to determine if objects satisfy the filter's condition. For numerical properties, Value must be an integer or floating-point decimal. For timestamp properties, Value must be an ISO 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS .\n \n \n SubExpressions (list) --A list of search expression objects.\n (dict) --A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression can contain up to twenty elements.\n A SearchExpression contains the following components:\n A list of Filter objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value.\n A list of NestedFilter objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. 
A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.\n A list of SearchExpression objects. A search expression object can be nested in a list of search expression objects.\n A Boolean operator: And or Or .\n \n Operator (string) --A Boolean operator used to evaluate the search expression. If you want every conditional statement in all lists to be satisfied for the entire search expression to be true, specify And . If only a single conditional statement needs to be true for the entire search expression to be true, specify Or . The default value is And .\n \n\n :type SortBy: string\n :param SortBy: The name of the resource property used to sort the SearchResults . The default is LastModifiedTime .\n\n :type SortOrder: string\n :param SortOrder: How SearchResults are ordered. Valid values are Ascending or Descending . The default is Descending .\n\n :type NextToken: string\n :param NextToken: If more than MaxResults resource objects match the specified SearchExpression , the SearchResponse includes a NextToken . The NextToken can be passed to the next SearchRequest to continue retrieving results for the specified SearchExpression and Sort parameters.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return in a SearchResponse .\n\n :rtype: dict\n :return: {\n 'Results': [\n {\n 'TrainingJob': {\n 'TrainingJobName': 'string',\n 'TrainingJobArn': 'string',\n 'TuningJobArn': 'string',\n 'LabelingJobArn': 'string',\n 'ModelArtifacts': {\n 'S3ModelArtifacts': 'string'\n },\n 'TrainingJobStatus': 'InProgress'|'Completed'|'Failed'|'Stopping'|'Stopped',\n 'SecondaryStatus': 'Starting'|'LaunchingMLInstances'|'PreparingTrainingStack'|'Downloading'|'DownloadingTrainingImage'|'Training'|'Uploading'|'Stopping'|'Stopped'|'MaxRuntimeExceeded'|'Completed'|'Failed',\n 'FailureReason': 'string',\n 'HyperParameters': {\n 'string': 'string'\n },\n 'AlgorithmSpecification': {\n 'TrainingImage': 'string',\n 'AlgorithmName': 'string',\n 'TrainingInputMode': 'Pipe'|'File',\n 'MetricDefinitions': [\n {\n 'Name': 'string',\n 'Regex': 'string'\n },\n ]\n },\n 'RoleArn': 'string',\n 'InputDataConfig': [\n {\n 'ChannelName': 'string',\n 'DataSource': {\n 'S3DataSource': {\n 'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',\n 'S3Uri': 'string',\n 'S3DataDistributionType': 'FullyReplicated'|'ShardedByS3Key',\n 'AttributeNames': [\n 'string',\n ]\n }\n },\n 'ContentType': 'string',\n 'CompressionType': 'None'|'Gzip',\n 'RecordWrapperType': 'None'|'RecordIO',\n 'InputMode': 'Pipe'|'File',\n 'ShuffleConfig': {\n 'Seed': 123\n }\n },\n ],\n 'OutputDataConfig': {\n 'KmsKeyId': 'string',\n 'S3OutputPath': 'string'\n },\n 'ResourceConfig': {\n 'InstanceType': 'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.large'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge',\n 'InstanceCount': 123,\n 'VolumeSizeInGB': 123,\n 'VolumeKmsKeyId': 'string'\n },\n 'VpcConfig': {\n 'SecurityGroupIds': [\n 'string',\n ],\n 'Subnets': [\n 'string',\n ]\n },\n 'StoppingCondition': {\n 'MaxRuntimeInSeconds': 123\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'TrainingStartTime': datetime(2015, 1, 1),\n 'TrainingEndTime': datetime(2015, 1, 1),\n 
'LastModifiedTime': datetime(2015, 1, 1),\n 'SecondaryStatusTransitions': [\n {\n 'Status': 'Starting'|'LaunchingMLInstances'|'PreparingTrainingStack'|'Downloading'|'DownloadingTrainingImage'|'Training'|'Uploading'|'Stopping'|'Stopped'|'MaxRuntimeExceeded'|'Completed'|'Failed',\n 'StartTime': datetime(2015, 1, 1),\n 'EndTime': datetime(2015, 1, 1),\n 'StatusMessage': 'string'\n },\n ],\n 'FinalMetricDataList': [\n {\n 'MetricName': 'string',\n 'Value': ...,\n 'Timestamp': datetime(2015, 1, 1)\n },\n ],\n 'EnableNetworkIsolation': True|False,\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n InProgress - The training is in progress.\n Completed - The training job has completed.\n Failed - The training job has failed. To see the reason for the failure, see the FailureReason field in the response to a DescribeTrainingJobResponse call.\n Stopping - The training job is stopping.\n Stopped - The training job has stopped.\n \n \"\"\"\n pass\n\ndef start_notebook_instance(NotebookInstanceName=None):\n \"\"\"\n Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService . A notebook instance's status must be InService before you can connect to your Jupyter notebook.\n See also: AWS API Documentation\n \n \n :example: response = client.start_notebook_instance(\n NotebookInstanceName='string'\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the notebook instance to start.\n \n\n \"\"\"\n pass\n\ndef stop_compilation_job(CompilationJobName=None):\n \"\"\"\n Stops a model compilation job.\n To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal.\n When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobSummary$CompilationJobStatus of the job to Stopping . After Amazon SageMaker stops the job, it sets the CompilationJobSummary$CompilationJobStatus to Stopped .\n See also: AWS API Documentation\n \n \n :example: response = client.stop_compilation_job(\n CompilationJobName='string'\n )\n \n \n :type CompilationJobName: string\n :param CompilationJobName: [REQUIRED]\n The name of the model compilation job to stop.\n \n\n \"\"\"\n pass\n\ndef stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=None):\n \"\"\"\n Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.\n All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for the tuning job.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_hyper_parameter_tuning_job(\n HyperParameterTuningJobName='string'\n )\n \n \n :type HyperParameterTuningJobName: string\n :param HyperParameterTuningJobName: [REQUIRED]\n The name of the tuning job to stop.\n \n\n \"\"\"\n pass\n\ndef stop_labeling_job(LabelingJobName=None):\n \"\"\"\n Stops a running labeling job. A job that is stopped cannot be restarted. 
Any results obtained before the job is stopped are placed in the Amazon S3 output bucket.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_labeling_job(\n LabelingJobName='string'\n )\n \n \n :type LabelingJobName: string\n :param LabelingJobName: [REQUIRED]\n The name of the labeling job to stop.\n \n\n \"\"\"\n pass\n\ndef stop_notebook_instance(NotebookInstanceName=None):\n \"\"\"\n Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves the ML storage volume.\n To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_notebook_instance(\n NotebookInstanceName='string'\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the notebook instance to terminate.\n \n\n \"\"\"\n pass\n\ndef stop_training_job(TrainingJobName=None):\n \"\"\"\n Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.\n Training algorithms provided by Amazon SageMaker save the intermediate results of a model training job. This intermediate data is a valid model artifact. You can use the model artifacts that are saved when Amazon SageMaker stops a training job to create a model.\n When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to Stopping . After Amazon SageMaker stops the job, it sets the status to Stopped .\n See also: AWS API Documentation\n \n \n :example: response = client.stop_training_job(\n TrainingJobName='string'\n )\n \n \n :type TrainingJobName: string\n :param TrainingJobName: [REQUIRED]\n The name of the training job to stop.\n \n\n \"\"\"\n pass\n\ndef stop_transform_job(TransformJobName=None):\n \"\"\"\n Stops a transform job.\n When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping . After Amazon SageMaker stops the job, the status is set to Stopped . When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_transform_job(\n TransformJobName='string'\n )\n \n \n :type TransformJobName: string\n :param TransformJobName: [REQUIRED]\n The name of the transform job to stop.\n \n\n \"\"\"\n pass\n\n
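# Illustrative sketch (not part of the generated stubs): stopping a training job and waiting for the Stopping -> Stopped transition described above. The job name is caller-supplied; 'client' is an assumed boto3 SageMaker client, and describe_training_job is the corresponding Describe call (assumed to be defined earlier in this module).\ndef _example_stop_training_job_and_wait(client, job_name):\n    import time\n    client.stop_training_job(TrainingJobName=job_name)\n    status = 'Stopping'\n    while status == 'Stopping':\n        time.sleep(15)\n        status = client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']\n    return status\n\n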
def update_code_repository(CodeRepositoryName=None, GitConfig=None):\n \"\"\"\n Updates the specified git repository with the specified values.\n See also: AWS API Documentation\n \n \n :example: response = client.update_code_repository(\n CodeRepositoryName='string',\n GitConfig={\n 'SecretArn': 'string'\n }\n )\n \n \n :type CodeRepositoryName: string\n :param CodeRepositoryName: [REQUIRED]\n The name of the git repository to update.\n \n\n :type GitConfig: dict\n :param GitConfig: The configuration of the git repository, including the URL and the Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the repository. The secret must have a staging label of AWSCURRENT and must be in the following format:\n {'username': *UserName* , 'password': *Password* }\n SecretArn (string) --The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:\n {'username': *UserName* , 'password': *Password* }\n \n\n :rtype: dict\n :return: {\n 'CodeRepositoryArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_endpoint(EndpointName=None, EndpointConfigName=None):\n \"\"\"\n Deploys the new EndpointConfig specified in the request, switches to using the newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).\n When Amazon SageMaker receives the request, it sets the endpoint status to Updating . After updating the endpoint, it sets the status to InService . To check the status of an endpoint, use the DescribeEndpoint API.\n See also: AWS API Documentation\n \n \n :example: response = client.update_endpoint(\n EndpointName='string',\n EndpointConfigName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of the endpoint whose configuration you want to update.\n \n\n :type EndpointConfigName: string\n :param EndpointConfigName: [REQUIRED]\n The name of the new endpoint configuration.\n \n\n :rtype: dict\n :return: {\n 'EndpointArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_endpoint_weights_and_capacities(EndpointName=None, DesiredWeightsAndCapacities=None):\n \"\"\"\n Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant associated with an existing endpoint. When it receives the request, Amazon SageMaker sets the endpoint status to Updating . After updating the endpoint, it sets the status to InService . To check the status of an endpoint, use the DescribeEndpoint API.\n See also: AWS API Documentation\n \n \n :example: response = client.update_endpoint_weights_and_capacities(\n EndpointName='string',\n DesiredWeightsAndCapacities=[\n {\n 'VariantName': 'string',\n 'DesiredWeight': ...,\n 'DesiredInstanceCount': 123\n },\n ]\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\n The name of an existing Amazon SageMaker endpoint.\n \n\n :type DesiredWeightsAndCapacities: list\n :param DesiredWeightsAndCapacities: [REQUIRED]\n An object that provides new capacity and weight values for a variant.\n (dict) --Specifies weight and capacity values for a production variant.\n VariantName (string) -- [REQUIRED]The name of the variant to update.\n DesiredWeight (float) --The variant's weight.\n DesiredInstanceCount (integer) --The variant's capacity.\n \n \n\n :rtype: dict\n :return: {\n 'EndpointArn': 'string'\n }\n \n \n \"\"\"\n pass\n\n
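# Illustrative sketch (not part of the generated stubs): shifting traffic between two production variants with the call defined above. The endpoint and variant names are hypothetical; only VariantName is required alongside the new weight, and 'client' is an assumed boto3 SageMaker client.\ndef _example_shift_traffic(client):\n    return client.update_endpoint_weights_and_capacities(\n        EndpointName='my-endpoint',\n        DesiredWeightsAndCapacities=[\n            {'VariantName': 'variant-a', 'DesiredWeight': 0.1},\n            {'VariantName': 'variant-b', 'DesiredWeight': 0.9},\n        ],\n    )\n\n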
def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n \"\"\"\n Updates a notebook instance. NotebookInstance updates include upgrading or downgrading the ML compute instance used for your notebook instance to accommodate changes in your workload requirements. You can also update the VPC security groups.\n See also: AWS API Documentation\n \n \n :example: response = client.update_notebook_instance(\n NotebookInstanceName='string',\n InstanceType='ml.t2.medium'|'ml.t2.large'|'ml.t2.xlarge'|'ml.t2.2xlarge'|'ml.t3.medium'|'ml.t3.large'|'ml.t3.xlarge'|'ml.t3.2xlarge'|'ml.m4.xlarge'|'ml.m4.2xlarge'|'ml.m4.4xlarge'|'ml.m4.10xlarge'|'ml.m4.16xlarge'|'ml.m5.xlarge'|'ml.m5.2xlarge'|'ml.m5.4xlarge'|'ml.m5.12xlarge'|'ml.m5.24xlarge'|'ml.c4.xlarge'|'ml.c4.2xlarge'|'ml.c4.4xlarge'|'ml.c4.8xlarge'|'ml.c5.xlarge'|'ml.c5.2xlarge'|'ml.c5.4xlarge'|'ml.c5.9xlarge'|'ml.c5.18xlarge'|'ml.c5d.xlarge'|'ml.c5d.2xlarge'|'ml.c5d.4xlarge'|'ml.c5d.9xlarge'|'ml.c5d.18xlarge'|'ml.p2.xlarge'|'ml.p2.8xlarge'|'ml.p2.16xlarge'|'ml.p3.2xlarge'|'ml.p3.8xlarge'|'ml.p3.16xlarge',\n RoleArn='string',\n LifecycleConfigName='string',\n DisassociateLifecycleConfig=True|False,\n VolumeSizeInGB=123,\n DefaultCodeRepository='string',\n AdditionalCodeRepositories=[\n 'string',\n ],\n AcceleratorTypes=[\n 'ml.eia1.medium'|'ml.eia1.large'|'ml.eia1.xlarge',\n ],\n DisassociateAcceleratorTypes=True|False,\n DisassociateDefaultCodeRepository=True|False,\n DisassociateAdditionalCodeRepositories=True|False\n )\n \n \n :type NotebookInstanceName: string\n :param NotebookInstanceName: [REQUIRED]\n The name of the notebook instance to update.\n \n\n :type InstanceType: string\n :param InstanceType: The Amazon ML compute instance type.\n\n :type RoleArn: string\n :param RoleArn: The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access the notebook instance. For more information, see Amazon SageMaker Roles .\n Note\n To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.\n \n\n :type LifecycleConfigName: string\n :param LifecycleConfigName: The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n\n :type DisassociateLifecycleConfig: boolean\n :param DisassociateLifecycleConfig: Set to true to remove the notebook instance lifecycle configuration currently associated with the notebook instance.\n\n :type VolumeSizeInGB: integer\n :param VolumeSizeInGB: The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB.\n\n :type DefaultCodeRepository: string\n :param DefaultCodeRepository: The git repository to associate with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances .\n\n :type AdditionalCodeRepositories: list\n :param AdditionalCodeRepositories: An array of up to 3 git repositories to associate with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository. These repositories are cloned at the same level as the default repository of your notebook instance. 
For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances .\n (string) --\n \n\n :type AcceleratorTypes: list\n :param AcceleratorTypes: A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker .\n (string) --\n \n\n :type DisassociateAcceleratorTypes: boolean\n :param DisassociateAcceleratorTypes: Set to true to remove the Elastic Inference (EI) instance types currently associated with this notebook instance.\n\n :type DisassociateDefaultCodeRepository: boolean\n :param DisassociateDefaultCodeRepository: Set to true to remove the default git repository currently associated with this notebook instance.\n\n :type DisassociateAdditionalCodeRepositories: boolean\n :param DisassociateAdditionalCodeRepositories: Set to true to remove the additional git repositories currently associated with this notebook instance.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n
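# Illustrative sketch (not part of the generated stubs): resizing a notebook instance and detaching its lifecycle configuration via the call above. The instance name is hypothetical and the instance is typically stopped before such an update; 'ml.t3.xlarge' comes from the stub's InstanceType enum, and 'client' is an assumed boto3 SageMaker client.\ndef _example_resize_notebook_instance(client):\n    return client.update_notebook_instance(\n        NotebookInstanceName='my-notebook',\n        InstanceType='ml.t3.xlarge',\n        VolumeSizeInGB=50,\n        DisassociateLifecycleConfig=True,\n    )\n\n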
def update_notebook_instance_lifecycle_config(NotebookInstanceLifecycleConfigName=None, OnCreate=None, OnStart=None):\n \"\"\"\n Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API.\n See also: AWS API Documentation\n \n \n :example: response = client.update_notebook_instance_lifecycle_config(\n NotebookInstanceLifecycleConfigName='string',\n OnCreate=[\n {\n 'Content': 'string'\n },\n ],\n OnStart=[\n {\n 'Content': 'string'\n },\n ]\n )\n \n \n :type NotebookInstanceLifecycleConfigName: string\n :param NotebookInstanceLifecycleConfigName: [REQUIRED]\n The name of the lifecycle configuration.\n \n\n :type OnCreate: list\n :param OnCreate: The shell script that runs only once, when you create a notebook instance.\n (dict) --Contains the notebook instance lifecycle configuration script.\n Each lifecycle configuration script has a limit of 16384 characters.\n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin .\n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook] .\n Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.\n For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n Content (string) --A base64-encoded string that contains a shell script for a notebook instance lifecycle configuration.\n \n \n\n :type OnStart: list\n :param OnStart: The shell script that runs every time you start a notebook instance, including when you create the notebook instance.\n (dict) --Contains the notebook instance lifecycle configuration script.\n Each lifecycle configuration script has a limit of 16384 characters.\n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin .\n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook] .\n Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.\n For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance .\n Content (string) --A base64-encoded string that contains a shell script for a notebook instance lifecycle configuration.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_workteam(WorkteamName=None, MemberDefinitions=None, Description=None):\n \"\"\"\n Updates an existing work team with new member definitions or description.\n See also: AWS API Documentation\n \n \n :example: response = client.update_workteam(\n WorkteamName='string',\n MemberDefinitions=[\n {\n 'CognitoMemberDefinition': {\n 'UserPool': 'string',\n 'UserGroup': 'string',\n 'ClientId': 'string'\n }\n },\n ],\n Description='string'\n )\n \n \n :type WorkteamName: string\n :param WorkteamName: [REQUIRED]\n The name of the work team to update.\n \n\n :type MemberDefinitions: list\n :param MemberDefinitions: A list of MemberDefinition objects that contain the updated work team members.\n (dict) --Defines the Amazon Cognito user group that is part of a work team.\n CognitoMemberDefinition (dict) --The Amazon Cognito user group that is part of the work team.\n UserPool (string) -- [REQUIRED]An identifier for a user pool. The user pool must be in the same region as the service that you are calling.\n UserGroup (string) -- [REQUIRED]An identifier for a user group.\n ClientId (string) -- [REQUIRED]An identifier for an application client. You must create the app client ID using Amazon Cognito.\n \n \n\n :type Description: string\n :param Description: An updated description for the work team.\n\n :rtype: dict\n :return: {\n 'Workteam': {\n 'WorkteamName': 'string',\n 'MemberDefinitions': [\n {\n 'CognitoMemberDefinition': {\n 'UserPool': 'string',\n 'UserGroup': 'string',\n 'ClientId': 'string'\n }\n },\n ],\n 'WorkteamArn': 'string',\n 'ProductListingIds': [\n 'string',\n ],\n 'Description': 'string',\n 'SubDomain': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'LastUpdatedDate': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.605984628200531, "alphanum_fraction": 0.6102217435836792, "avg_line_length": 55.44091796875, "blob_id": "c6300769465292cb1b4ca1094cf6a192213d04df", "content_id": "175799720450d293012c9ac3e8b146418fc382af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140426, "license_type": "permissive", "max_line_length": 788, "num_lines": 2488, "path": "/pyboto3/codebuild.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_delete_builds(ids=None):\n \"\"\"\n Deletes one or more builds.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_delete_builds(\n ids=[\n 'string',\n ]\n )\n \n \n :type ids: list\n :param ids: [REQUIRED]\n The IDs of the builds to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'buildsDeleted': [\n 'string',\n ],\n 'buildsNotDeleted': [\n {\n 'id': 'string',\n 'statusCode': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_get_builds(ids=None):\n \"\"\"\n Gets information about builds.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_get_builds(\n ids=[\n 'string',\n ]\n )\n \n \n :type ids: list\n :param ids: [REQUIRED]\n The IDs of the builds.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'builds': [\n {\n 'id': 'string',\n 'arn': 'string',\n 'startTime': datetime(2015, 1, 1),\n 'endTime': datetime(2015, 1, 1),\n 'currentPhase': 'string',\n 'buildStatus': 'SUCCEEDED'|'FAILED'|'FAULT'|'TIMED_OUT'|'IN_PROGRESS'|'STOPPED',\n 'sourceVersion': 'string',\n 'resolvedSourceVersion': 'string',\n 'projectName': 'string',\n 'phases': [\n {\n 'phaseType': 'SUBMITTED'|'QUEUED'|'PROVISIONING'|'DOWNLOAD_SOURCE'|'INSTALL'|'PRE_BUILD'|'BUILD'|'POST_BUILD'|'UPLOAD_ARTIFACTS'|'FINALIZING'|'COMPLETED',\n 'phaseStatus': 'SUCCEEDED'|'FAILED'|'FAULT'|'TIMED_OUT'|'IN_PROGRESS'|'STOPPED',\n 'startTime': datetime(2015, 1, 1),\n 'endTime': datetime(2015, 1, 1),\n 'durationInSeconds': 123,\n 'contexts': [\n {\n 'statusCode': 'string',\n 'message': 'string'\n },\n ]\n },\n ],\n 'source': {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n 'secondarySources': [\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n 'secondarySourceVersions': [\n {\n 'sourceIdentifier': 'string',\n 'sourceVersion': 'string'\n },\n ],\n 'artifacts': {\n 'location': 'string',\n 'sha256sum': 'string',\n 'md5sum': 'string',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n 'secondaryArtifacts': [\n {\n 'location': 'string',\n 'sha256sum': 'string',\n 'md5sum': 'string',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n 'cache': {\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n 'environment': {\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': 
True|False,\n 'certificate': 'string'\n },\n 'serviceRole': 'string',\n 'logs': {\n 'groupName': 'string',\n 'streamName': 'string',\n 'deepLink': 'string',\n 's3DeepLink': 'string',\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n },\n 'timeoutInMinutes': 123,\n 'queuedTimeoutInMinutes': 123,\n 'buildComplete': True|False,\n 'initiator': 'string',\n 'vpcConfig': {\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n 'networkInterface': {\n 'subnetId': 'string',\n 'networkInterfaceId': 'string'\n },\n 'encryptionKey': 'string'\n },\n ],\n 'buildsNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n FAILED : The build failed.\n FAULT : The build faulted.\n IN_PROGRESS : The build is still in progress.\n STOPPED : The build stopped.\n SUCCEEDED : The build succeeded.\n TIMED_OUT : The build timed out.\n \n \"\"\"\n pass\n\ndef batch_get_projects(names=None):\n \"\"\"\n Gets information about build projects.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_get_projects(\n names=[\n 'string',\n ]\n )\n \n \n :type names: list\n :param names: [REQUIRED]\n The names of the build projects.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'projects': [\n {\n 'name': 'string',\n 'arn': 'string',\n 'description': 'string',\n 'source': {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n 'secondarySources': [\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n 'artifacts': {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n 'secondaryArtifacts': [\n {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n 'cache': {\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n 'environment': {\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n 'serviceRole': 'string',\n 'timeoutInMinutes': 123,\n 'queuedTimeoutInMinutes': 123,\n 'encryptionKey': 'string',\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n 'created': datetime(2015, 1, 1),\n 'lastModified': datetime(2015, 1, 1),\n 'webhook': {\n 'url': 'string',\n 'payloadUrl': 'string',\n 'secret': 'string',\n 
'branchFilter': 'string',\n 'lastModifiedSecret': datetime(2015, 1, 1)\n },\n 'vpcConfig': {\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n 'badge': {\n 'badgeEnabled': True|False,\n 'badgeRequestUrl': 'string'\n },\n 'logsConfig': {\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n }\n },\n ],\n 'projectsNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_project(name=None, description=None, source=None, secondarySources=None, artifacts=None, secondaryArtifacts=None, cache=None, environment=None, serviceRole=None, timeoutInMinutes=None, queuedTimeoutInMinutes=None, encryptionKey=None, tags=None, vpcConfig=None, badgeEnabled=None, logsConfig=None):\n \"\"\"\n Creates a build project.\n See also: AWS API Documentation\n \n \n :example: response = client.create_project(\n name='string',\n description='string',\n source={\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n secondarySources=[\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n artifacts={\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n secondaryArtifacts=[\n {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n cache={\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n environment={\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n 
{\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n serviceRole='string',\n timeoutInMinutes=123,\n queuedTimeoutInMinutes=123,\n encryptionKey='string',\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n vpcConfig={\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n badgeEnabled=True|False,\n logsConfig={\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n }\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the build project.\n \n\n :type description: string\n :param description: A description that makes the build project easy to identify.\n\n :type source: dict\n :param source: [REQUIRED]\n Information about the build input source code for the build project.\n type (string) -- [REQUIRED]The type of repository that contains the source code to be built. Valid values include:\n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n location (string) --Information about the location of the source code to be built. Valid values include:\n For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.\n For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, ``https://git-codecommit.*region-ID* .amazonaws.com/v1/repos/repo-name `` ).\n For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.\n The path to the ZIP file that contains the source code (for example, `` bucket-name /path /to /object-name .zip`` ).\n The path to the folder that contains the source code (for example, `` bucket-name /path /to /source-code /folder /`` ).\n For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access , choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n gitCloneDepth (integer) --Information about the git clone depth for the build project.\n buildspec (string) --The build spec declaration to use for the builds in this build project.\n If this value is not specified, a build spec must be included along with the source code to be built.\n auth (dict) --Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.\n type (string) -- [REQUIRED]\n Note\n This data type is deprecated and is no longer accurate or used.\n The authorization type to use. The only valid value is OAUTH , which represents the OAuth authorization type.\n resource (string) --The resource value that applies to the specified authorization type.\n reportBuildStatus (boolean) --Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.\n insecureSsl (boolean) --Enable this flag to ignore SSL warnings while connecting to the project source code.\n sourceIdentifier (string) --An identifier for this project source.\n \n\n :type secondarySources: list\n :param secondarySources: An array of ProjectSource objects.\n (dict) --Information about the build input source code for the build project.\n type (string) -- [REQUIRED]The type of repository that contains the source code to be built. Valid values include:\n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n location (string) --Information about the location of the source code to be built. Valid values include:\n For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. 
This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.\n For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, ``https://git-codecommit.*region-ID* .amazonaws.com/v1/repos/repo-name `` ).\n For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.\n The path to the ZIP file that contains the source code (for example, `` bucket-name /path /to /object-name .zip`` ).\n The path to the folder that contains the source code (for example, `` bucket-name /path /to /source-code /folder /`` ).\n For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access , choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n gitCloneDepth (integer) --Information about the git clone depth for the build project.\n buildspec (string) --The build spec declaration to use for the builds in this build project.\n If this value is not specified, a build spec must be included along with the source code to be built.\n auth (dict) --Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.\n type (string) -- [REQUIRED]\n Note\n This data type is deprecated and is no longer accurate or used.\n The authorization type to use. The only valid value is OAUTH , which represents the OAuth authorization type.\n resource (string) --The resource value that applies to the specified authorization type.\n reportBuildStatus (boolean) --Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. 
If this is set and you use a different source provider, an invalidInputException is thrown.\n insecureSsl (boolean) --Enable this flag to ignore SSL warnings while connecting to the project source code.\n sourceIdentifier (string) --An identifier for this project source.\n \n \n\n :type artifacts: dict\n :param artifacts: [REQUIRED]\n Information about the build output artifacts for the build project.\n type (string) -- [REQUIRED]The type of build output artifact. Valid values include:\n CODEPIPELINE : The build project has build output generated through AWS CodePipeline.\n NO_ARTIFACTS : The build project does not produce any build output.\n S3 : The build project stores build output in Amazon Simple Storage Service (Amazon S3).\n location (string) --Information about the build output artifact location:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output bucket.\n path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the path to the output artifact. If path is not specified, path is not used.\n For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip .\n namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n BUILD_ID : Include the build ID in the location of the build output artifact.\n NONE : Do not include the build ID. This is the default if namespaceType is not specified.\n For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output artifact object. 
If you set the name to be a forward slash ('/'), the artifact is stored in the root of the output bucket.\n For example:\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n If path is empty, namespaceType is set to NONE , and name is set to '/ ', the output artifact is stored in the root of the output bucket.\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to '/ ', the output artifact is stored in ``MyArtifacts/build-ID `` .\n packaging (string) --The type of build output artifact to create:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n NONE : AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.\n ZIP : AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.\n \n overrideArtifactName (boolean) --If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.\n encryptionDisabled (boolean) --Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). If this is set with another artifacts type, an invalidInputException is thrown.\n artifactIdentifier (string) --An identifier for this artifact definition.\n \n\n :type secondaryArtifacts: list\n :param secondaryArtifacts: An array of ProjectArtifacts objects.\n (dict) --Information about the build output artifacts for the build project.\n type (string) -- [REQUIRED]The type of build output artifact. Valid values include:\n CODEPIPELINE : The build project has build output generated through AWS CodePipeline.\n NO_ARTIFACTS : The build project does not produce any build output.\n S3 : The build project stores build output in Amazon Simple Storage Service (Amazon S3).\n location (string) --Information about the build output artifact location:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output bucket.\n path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the path to the output artifact. 
If path is not specified, path is not used.\n For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip .\n namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n BUILD_ID : Include the build ID in the location of the build output artifact.\n NONE : Do not include the build ID. This is the default if namespaceType is not specified.\n For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output artifact object. If you set the name to be a forward slash ('/'), the artifact is stored in the root of the output bucket.\n For example:\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n If path is empty, namespaceType is set to NONE , and name is set to '/ ', the output artifact is stored in the root of the output bucket.\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to '/ ', the output artifact is stored in ``MyArtifacts/build-ID `` .\n packaging (string) --The type of build output artifact to create:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n NONE : AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.\n ZIP : AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.\n \n overrideArtifactName (boolean) --If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.\n encryptionDisabled (boolean) --Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). 
If this is set with another artifacts type, an invalidInputException is thrown.\n artifactIdentifier (string) --An identifier for this artifact definition.\n \n \n\n :type cache: dict\n :param cache: Stores recently used information so that it can be quickly accessed at a later time.\n type (string) -- [REQUIRED]The type of cache used by the build project. Valid values include:\n NO_CACHE : The build project does not use any cache.\n S3 : The build project reads and writes from and to S3.\n location (string) --Information about the cache location:\n NO_CACHE : This value is ignored.\n S3 : This is the S3 bucket name/prefix.\n \n\n :type environment: dict\n :param environment: [REQUIRED]\n Information about the build environment for the build project.\n type (string) -- [REQUIRED]The type of build environment to use for related builds.\n image (string) -- [REQUIRED]The ID of the Docker image to use for this build project.\n computeType (string) -- [REQUIRED]Information about the compute resources the build project uses. Available values include:\n BUILD_GENERAL1_SMALL : Use up to 3 GB memory and 2 vCPUs for builds.\n BUILD_GENERAL1_MEDIUM : Use up to 7 GB memory and 4 vCPUs for builds.\n BUILD_GENERAL1_LARGE : Use up to 15 GB memory and 8 vCPUs for builds.\n environmentVariables (list) --A set of environment variables to make available to builds for this build project.\n (dict) --Information about an environment variable for a build project or a build.\n name (string) -- [REQUIRED]The name or key of the environment variable.\n value (string) -- [REQUIRED]The value of the environment variable.\n Warning\n We strongly discourage the use of environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. Environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI).\n type (string) --The type of environment variable. Valid values include:\n PARAMETER_STORE : An environment variable stored in Amazon EC2 Systems Manager Parameter Store.\n PLAINTEXT : An environment variable in plaintext format.\n \n privilegedMode (boolean) --Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images, and the specified build environment image is not provided by AWS CodeBuild with Docker support. Otherwise, all associated builds that attempt to interact with the Docker daemon fail. You must also start the Docker daemon so that builds can interact with it. One way to do this is to initialize the Docker daemon during the install phase of your build spec by running the following build commands. 
(Do not run these commands if the specified build environment image is provided by AWS CodeBuild with Docker support.)\n If the operating system's base image is Ubuntu Linux:\n - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 sh -c 'until docker info; do echo .; sleep 1; done'\n If the operating system's base image is Alpine Linux, add the -t argument to timeout :\n - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 -t sh -c 'until docker info; do echo .; sleep 1; done'\n certificate (string) --The certificate to use with this build project.\n \n\n :type serviceRole: string\n :param serviceRole: [REQUIRED]\n The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.\n \n\n :type timeoutInMinutes: integer\n :param timeoutInMinutes: How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.\n\n :type queuedTimeoutInMinutes: integer\n :param queuedTimeoutInMinutes: The number of minutes a build is allowed to be queued before it times out.\n\n :type encryptionKey: string\n :param encryptionKey: The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.\n You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format ``alias/alias-name `` ).\n \n\n :type tags: list\n :param tags: A set of tags for this build project.\n These tags are available for use by AWS services that support AWS CodeBuild build project tags.\n (dict) --A tag, consisting of a key and a value.\n This tag is available for use by AWS services that support tags in AWS CodeBuild.\n key (string) --The tag's key.\n value (string) --The tag's value.\n \n \n\n :type vpcConfig: dict\n :param vpcConfig: VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.\n vpcId (string) --The ID of the Amazon VPC.\n subnets (list) --A list of one or more subnet IDs in your Amazon VPC.\n (string) --\n securityGroupIds (list) --A list of one or more security group IDs in your Amazon VPC.\n (string) --\n \n\n :type badgeEnabled: boolean\n :param badgeEnabled: Set this to true to generate a publicly accessible URL for your project's build badge.\n\n :type logsConfig: dict\n :param logsConfig: Information about logs for the build project. These can be logs in Amazon CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.\n cloudWatchLogs (dict) --Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.\n status (string) -- [REQUIRED]The current status of the logs in Amazon CloudWatch Logs for a build project. Valid values are:\n ENABLED : Amazon CloudWatch Logs are enabled for this build project.\n DISABLED : Amazon CloudWatch Logs are not enabled for this build project.\n groupName (string) --The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams .\n streamName (string) --The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams .\n s3Logs (dict) --Information about logs built to an S3 bucket for a build project. 
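Note\n An illustrative, unofficial logsConfig value (the log group, stream, bucket, and prefix names below are placeholders, not defaults or recommendations):\n logsConfig={\n 'cloudWatchLogs': {\n 'status': 'ENABLED',\n 'groupName': 'my-codebuild-group',\n 'streamName': 'my-stream-prefix'\n },\n 's3Logs': {\n 'status': 'ENABLED',\n 'location': 'my-bucket/build-log'\n }\n }\n 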
S3 logs are not enabled by default.\n status (string) -- [REQUIRED]The current status of the S3 build logs. Valid values are:\n ENABLED : S3 build logs are enabled for this build project.\n DISABLED : S3 build logs are not enabled for this build project.\n location (string) --The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 bucket name is my-bucket , and your path prefix is build-log , then acceptable formats are my-bucket/build-log or arn:aws:s3:::my-bucket/build-log .\n \n \n\n :rtype: dict\n :return: {\n 'project': {\n 'name': 'string',\n 'arn': 'string',\n 'description': 'string',\n 'source': {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n 'secondarySources': [\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n 'artifacts': {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n 'secondaryArtifacts': [\n {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n 'cache': {\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n 'environment': {\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n 'serviceRole': 'string',\n 'timeoutInMinutes': 123,\n 'queuedTimeoutInMinutes': 123,\n 'encryptionKey': 'string',\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n 'created': datetime(2015, 1, 1),\n 'lastModified': datetime(2015, 1, 1),\n 'webhook': {\n 'url': 'string',\n 'payloadUrl': 'string',\n 'secret': 'string',\n 'branchFilter': 'string',\n 'lastModifiedSecret': datetime(2015, 1, 1)\n },\n 'vpcConfig': {\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n 'badge': {\n 'badgeEnabled': True|False,\n 'badgeRequestUrl': 'string'\n },\n 'logsConfig': {\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n }\n }\n }\n \n \n :returns: \n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does 
not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n \n \"\"\"\n pass\n\ndef create_webhook(projectName=None, branchFilter=None):\n \"\"\"\n For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.\n See also: AWS API Documentation\n \n \n :example: response = client.create_webhook(\n projectName='string',\n branchFilter='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the AWS CodeBuild project.\n \n\n :type branchFilter: string\n :param branchFilter: A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.\n\n :rtype: dict\n :return: {\n 'webhook': {\n 'url': 'string',\n 'payloadUrl': 'string',\n 'secret': 'string',\n 'branchFilter': 'string',\n 'lastModifiedSecret': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_project(name=None):\n \"\"\"\n Deletes a build project.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_project(\n name='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the build project.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_source_credentials(arn=None):\n \"\"\"\n Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_source_credentials(\n arn='string'\n )\n \n \n :type arn: string\n :param arn: [REQUIRED]\n The Amazon Resource Name (ARN) of the token.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_webhook(projectName=None):\n \"\"\"\n For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_webhook(\n projectName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the AWS CodeBuild project.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef import_source_credentials(username=None, token=None, serverType=None, authType=None):\n \"\"\"\n Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.\n See also: AWS API Documentation\n \n \n :example: response = client.import_source_credentials(\n username='string',\n token='string',\n serverType='GITHUB'|'BITBUCKET'|'GITHUB_ENTERPRISE',\n authType='OAUTH'|'BASIC_AUTH'|'PERSONAL_ACCESS_TOKEN'\n )\n \n \n :type username: string\n :param username: The Bitbucket username when the authType is BASIC_AUTH. This parameter is not valid for other types of source providers or connections.\n\n :type token: string\n :param token: [REQUIRED]\n For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is the app password.\n \n\n :type serverType: string\n :param serverType: [REQUIRED]\n The source provider used for this project.\n \n\n :type authType: string\n :param authType: [REQUIRED]\n The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the AWS CodeBuild console.\n \n\n :rtype: dict\n :return: {\n 'arn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef invalidate_project_cache(projectName=None):\n \"\"\"\n Resets the cache for a project.\n See also: AWS API Documentation\n \n \n :example: response = client.invalidate_project_cache(\n projectName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the AWS CodeBuild build project that the cache is reset for.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef list_builds(sortOrder=None, nextToken=None):\n \"\"\"\n Gets a list of build IDs, with each build ID representing a single build.\n See also: AWS API Documentation\n \n \n :example: response = client.list_builds(\n sortOrder='ASCENDING'|'DESCENDING',\n nextToken='string'\n )\n \n \n :type sortOrder: string\n :param sortOrder: The order to list build IDs. Valid values include:\n ASCENDING : List the build IDs in ascending order by build ID.\n DESCENDING : List the build IDs in descending order by build ID.\n \n\n :type nextToken: string\n :param nextToken: During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token . To get the next batch of items in the list, call this operation again, adding the next token to the call. 
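Note\n As an unofficial usage sketch (the boto3 client construction and the chosen sort order are illustration-only assumptions, not part of this stub), the paginator exposed through get_paginator can walk every page of build IDs without handling nextToken by hand:\n import boto3\n client = boto3.client('codebuild')\n # list_builds is a paginated operation, so a paginator is available for it\n paginator = client.get_paginator('list_builds')\n for page in paginator.paginate(sortOrder='DESCENDING'):\n for build_id in page['ids']: # each page mirrors the list_builds response shape\n print(build_id)\n 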
To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.\n\n :rtype: dict\n :return: {\n 'ids': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_builds_for_project(projectName=None, sortOrder=None, nextToken=None):\n \"\"\"\n Gets a list of build IDs for the specified build project, with each build ID representing a single build.\n See also: AWS API Documentation\n \n \n :example: response = client.list_builds_for_project(\n projectName='string',\n sortOrder='ASCENDING'|'DESCENDING',\n nextToken='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the AWS CodeBuild project.\n \n\n :type sortOrder: string\n :param sortOrder: The order to list build IDs. Valid values include:\n ASCENDING : List the build IDs in ascending order by build ID.\n DESCENDING : List the build IDs in descending order by build ID.\n \n\n :type nextToken: string\n :param nextToken: During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token . To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.\n\n :rtype: dict\n :return: {\n 'ids': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_curated_environment_images():\n \"\"\"\n Gets information about Docker images that are managed by AWS CodeBuild.\n See also: AWS API Documentation\n \n \n :example: response = client.list_curated_environment_images()\n \n \n :rtype: dict\n :return: {\n 'platforms': [\n {\n 'platform': 'DEBIAN'|'AMAZON_LINUX'|'UBUNTU'|'WINDOWS_SERVER',\n 'languages': [\n {\n 'language': 'JAVA'|'PYTHON'|'NODE_JS'|'RUBY'|'GOLANG'|'DOCKER'|'ANDROID'|'DOTNET'|'BASE'|'PHP',\n 'images': [\n {\n 'name': 'string',\n 'description': 'string',\n 'versions': [\n 'string',\n ]\n },\n ]\n },\n ]\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_projects(sortBy=None, sortOrder=None, nextToken=None):\n \"\"\"\n Gets a list of build project names, with each build project name representing a single build project.\n See also: AWS API Documentation\n \n \n :example: response = client.list_projects(\n sortBy='NAME'|'CREATED_TIME'|'LAST_MODIFIED_TIME',\n sortOrder='ASCENDING'|'DESCENDING',\n nextToken='string'\n )\n \n \n :type sortBy: string\n :param sortBy: The criterion to be used to list build project names. Valid values include:\n CREATED_TIME : List based on when each build project was created.\n LAST_MODIFIED_TIME : List based on when information about each build project was last changed.\n NAME : List based on each build project's name.\n Use sortOrder to specify in what order to list the build project names based on the preceding criteria.\n \n\n :type sortOrder: string\n :param sortOrder: The order in which to list build projects. Valid values include:\n ASCENDING : List in ascending order.\n DESCENDING : List in descending order.\n Use sortBy to specify the criterion to be used to list build project names.\n \n\n :type nextToken: string\n :param nextToken: During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token . 
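Note\n A minimal manual-pagination sketch (the client object and the variable names are assumptions for illustration), looping until the service stops returning a token:\n projects = []\n kwargs = {'sortBy': 'NAME', 'sortOrder': 'ASCENDING'}\n while True:\n page = client.list_projects(**kwargs)\n projects.extend(page['projects'])\n if not page.get('nextToken'): # absence of a token means this was the last page\n break\n kwargs['nextToken'] = page['nextToken']\n 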
To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.\n\n :rtype: dict\n :return: {\n 'nextToken': 'string',\n 'projects': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_source_credentials():\n \"\"\"\n Returns a list of SourceCredentialsInfo objects.\n See also: AWS API Documentation\n \n \n :example: response = client.list_source_credentials()\n \n \n :rtype: dict\n :return: {\n 'sourceCredentialsInfos': [\n {\n 'arn': 'string',\n 'serverType': 'GITHUB'|'BITBUCKET'|'GITHUB_ENTERPRISE',\n 'authType': 'OAUTH'|'BASIC_AUTH'|'PERSONAL_ACCESS_TOKEN'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef start_build(projectName=None, secondarySourcesOverride=None, secondarySourcesVersionOverride=None, sourceVersion=None, artifactsOverride=None, secondaryArtifactsOverride=None, environmentVariablesOverride=None, sourceTypeOverride=None, sourceLocationOverride=None, sourceAuthOverride=None, gitCloneDepthOverride=None, buildspecOverride=None, insecureSslOverride=None, reportBuildStatusOverride=None, environmentTypeOverride=None, imageOverride=None, computeTypeOverride=None, certificateOverride=None, cacheOverride=None, serviceRoleOverride=None, privilegedModeOverride=None, timeoutInMinutesOverride=None, queuedTimeoutInMinutesOverride=None, idempotencyToken=None, logsConfigOverride=None):\n \"\"\"\n Starts running a build.\n See also: AWS API Documentation\n \n \n :example: response = client.start_build(\n projectName='string',\n secondarySourcesOverride=[\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n secondarySourcesVersionOverride=[\n {\n 'sourceIdentifier': 'string',\n 'sourceVersion': 'string'\n },\n ],\n sourceVersion='string',\n artifactsOverride={\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n secondaryArtifactsOverride=[\n {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n environmentVariablesOverride=[\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n sourceTypeOverride='CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n sourceLocationOverride='string',\n sourceAuthOverride={\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n gitCloneDepthOverride=123,\n buildspecOverride='string',\n insecureSslOverride=True|False,\n reportBuildStatusOverride=True|False,\n environmentTypeOverride='WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n imageOverride='string',\n computeTypeOverride='BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n certificateOverride='string',\n cacheOverride={\n 'type': 
'NO_CACHE'|'S3',\n 'location': 'string'\n },\n serviceRoleOverride='string',\n privilegedModeOverride=True|False,\n timeoutInMinutesOverride=123,\n queuedTimeoutInMinutesOverride=123,\n idempotencyToken='string',\n logsConfigOverride={\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n }\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the AWS CodeBuild build project to start running a build.\n \n\n :type secondarySourcesOverride: list\n :param secondarySourcesOverride: An array of ProjectSource objects.\n (dict) --Information about the build input source code for the build project.\n type (string) -- [REQUIRED]The type of repository that contains the source code to be built. Valid values include:\n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n location (string) --Information about the location of the source code to be built. Valid values include:\n For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.\n For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, ``https://git-codecommit.*region-ID* .amazonaws.com/v1/repos/repo-name `` ).\n For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.\n The path to the ZIP file that contains the source code (for example, `` bucket-name /path /to /object-name .zip`` ).\n The path to the folder that contains the source code (for example, `` bucket-name /path /to /source-code /folder /`` ).\n For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access , choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access . 
(After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n gitCloneDepth (integer) --Information about the git clone depth for the build project.\n buildspec (string) --The build spec declaration to use for the builds in this build project.\n If this value is not specified, a build spec must be included along with the source code to be built.\n auth (dict) --Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.\n type (string) -- [REQUIRED]\n Note\n This data type is deprecated and is no longer accurate or used.\n The authorization type to use. The only valid value is OAUTH , which represents the OAuth authorization type.\n resource (string) --The resource value that applies to the specified authorization type.\n reportBuildStatus (boolean) --Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.\n insecureSsl (boolean) --Enable this flag to ignore SSL warnings while connecting to the project source code.\n sourceIdentifier (string) --An identifier for this project source.\n \n \n\n :type secondarySourcesVersionOverride: list\n :param secondarySourcesVersionOverride: An array of ProjectSourceVersion objects that specify one or more versions of the project's secondary sources to be used for this build only.\n (dict) --A source identifier and its corresponding version.\n sourceIdentifier (string) -- [REQUIRED]An identifier for a source in the build project.\n sourceVersion (string) -- [REQUIRED]The source version for the corresponding source identifier. If specified, must be one of:\n For AWS CodeCommit: the commit ID to use.\n For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25 ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.\n \n \n\n :type sourceVersion: string\n :param sourceVersion: A version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, must be one of:\n For AWS CodeCommit: the commit ID to use.\n For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25 ). If a branch name is specified, the branch's HEAD commit ID is used. 
If not specified, the default branch's HEAD commit ID is used.\n For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.\n \n\n :type artifactsOverride: dict\n :param artifactsOverride: Build output artifact settings that override, for this build only, the latest ones already defined in the build project.\n type (string) -- [REQUIRED]The type of build output artifact. Valid values include:\n CODEPIPELINE : The build project has build output generated through AWS CodePipeline.\n NO_ARTIFACTS : The build project does not produce any build output.\n S3 : The build project stores build output in Amazon Simple Storage Service (Amazon S3).\n location (string) --Information about the build output artifact location:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output bucket.\n path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the path to the output artifact. If path is not specified, path is not used.\n For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip .\n namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n BUILD_ID : Include the build ID in the location of the build output artifact.\n NONE : Do not include the build ID. This is the default if namespaceType is not specified.\n For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output artifact object. 
If you set the name to be a forward slash ('/'), the artifact is stored in the root of the output bucket.\n For example:\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n If path is empty, namespaceType is set to NONE , and name is set to '/ ', the output artifact is stored in the root of the output bucket.\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to '/ ', the output artifact is stored in ``MyArtifacts/build-ID `` .\n packaging (string) --The type of build output artifact to create:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n NONE : AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.\n ZIP : AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.\n \n overrideArtifactName (boolean) --If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.\n encryptionDisabled (boolean) --Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). If this is set with another artifacts type, an invalidInputException is thrown.\n artifactIdentifier (string) --An identifier for this artifact definition.\n \n\n :type secondaryArtifactsOverride: list\n :param secondaryArtifactsOverride: An array of ProjectArtifacts objects.\n (dict) --Information about the build output artifacts for the build project.\n type (string) -- [REQUIRED]The type of build output artifact. Valid values include:\n CODEPIPELINE : The build project has build output generated through AWS CodePipeline.\n NO_ARTIFACTS : The build project does not produce any build output.\n S3 : The build project stores build output in Amazon Simple Storage Service (Amazon S3).\n location (string) --Information about the build output artifact location:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output bucket.\n path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the path to the output artifact. 
If path is not specified, path is not used.\n For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip .\n namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n BUILD_ID : Include the build ID in the location of the build output artifact.\n NONE : Do not include the build ID. This is the default if namespaceType is not specified.\n For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output artifact object. If you set the name to be a forward slash ('/'), the artifact is stored in the root of the output bucket.\n For example:\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n If path is empty, namespaceType is set to NONE , and name is set to '/ ', the output artifact is stored in the root of the output bucket.\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to '/ ', the output artifact is stored in ``MyArtifacts/build-ID `` .\n packaging (string) --The type of build output artifact to create:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n NONE : AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.\n ZIP : AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.\n \n overrideArtifactName (boolean) --If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.\n encryptionDisabled (boolean) --Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). 
If this is set with another artifacts type, an invalidInputException is thrown.\n artifactIdentifier (string) --An identifier for this artifact definition.\n \n \n\n :type environmentVariablesOverride: list\n :param environmentVariablesOverride: A set of environment variables that overrides, for this build only, the latest ones already defined in the build project.\n (dict) --Information about an environment variable for a build project or a build.\n name (string) -- [REQUIRED]The name or key of the environment variable.\n value (string) -- [REQUIRED]The value of the environment variable.\n Warning\n We strongly discourage the use of environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. Environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI).\n type (string) --The type of environment variable. Valid values include:\n PARAMETER_STORE : An environment variable stored in Amazon EC2 Systems Manager Parameter Store.\n PLAINTEXT : An environment variable in plaintext format.\n \n \n\n :type sourceTypeOverride: string\n :param sourceTypeOverride: A source input type, for this build, that overrides the source input defined in the build project.\n\n :type sourceLocationOverride: string\n :param sourceLocationOverride: A location that overrides, for this build, the source location for the one defined in the build project.\n\n :type sourceAuthOverride: dict\n :param sourceAuthOverride: An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket or GitHub.\n type (string) -- [REQUIRED]\n Note\n This data type is deprecated and is no longer accurate or used.\n The authorization type to use. The only valid value is OAUTH , which represents the OAuth authorization type.\n resource (string) --The resource value that applies to the specified authorization type.\n \n\n :type gitCloneDepthOverride: integer\n :param gitCloneDepthOverride: The user-defined depth of history, with a minimum value of 0, that overrides, for this build only, any previous depth of history defined in the build project.\n\n :type buildspecOverride: string\n :param buildspecOverride: A build spec declaration that overrides, for this build only, the latest one already defined in the build project.\n\n :type insecureSslOverride: boolean\n :param insecureSslOverride: Enable this flag to override the insecure SSL setting that is specified in the build project. The insecure SSL setting determines whether to ignore SSL warnings while connecting to the project source code. This override applies only if the build's source is GitHub Enterprise.\n\n :type reportBuildStatusOverride: boolean\n :param reportBuildStatusOverride: Set to true to report to your source provider the status of a build's start and completion. 
If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.\n\n :type environmentTypeOverride: string\n :param environmentTypeOverride: A container type for this build that overrides the one specified in the build project.\n\n :type imageOverride: string\n :param imageOverride: The name of an image for this build that overrides the one specified in the build project.\n\n :type computeTypeOverride: string\n :param computeTypeOverride: The name of a compute type for this build that overrides the one specified in the build project.\n\n :type certificateOverride: string\n :param certificateOverride: The name of a certificate for this build that overrides the one specified in the build project.\n\n :type cacheOverride: dict\n :param cacheOverride: A ProjectCache object specified for this build that overrides the one defined in the build project.\n type (string) -- [REQUIRED]The type of cache used by the build project. Valid values include:\n NO_CACHE : The build project does not use any cache.\n S3 : The build project reads and writes from and to S3.\n location (string) --Information about the cache location:\n NO_CACHE : This value is ignored.\n S3 : This is the S3 bucket name/prefix.\n \n\n :type serviceRoleOverride: string\n :param serviceRoleOverride: The name of a service role for this build that overrides the one specified in the build project.\n\n :type privilegedModeOverride: boolean\n :param privilegedModeOverride: Enable this flag to override privileged mode in the build project.\n\n :type timeoutInMinutesOverride: integer\n :param timeoutInMinutesOverride: The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this build only, the latest setting already defined in the build project.\n\n :type queuedTimeoutInMinutesOverride: integer\n :param queuedTimeoutInMinutesOverride: The number of minutes a build is allowed to be queued before it times out.\n\n :type idempotencyToken: string\n :param idempotencyToken: A unique, case-sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 12 hours. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.\n\n :type logsConfigOverride: dict\n :param logsConfigOverride: Log settings for this build that override the log settings defined in the build project.\n cloudWatchLogs (dict) --Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.\n status (string) -- [REQUIRED]The current status of the logs in Amazon CloudWatch Logs for a build project. Valid values are:\n ENABLED : Amazon CloudWatch Logs are enabled for this build project.\n DISABLED : Amazon CloudWatch Logs are not enabled for this build project.\n groupName (string) --The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams .\n streamName (string) --The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams .\n s3Logs (dict) --Information about logs built to an S3 bucket for a build project. S3 logs are not enabled by default.\n status (string) -- [REQUIRED]The current status of the S3 build logs. 
Valid values are:\n ENABLED : S3 build logs are enabled for this build project.\n DISABLED : S3 build logs are not enabled for this build project.\n location (string) --The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 bucket name is my-bucket , and your path prefix is build-log , then acceptable formats are my-bucket/build-log or arn:aws:s3:::my-bucket/build-log .\n \n \n\n :rtype: dict\n :return: {\n 'build': {\n 'id': 'string',\n 'arn': 'string',\n 'startTime': datetime(2015, 1, 1),\n 'endTime': datetime(2015, 1, 1),\n 'currentPhase': 'string',\n 'buildStatus': 'SUCCEEDED'|'FAILED'|'FAULT'|'TIMED_OUT'|'IN_PROGRESS'|'STOPPED',\n 'sourceVersion': 'string',\n 'resolvedSourceVersion': 'string',\n 'projectName': 'string',\n 'phases': [\n {\n 'phaseType': 'SUBMITTED'|'QUEUED'|'PROVISIONING'|'DOWNLOAD_SOURCE'|'INSTALL'|'PRE_BUILD'|'BUILD'|'POST_BUILD'|'UPLOAD_ARTIFACTS'|'FINALIZING'|'COMPLETED',\n 'phaseStatus': 'SUCCEEDED'|'FAILED'|'FAULT'|'TIMED_OUT'|'IN_PROGRESS'|'STOPPED',\n 'startTime': datetime(2015, 1, 1),\n 'endTime': datetime(2015, 1, 1),\n 'durationInSeconds': 123,\n 'contexts': [\n {\n 'statusCode': 'string',\n 'message': 'string'\n },\n ]\n },\n ],\n 'source': {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n 'secondarySources': [\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n 'secondarySourceVersions': [\n {\n 'sourceIdentifier': 'string',\n 'sourceVersion': 'string'\n },\n ],\n 'artifacts': {\n 'location': 'string',\n 'sha256sum': 'string',\n 'md5sum': 'string',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n 'secondaryArtifacts': [\n {\n 'location': 'string',\n 'sha256sum': 'string',\n 'md5sum': 'string',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n 'cache': {\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n 'environment': {\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n 'serviceRole': 'string',\n 'logs': {\n 'groupName': 'string',\n 'streamName': 'string',\n 'deepLink': 'string',\n 's3DeepLink': 'string',\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n },\n 'timeoutInMinutes': 123,\n 'queuedTimeoutInMinutes': 123,\n 'buildComplete': True|False,\n 'initiator': 'string',\n 'vpcConfig': {\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n 'networkInterface': {\n 'subnetId': 'string',\n 'networkInterfaceId': 'string'\n },\n 'encryptionKey': 'string'\n }\n }\n \n \n 
:returns: \n FAILED : The build failed.\n FAULT : The build faulted.\n IN_PROGRESS : The build is still in progress.\n STOPPED : The build stopped.\n SUCCEEDED : The build succeeded.\n TIMED_OUT : The build timed out.\n \n \"\"\"\n pass\n\ndef stop_build(id=None):\n \"\"\"\n Attempts to stop running a build.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_build(\n id='string'\n )\n \n \n :type id: string\n :param id: [REQUIRED]\n The ID of the build.\n \n\n :rtype: dict\n :return: {\n 'build': {\n 'id': 'string',\n 'arn': 'string',\n 'startTime': datetime(2015, 1, 1),\n 'endTime': datetime(2015, 1, 1),\n 'currentPhase': 'string',\n 'buildStatus': 'SUCCEEDED'|'FAILED'|'FAULT'|'TIMED_OUT'|'IN_PROGRESS'|'STOPPED',\n 'sourceVersion': 'string',\n 'resolvedSourceVersion': 'string',\n 'projectName': 'string',\n 'phases': [\n {\n 'phaseType': 'SUBMITTED'|'QUEUED'|'PROVISIONING'|'DOWNLOAD_SOURCE'|'INSTALL'|'PRE_BUILD'|'BUILD'|'POST_BUILD'|'UPLOAD_ARTIFACTS'|'FINALIZING'|'COMPLETED',\n 'phaseStatus': 'SUCCEEDED'|'FAILED'|'FAULT'|'TIMED_OUT'|'IN_PROGRESS'|'STOPPED',\n 'startTime': datetime(2015, 1, 1),\n 'endTime': datetime(2015, 1, 1),\n 'durationInSeconds': 123,\n 'contexts': [\n {\n 'statusCode': 'string',\n 'message': 'string'\n },\n ]\n },\n ],\n 'source': {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n 'secondarySources': [\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n 'secondarySourceVersions': [\n {\n 'sourceIdentifier': 'string',\n 'sourceVersion': 'string'\n },\n ],\n 'artifacts': {\n 'location': 'string',\n 'sha256sum': 'string',\n 'md5sum': 'string',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n 'secondaryArtifacts': [\n {\n 'location': 'string',\n 'sha256sum': 'string',\n 'md5sum': 'string',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n 'cache': {\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n 'environment': {\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n 'serviceRole': 'string',\n 'logs': {\n 'groupName': 'string',\n 'streamName': 'string',\n 'deepLink': 'string',\n 's3DeepLink': 'string',\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n },\n 'timeoutInMinutes': 123,\n 'queuedTimeoutInMinutes': 123,\n 'buildComplete': True|False,\n 'initiator': 'string',\n 'vpcConfig': {\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n 'networkInterface': {\n 'subnetId': 
'string',\n 'networkInterfaceId': 'string'\n },\n 'encryptionKey': 'string'\n }\n }\n \n \n :returns: \n For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.\n For AWS CodePipeline, the source revision provided by AWS CodePipeline.\n For Amazon Simple Storage Service (Amazon S3), this does not apply.\n \n \"\"\"\n pass\n\ndef update_project(name=None, description=None, source=None, secondarySources=None, artifacts=None, secondaryArtifacts=None, cache=None, environment=None, serviceRole=None, timeoutInMinutes=None, queuedTimeoutInMinutes=None, encryptionKey=None, tags=None, vpcConfig=None, badgeEnabled=None, logsConfig=None):\n \"\"\"\n Changes the settings of a build project.\n See also: AWS API Documentation\n \n \n :example: response = client.update_project(\n name='string',\n description='string',\n source={\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n secondarySources=[\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n artifacts={\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n secondaryArtifacts=[\n {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n cache={\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n environment={\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n serviceRole='string',\n timeoutInMinutes=123,\n queuedTimeoutInMinutes=123,\n encryptionKey='string',\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n vpcConfig={\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n badgeEnabled=True|False,\n logsConfig={\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n }\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the build project.\n Note\n You cannot change a build project's name.\n \n\n :type description: string\n :param description: A new or replacement description of the build project.\n\n :type source: dict\n :param source: Information to be changed about the build input source code for the build project.\n type (string) -- [REQUIRED]The type of repository that contains the source code to 
be built. Valid values include:\n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n location (string) --Information about the location of the source code to be built. Valid values include:\n For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.\n For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, ``https://git-codecommit.*region-ID* .amazonaws.com/v1/repos/repo-name `` ).\n For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.\n The path to the ZIP file that contains the source code (for example, `` bucket-name /path /to /object-name .zip`` ).\n The path to the folder that contains the source code (for example, `` bucket-name /path /to /source-code /folder /`` ).\n For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access , choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n gitCloneDepth (integer) --Information about the git clone depth for the build project.\n buildspec (string) --The build spec declaration to use for the builds in this build project.\n If this value is not specified, a build spec must be included along with the source code to be built.\n auth (dict) --Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.\n type (string) -- [REQUIRED]\n Note\n This data type is deprecated and is no longer accurate or used.\n The authorization type to use. 
The only valid value is OAUTH , which represents the OAuth authorization type.\n resource (string) --The resource value that applies to the specified authorization type.\n reportBuildStatus (boolean) --Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.\n insecureSsl (boolean) --Enable this flag to ignore SSL warnings while connecting to the project source code.\n sourceIdentifier (string) --An identifier for this project source.\n \n\n :type secondarySources: list\n :param secondarySources: An array of ProjectSource objects.\n (dict) --Information about the build input source code for the build project.\n type (string) -- [REQUIRED]The type of repository that contains the source code to be built. Valid values include:\n BITBUCKET : The source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n location (string) --Information about the location of the source code to be built. Valid values include:\n For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.\n For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, ``https://git-codecommit.*region-ID* .amazonaws.com/v1/repos/repo-name `` ).\n For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.\n The path to the ZIP file that contains the source code (for example, `` bucket-name /path /to /object-name .zip`` ).\n The path to the folder that contains the source code (for example, `` bucket-name /path /to /source-code /folder /`` ).\n For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access , choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access . 
(After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH .\n gitCloneDepth (integer) --Information about the git clone depth for the build project.\n buildspec (string) --The build spec declaration to use for the builds in this build project.\n If this value is not specified, a build spec must be included along with the source code to be built.\n auth (dict) --Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.\n type (string) -- [REQUIRED]\n Note\n This data type is deprecated and is no longer accurate or used.\n The authorization type to use. The only valid value is OAUTH , which represents the OAuth authorization type.\n resource (string) --The resource value that applies to the specified authorization type.\n reportBuildStatus (boolean) --Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.\n insecureSsl (boolean) --Enable this flag to ignore SSL warnings while connecting to the project source code.\n sourceIdentifier (string) --An identifier for this project source.\n \n \n\n :type artifacts: dict\n :param artifacts: Information to be changed about the build output artifacts for the build project.\n type (string) -- [REQUIRED]The type of build output artifact. Valid values include:\n CODEPIPELINE : The build project has build output generated through AWS CodePipeline.\n NO_ARTIFACTS : The build project does not produce any build output.\n S3 : The build project stores build output in Amazon Simple Storage Service (Amazon S3).\n location (string) --Information about the build output artifact location:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output bucket.\n path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the path to the output artifact. If path is not specified, path is not used.\n For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip .\n namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. 
This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n BUILD_ID : Include the build ID in the location of the build output artifact.\n NONE : Do not include the build ID. This is the default if namespaceType is not specified.\n For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output artifact object. If you set the name to be a forward slash ('/'), the artifact is stored in the root of the output bucket.\n For example:\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n If path is empty, namespaceType is set to NONE , and name is set to '/ ', the output artifact is stored in the root of the output bucket.\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to '/ ', the output artifact is stored in ``MyArtifacts/build-ID `` .\n packaging (string) --The type of build output artifact to create:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n NONE : AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.\n ZIP : AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.\n \n overrideArtifactName (boolean) --If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.\n encryptionDisabled (boolean) --Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). If this is set with another artifacts type, an invalidInputException is thrown.\n artifactIdentifier (string) --An identifier for this artifact definition.\n \n\n :type secondaryArtifacts: list\n :param secondaryArtifacts: An array of ProjectArtifacts objects.\n (dict) --Information about the build output artifacts for the build project.\n type (string) -- [REQUIRED]The type of build output artifact. 
Valid values include:\n CODEPIPELINE : The build project has build output generated through AWS CodePipeline.\n NO_ARTIFACTS : The build project does not produce any build output.\n S3 : The build project stores build output in Amazon Simple Storage Service (Amazon S3).\n location (string) --Information about the build output artifact location:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output bucket.\n path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the path to the output artifact. If path is not specified, path is not used.\n For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip .\n namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n BUILD_ID : Include the build ID in the location of the build output artifact.\n NONE : Do not include the build ID. This is the default if namespaceType is not specified.\n For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild uses to name and store the output artifact:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , this is the name of the output artifact object. If you set the name to be a forward slash ('/'), the artifact is stored in the root of the output bucket.\n For example:\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact is stored in MyArtifacts/*build-ID* /MyArtifact.zip .\n If path is empty, namespaceType is set to NONE , and name is set to '/ ', the output artifact is stored in the root of the output bucket.\n If path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to '/ ', the output artifact is stored in ``MyArtifacts/build-ID `` .\n packaging (string) --The type of build output artifact to create:\n If type is set to CODEPIPELINE , AWS CodePipeline ignores this value if specified. 
This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.\n If type is set to NO_ARTIFACTS , this value is ignored if specified, because no build output is produced.\n If type is set to S3 , valid values include:\n NONE : AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.\n ZIP : AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.\n \n overrideArtifactName (boolean) --If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.\n encryptionDisabled (boolean) --Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). If this is set with another artifacts type, an invalidInputException is thrown.\n artifactIdentifier (string) --An identifier for this artifact definition.\n \n \n\n :type cache: dict\n :param cache: Stores recently used information so that it can be quickly accessed at a later time.\n type (string) -- [REQUIRED]The type of cache used by the build project. Valid values include:\n NO_CACHE : The build project does not use any cache.\n S3 : The build project reads and writes from and to S3.\n location (string) --Information about the cache location:\n NO_CACHE : This value is ignored.\n S3 : This is the S3 bucket name/prefix.\n \n\n :type environment: dict\n :param environment: Information to be changed about the build environment for the build project.\n type (string) -- [REQUIRED]The type of build environment to use for related builds.\n image (string) -- [REQUIRED]The ID of the Docker image to use for this build project.\n computeType (string) -- [REQUIRED]Information about the compute resources the build project uses. Available values include:\n BUILD_GENERAL1_SMALL : Use up to 3 GB memory and 2 vCPUs for builds.\n BUILD_GENERAL1_MEDIUM : Use up to 7 GB memory and 4 vCPUs for builds.\n BUILD_GENERAL1_LARGE : Use up to 15 GB memory and 8 vCPUs for builds.\n environmentVariables (list) --A set of environment variables to make available to builds for this build project.\n (dict) --Information about an environment variable for a build project or a build.\n name (string) -- [REQUIRED]The name or key of the environment variable.\n value (string) -- [REQUIRED]The value of the environment variable.\n Warning\n We strongly discourage the use of environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. Environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI).\n type (string) --The type of environment variable. Valid values include:\n PARAMETER_STORE : An environment variable stored in Amazon EC2 Systems Manager Parameter Store.\n PLAINTEXT : An environment variable in plaintext format.\n \n privilegedMode (boolean) --Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images, and the specified build environment image is not provided by AWS CodeBuild with Docker support. Otherwise, all associated builds that attempt to interact with the Docker daemon fail. 
You must also start the Docker daemon so that builds can interact with it. One way to do this is to initialize the Docker daemon during the install phase of your build spec by running the following build commands. (Do not run these commands if the specified build environment image is provided by AWS CodeBuild with Docker support.)\n If the operating system's base image is Ubuntu Linux:\n - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 sh -c 'until docker info; do echo .; sleep 1; done'\n If the operating system's base image is Alpine Linux, add the -t argument to timeout :\n - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 -t sh -c 'until docker info; do echo .; sleep 1; done'\n certificate (string) --The certificate to use with this build project.\n \n\n :type serviceRole: string\n :param serviceRole: The replacement ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.\n\n :type timeoutInMinutes: integer\n :param timeoutInMinutes: The replacement value in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.\n\n :type queuedTimeoutInMinutes: integer\n :param queuedTimeoutInMinutes: The number of minutes a build is allowed to be queued before it times out.\n\n :type encryptionKey: string\n :param encryptionKey: The replacement AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.\n You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format ``alias/alias-name `` ).\n \n\n :type tags: list\n :param tags: The replacement set of tags for this build project.\n These tags are available for use by AWS services that support AWS CodeBuild build project tags.\n (dict) --A tag, consisting of a key and a value.\n This tag is available for use by AWS services that support tags in AWS CodeBuild.\n key (string) --The tag's key.\n value (string) --The tag's value.\n \n \n\n :type vpcConfig: dict\n :param vpcConfig: VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.\n vpcId (string) --The ID of the Amazon VPC.\n subnets (list) --A list of one or more subnet IDs in your Amazon VPC.\n (string) --\n securityGroupIds (list) --A list of one or more security group IDs in your Amazon VPC.\n (string) --\n \n\n :type badgeEnabled: boolean\n :param badgeEnabled: Set this to true to generate a publicly accessible URL for your project's build badge.\n\n :type logsConfig: dict\n :param logsConfig: Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, logs in an S3 bucket, or both.\n cloudWatchLogs (dict) --Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.\n status (string) -- [REQUIRED]The current status of the logs in Amazon CloudWatch Logs for a build project. Valid values are:\n ENABLED : Amazon CloudWatch Logs are enabled for this build project.\n DISABLED : Amazon CloudWatch Logs are not enabled for this build project.\n groupName (string) --The group name of the logs in Amazon CloudWatch Logs. 
For more information, see Working with Log Groups and Log Streams .\n streamName (string) --The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams .\n s3Logs (dict) --Information about logs built to an S3 bucket for a build project. S3 logs are not enabled by default.\n status (string) -- [REQUIRED]The current status of the S3 build logs. Valid values are:\n ENABLED : S3 build logs are enabled for this build project.\n DISABLED : S3 build logs are not enabled for this build project.\n location (string) --The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 bucket name is my-bucket , and your path prefix is build-log , then acceptable formats are my-bucket/build-log or arn:aws:s3:::my-bucket/build-log .\n \n \n\n :rtype: dict\n :return: {\n 'project': {\n 'name': 'string',\n 'arn': 'string',\n 'description': 'string',\n 'source': {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n 'secondarySources': [\n {\n 'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3'|'BITBUCKET'|'GITHUB_ENTERPRISE'|'NO_SOURCE',\n 'location': 'string',\n 'gitCloneDepth': 123,\n 'buildspec': 'string',\n 'auth': {\n 'type': 'OAUTH',\n 'resource': 'string'\n },\n 'reportBuildStatus': True|False,\n 'insecureSsl': True|False,\n 'sourceIdentifier': 'string'\n },\n ],\n 'artifacts': {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n 'secondaryArtifacts': [\n {\n 'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',\n 'location': 'string',\n 'path': 'string',\n 'namespaceType': 'NONE'|'BUILD_ID',\n 'name': 'string',\n 'packaging': 'NONE'|'ZIP',\n 'overrideArtifactName': True|False,\n 'encryptionDisabled': True|False,\n 'artifactIdentifier': 'string'\n },\n ],\n 'cache': {\n 'type': 'NO_CACHE'|'S3',\n 'location': 'string'\n },\n 'environment': {\n 'type': 'WINDOWS_CONTAINER'|'LINUX_CONTAINER',\n 'image': 'string',\n 'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',\n 'environmentVariables': [\n {\n 'name': 'string',\n 'value': 'string',\n 'type': 'PLAINTEXT'|'PARAMETER_STORE'\n },\n ],\n 'privilegedMode': True|False,\n 'certificate': 'string'\n },\n 'serviceRole': 'string',\n 'timeoutInMinutes': 123,\n 'queuedTimeoutInMinutes': 123,\n 'encryptionKey': 'string',\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ],\n 'created': datetime(2015, 1, 1),\n 'lastModified': datetime(2015, 1, 1),\n 'webhook': {\n 'url': 'string',\n 'payloadUrl': 'string',\n 'secret': 'string',\n 'branchFilter': 'string',\n 'lastModifiedSecret': datetime(2015, 1, 1)\n },\n 'vpcConfig': {\n 'vpcId': 'string',\n 'subnets': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n 'badge': {\n 'badgeEnabled': True|False,\n 'badgeRequestUrl': 'string'\n },\n 'logsConfig': {\n 'cloudWatchLogs': {\n 'status': 'ENABLED'|'DISABLED',\n 'groupName': 'string',\n 'streamName': 'string'\n },\n 's3Logs': {\n 'status': 'ENABLED'|'DISABLED',\n 'location': 'string'\n }\n }\n }\n }\n \n \n :returns: \n BITBUCKET : The 
source code is in a Bitbucket repository.\n CODECOMMIT : The source code is in an AWS CodeCommit repository.\n CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.\n GITHUB : The source code is in a GitHub repository.\n NO_SOURCE : The project does not have input source code.\n S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.\n \n \"\"\"\n pass\n\ndef update_webhook(projectName=None, branchFilter=None, rotateSecret=None):\n \"\"\"\n Updates the webhook associated with an AWS CodeBuild build project.\n See also: AWS API Documentation\n \n \n :example: response = client.update_webhook(\n projectName='string',\n branchFilter='string',\n rotateSecret=True|False\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the AWS CodeBuild project.\n \n\n :type branchFilter: string\n :param branchFilter: A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.\n\n :type rotateSecret: boolean\n :param rotateSecret: A boolean value that specifies whether the associated GitHub repository's secret token should be updated. If you use Bitbucket for your repository, rotateSecret is ignored.\n\n :rtype: dict\n :return: {\n 'webhook': {\n 'url': 'string',\n 'payloadUrl': 'string',\n 'secret': 'string',\n 'branchFilter': 'string',\n 'lastModifiedSecret': datetime(2015, 1, 1)\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6131713390350342, "alphanum_fraction": 0.617127537727356, "avg_line_length": 30.998722076416016, "blob_id": "ad82111134ffe5c349e45b748f99708ee7af7b42", "content_id": "b906ce0aaf434e3060fb49e5fd35762e82930e70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25024, "license_type": "permissive", "max_line_length": 483, "num_lines": 782, "path": "/pyboto3/licensemanager.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
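# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the generated stubs). The
# functions above only document the CodeBuild API for IDE completion, so at
# runtime the same operations are invoked on a real boto3 client. A minimal,
# hedged example follows, assuming boto3 is installed, AWS credentials are
# configured, and a CodeBuild project exists -- the project name
# 'my-sample-project' and the 10-second poll interval are hypothetical.

if __name__ == '__main__':
    import time

    import boto3

    codebuild = boto3.client('codebuild')

    # Start a build, overriding the build timeout for this run only
    # (timeoutInMinutesOverride is documented under start_build above).
    build_id = codebuild.start_build(
        projectName='my-sample-project',  # hypothetical project name
        timeoutInMinutesOverride=30,
    )['build']['id']

    # Poll until the build finishes; 'buildComplete' and 'buildStatus' are
    # fields of the Build shape shown in the return values above.
    while True:
        build = codebuild.batch_get_builds(ids=[build_id])['builds'][0]
        if build['buildComplete']:
            print('Build ended with status:', build['buildStatus'])
            break
        time.sleep(10)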
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_license_configuration(Name=None, Description=None, LicenseCountingType=None, LicenseCount=None, LicenseCountHardLimit=None, LicenseRules=None, Tags=None):\n \"\"\"\n Creates a new license configuration object. A license configuration is an abstraction of a customer license agreement that can be consumed and enforced by License Manager. Components include specifications for the license type (licensing by instance, socket, CPU, or VCPU), tenancy (shared tenancy, Amazon EC2 Dedicated Instance, Amazon EC2 Dedicated Host, or any of these), host affinity (how long a VM must be associated with a host), the number of licenses purchased and used.\n See also: AWS API Documentation\n \n \n :example: response = client.create_license_configuration(\n Name='string',\n Description='string',\n LicenseCountingType='vCPU'|'Instance'|'Core'|'Socket',\n LicenseCount=123,\n LicenseCountHardLimit=True|False,\n LicenseRules=[\n 'string',\n ],\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n Name of the license configuration.\n \n\n :type Description: string\n :param Description: Human-friendly description of the license configuration.\n\n :type LicenseCountingType: string\n :param LicenseCountingType: [REQUIRED]\n Dimension to use to track the license inventory.\n \n\n :type LicenseCount: integer\n :param LicenseCount: Number of licenses managed by the license configuration.\n\n :type LicenseCountHardLimit: boolean\n :param LicenseCountHardLimit: Flag indicating whether hard or soft license enforcement is used. Exceeding a hard limit results in the blocked deployment of new instances.\n\n :type LicenseRules: list\n :param LicenseRules: Array of configured License Manager rules.\n (string) --\n \n\n :type Tags: list\n :param Tags: The tags to apply to the resources during launch. You can only tag instances and volumes on launch. The specified tags are applied to all instances or volumes that are created during launch. To tag a resource after it has been created, see CreateTags .\n (dict) --Tag for a resource in a key-value format.\n Key (string) --Key for the resource tag.\n Value (string) --Value for the resource tag.\n \n \n\n :rtype: dict\n :return: {\n 'LicenseConfigurationArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_license_configuration(LicenseConfigurationArn=None):\n \"\"\"\n Deletes an existing license configuration. This action fails if the configuration is in use.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_license_configuration(\n LicenseConfigurationArn='string'\n )\n \n \n :type LicenseConfigurationArn: string\n :param LicenseConfigurationArn: [REQUIRED]\n Unique ID of the configuration object to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. 
By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_license_configuration(LicenseConfigurationArn=None):\n \"\"\"\n Returns a detailed description of a license configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.get_license_configuration(\n LicenseConfigurationArn='string'\n )\n \n \n :type LicenseConfigurationArn: string\n :param LicenseConfigurationArn: [REQUIRED]\n ARN of the license configuration being requested.\n \n\n :rtype: dict\n :return: {\n 'LicenseConfigurationId': 'string',\n 'LicenseConfigurationArn': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'LicenseCountingType': 'vCPU'|'Instance'|'Core'|'Socket',\n 'LicenseRules': [\n 'string',\n ],\n 'LicenseCount': 123,\n 'LicenseCountHardLimit': True|False,\n 'ConsumedLicenses': 123,\n 'Status': 'string',\n 'OwnerAccountId': 'string',\n 'ConsumedLicenseSummaryList': [\n {\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'ConsumedLicenses': 123\n },\n ],\n 'ManagedResourceSummaryList': [\n {\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'AssociationCount': 123\n },\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_service_settings():\n \"\"\"\n Gets License Manager settings for a region. Exposes the configured S3 bucket, SNS topic, etc., for inspection.\n See also: AWS API Documentation\n \n \n :example: response = client.get_service_settings()\n \n \n :rtype: dict\n :return: {\n 'S3BucketArn': 'string',\n 'SnsTopicArn': 'string',\n 'OrganizationConfiguration': {\n 'EnableIntegration': True|False\n },\n 'EnableCrossAccountsDiscovery': True|False\n }\n \n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_associations_for_license_configuration(LicenseConfigurationArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the resource associations for a license configuration. Resource associations need not consume licenses from a license configuration. For example, an AMI or a stopped instance may not consume a license (depending on the license rules). 
Use this operation to find all resources associated with a license configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.list_associations_for_license_configuration(\n LicenseConfigurationArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type LicenseConfigurationArn: string\n :param LicenseConfigurationArn: [REQUIRED]\n ARN of a LicenseConfiguration object.\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.\n\n :type NextToken: string\n :param NextToken: Token for the next set of results.\n\n :rtype: dict\n :return: {\n 'LicenseConfigurationAssociations': [\n {\n 'ResourceArn': 'string',\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'ResourceOwnerId': 'string',\n 'AssociationTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_license_configurations(LicenseConfigurationArns=None, MaxResults=None, NextToken=None, Filters=None):\n \"\"\"\n Lists license configuration objects for an account, each containing the name, description, license type, and other license terms modeled from a license agreement.\n See also: AWS API Documentation\n \n \n :example: response = client.list_license_configurations(\n LicenseConfigurationArns=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type LicenseConfigurationArns: list\n :param LicenseConfigurationArns: An array of ARNs for the calling account's license configurations.\n (string) --\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.\n\n :type NextToken: string\n :param NextToken: Token for the next set of results.\n\n :type Filters: list\n :param Filters: One or more filters.\n (dict) --A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a Describe operation are documented with the Describe operation.\n Name (string) --Name of the filter. Filter names are case-sensitive.\n Values (list) --One or more filter values. 
Filter values are case-sensitive.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'LicenseConfigurations': [\n {\n 'LicenseConfigurationId': 'string',\n 'LicenseConfigurationArn': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'LicenseCountingType': 'vCPU'|'Instance'|'Core'|'Socket',\n 'LicenseRules': [\n 'string',\n ],\n 'LicenseCount': 123,\n 'LicenseCountHardLimit': True|False,\n 'ConsumedLicenses': 123,\n 'Status': 'string',\n 'OwnerAccountId': 'string',\n 'ConsumedLicenseSummaryList': [\n {\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'ConsumedLicenses': 123\n },\n ],\n 'ManagedResourceSummaryList': [\n {\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'AssociationCount': 123\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_license_specifications_for_resource(ResourceArn=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns the license configuration for a resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_license_specifications_for_resource(\n ResourceArn='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n ARN of an AMI or Amazon EC2 instance that has an associated license configuration.\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.\n\n :type NextToken: string\n :param NextToken: Token for the next set of results.\n\n :rtype: dict\n :return: {\n 'LicenseSpecifications': [\n {\n 'LicenseConfigurationArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_resource_inventory(MaxResults=None, NextToken=None, Filters=None):\n \"\"\"\n Returns a detailed list of resources.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resource_inventory(\n MaxResults=123,\n NextToken='string',\n Filters=[\n {\n 'Name': 'string',\n 'Condition': 'EQUALS'|'NOT_EQUALS'|'BEGINS_WITH'|'CONTAINS',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return in a single call. 
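An illustrative single-page call to list_resource_inventory; the filter name and value below are assumptions about what the service accepts, so treat them as placeholders:

import boto3

client = boto3.client('license-manager')
response = client.list_resource_inventory(
    MaxResults=50,
    Filters=[
        # Hypothetical filter: narrow the inventory to one platform.
        {'Name': 'Platform', 'Condition': 'EQUALS', 'Value': 'windows'},
    ],
)
for item in response.get('ResourceInventoryList', []):
    print(item['ResourceId'], item['ResourceType'], item.get('Platform'))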
To retrieve the remaining results, make another call with the returned NextToken value.\n\n :type NextToken: string\n :param NextToken: Token for the next set of results.\n\n :type Filters: list\n :param Filters: One or more filters.\n (dict) --An inventory filter object.\n Name (string) -- [REQUIRED]The name of the filter.\n Condition (string) -- [REQUIRED]The condition of the filter.\n Value (string) --Value of the filter.\n \n \n\n :rtype: dict\n :return: {\n 'ResourceInventoryList': [\n {\n 'ResourceId': 'string',\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'ResourceArn': 'string',\n 'Platform': 'string',\n 'PlatformVersion': 'string',\n 'ResourceOwningAccountId': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tags_for_resource(ResourceArn=None):\n \"\"\"\n Lists tags attached to a resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n ResourceArn='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n ARN for the resource.\n \n\n :rtype: dict\n :return: {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_usage_for_license_configuration(LicenseConfigurationArn=None, MaxResults=None, NextToken=None, Filters=None):\n \"\"\"\n Lists all license usage records for a license configuration, displaying license consumption details by resource at a selected point in time. Use this action to audit the current license consumption for any license inventory and configuration.\n See also: AWS API Documentation\n \n \n :example: response = client.list_usage_for_license_configuration(\n LicenseConfigurationArn='string',\n MaxResults=123,\n NextToken='string',\n Filters=[\n {\n 'Name': 'string',\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type LicenseConfigurationArn: string\n :param LicenseConfigurationArn: [REQUIRED]\n ARN of the targeted LicenseConfiguration object.\n \n\n :type MaxResults: integer\n :param MaxResults: Maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.\n\n :type NextToken: string\n :param NextToken: Token for the next set of results.\n\n :type Filters: list\n :param Filters: List of filters to apply.\n (dict) --A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a Describe operation are documented with the Describe operation.\n Name (string) --Name of the filter. Filter names are case-sensitive.\n Values (list) --One or more filter values. 
Filter values are case-sensitive.\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'LicenseConfigurationUsageList': [\n {\n 'ResourceArn': 'string',\n 'ResourceType': 'EC2_INSTANCE'|'EC2_HOST'|'EC2_AMI',\n 'ResourceStatus': 'string',\n 'ResourceOwnerId': 'string',\n 'AssociationTime': datetime(2015, 1, 1),\n 'ConsumedLicenses': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(ResourceArn=None, Tags=None):\n \"\"\"\n Attach one of more tags to any resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n ResourceArn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n Resource of the ARN to be tagged.\n \n\n :type Tags: list\n :param Tags: [REQUIRED]\n Names of the tags to attach to the resource.\n (dict) --Tag for a resource in a key-value format.\n Key (string) --Key for the resource tag.\n Value (string) --Value for the resource tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(ResourceArn=None, TagKeys=None):\n \"\"\"\n Remove tags from a resource.\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n ResourceArn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n ARN of the resource.\n \n\n :type TagKeys: list\n :param TagKeys: [REQUIRED]\n List keys identifying tags to remove.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_license_configuration(LicenseConfigurationArn=None, LicenseConfigurationStatus=None, LicenseRules=None, LicenseCount=None, LicenseCountHardLimit=None, Name=None, Description=None):\n \"\"\"\n Modifies the attributes of an existing license configuration object. A license configuration is an abstraction of a customer license agreement that can be consumed and enforced by License Manager. 
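An illustrative update call using the parameters documented below; the ARN is a placeholder and the new values are arbitrary:

import boto3

client = boto3.client('license-manager')
client.update_license_configuration(
    LicenseConfigurationArn='arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-example',
    LicenseCount=200,            # raise the number of managed licenses
    LicenseCountHardLimit=True,  # enforce the count as a hard limit
    Description='Raised the purchased-license ceiling',
)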
Components include specifications for the license type (Instances, cores, sockets, VCPUs), tenancy (shared or Dedicated Host), host affinity (how long a VM is associated with a host), the number of licenses purchased and used.\n See also: AWS API Documentation\n \n \n :example: response = client.update_license_configuration(\n LicenseConfigurationArn='string',\n LicenseConfigurationStatus='AVAILABLE'|'DISABLED',\n LicenseRules=[\n 'string',\n ],\n LicenseCount=123,\n LicenseCountHardLimit=True|False,\n Name='string',\n Description='string'\n )\n \n \n :type LicenseConfigurationArn: string\n :param LicenseConfigurationArn: [REQUIRED]\n ARN for a license configuration.\n \n\n :type LicenseConfigurationStatus: string\n :param LicenseConfigurationStatus: New status of the license configuration (ACTIVE or INACTIVE ).\n\n :type LicenseRules: list\n :param LicenseRules: List of flexible text strings designating license rules.\n (string) --\n \n\n :type LicenseCount: integer\n :param LicenseCount: New number of licenses managed by the license configuration.\n\n :type LicenseCountHardLimit: boolean\n :param LicenseCountHardLimit: Sets the number of available licenses as a hard limit.\n\n :type Name: string\n :param Name: New name of the license configuration.\n\n :type Description: string\n :param Description: New human-friendly description of the license configuration.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_license_specifications_for_resource(ResourceArn=None, AddLicenseSpecifications=None, RemoveLicenseSpecifications=None):\n \"\"\"\n Adds or removes license configurations for a specified AWS resource. This operation currently supports updating the license specifications of AMIs, instances, and hosts. 
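A sketch of attaching one license configuration to an EC2 instance through this call; both ARNs are placeholders:

import boto3

client = boto3.client('license-manager')
client.update_license_specifications_for_resource(
    ResourceArn='arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0',
    AddLicenseSpecifications=[
        {'LicenseConfigurationArn': 'arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-example'},
    ],
)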
Launch templates and AWS CloudFormation templates are not managed from this operation as those resources send the license configurations directly to a resource creation operation, such as RunInstances .\n See also: AWS API Documentation\n \n \n :example: response = client.update_license_specifications_for_resource(\n ResourceArn='string',\n AddLicenseSpecifications=[\n {\n 'LicenseConfigurationArn': 'string'\n },\n ],\n RemoveLicenseSpecifications=[\n {\n 'LicenseConfigurationArn': 'string'\n },\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\n ARN for an AWS server resource.\n \n\n :type AddLicenseSpecifications: list\n :param AddLicenseSpecifications: License configuration ARNs to be added to a resource.\n (dict) --Object used for associating a license configuration with a resource.\n LicenseConfigurationArn (string) -- [REQUIRED]ARN of the LicenseConfiguration object.\n \n \n\n :type RemoveLicenseSpecifications: list\n :param RemoveLicenseSpecifications: License configuration ARNs to be removed from a resource.\n (dict) --Object used for associating a license configuration with a resource.\n LicenseConfigurationArn (string) -- [REQUIRED]ARN of the LicenseConfiguration object.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_service_settings(S3BucketArn=None, SnsTopicArn=None, OrganizationConfiguration=None, EnableCrossAccountsDiscovery=None):\n \"\"\"\n Updates License Manager service settings.\n See also: AWS API Documentation\n \n \n :example: response = client.update_service_settings(\n S3BucketArn='string',\n SnsTopicArn='string',\n OrganizationConfiguration={\n 'EnableIntegration': True|False\n },\n EnableCrossAccountsDiscovery=True|False\n )\n \n \n :type S3BucketArn: string\n :param S3BucketArn: ARN of the Amazon S3 bucket where License Manager information is stored.\n\n :type SnsTopicArn: string\n :param SnsTopicArn: ARN of the Amazon SNS topic used for License Manager alerts.\n\n :type OrganizationConfiguration: dict\n :param OrganizationConfiguration: Integrates AWS Organizations with License Manager for cross-account discovery.\n EnableIntegration (boolean) -- [REQUIRED]Flag to activate AWS Organization integration.\n \n\n :type EnableCrossAccountsDiscovery: boolean\n :param EnableCrossAccountsDiscovery: Activates cross-account discovery.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6169868111610413, "alphanum_fraction": 0.625530481338501, "avg_line_length": 41.73746871948242, "blob_id": "7e81755085f5675e5907067e5693a7adcf8a45b9", "content_id": "c089b13144188bdf920a2c536c6162fc19f5abbb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17908, "license_type": "permissive", "max_line_length": 387, "num_lines": 419, "path": "/pyboto3/pi.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in 
all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef describe_dimension_keys(ServiceType=None, Identifier=None, StartTime=None, EndTime=None, Metric=None, PeriodInSeconds=None, GroupBy=None, PartitionBy=None, Filter=None, MaxResults=None, NextToken=None):\n \"\"\"\n For a specific time period, retrieve the top N dimension keys for a metric.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_dimension_keys(\n ServiceType='RDS',\n Identifier='string',\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n Metric='string',\n PeriodInSeconds=123,\n GroupBy={\n 'Group': 'string',\n 'Dimensions': [\n 'string',\n ],\n 'Limit': 123\n },\n PartitionBy={\n 'Group': 'string',\n 'Dimensions': [\n 'string',\n ],\n 'Limit': 123\n },\n Filter={\n 'string': 'string'\n },\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ServiceType: string\n :param ServiceType: [REQUIRED]\n The AWS service for which Performance Insights will return metrics. The only valid value for ServiceType is: RDS\n \n\n :type Identifier: string\n :param Identifier: [REQUIRED]\n An immutable, AWS Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.\n To use an Amazon RDS instance as a data source, you specify its DbiResourceId value - for example: db-FAIHNTYBKTGAUSUZQYPDS2GW4A\n \n\n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The date and time specifying the beginning of the requested time series data. You can't specify a StartTime that's earlier than 7 days ago. The value specified is inclusive - data points equal to or greater than StartTime will be returned.\n The value for StartTime must be earlier than the value for EndTime .\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The date and time specifying the end of the requested time series data. The value specified is exclusive - data points less than (but not equal to) EndTime will be returned.\n The value for EndTime must be later than the value for StartTime .\n \n\n :type Metric: string\n :param Metric: [REQUIRED]\n The name of a Performance Insights metric to be measured.\n Valid values for Metric are:\n db.load.avg - a scaled representation of the number of active sessions for the database engine.\n db.sampledload.avg - the raw number of active sessions for the database engine.\n \n\n :type PeriodInSeconds: integer\n :param PeriodInSeconds: The granularity, in seconds, of the data points returned from Performance Insights. A period can be as short as one second, or as long as one day (86400 seconds). 
Valid values are:\n 1 (one second)\n 60 (one minute)\n 300 (five minutes)\n 3600 (one hour)\n 86400 (twenty-four hours)\n If you don't specify PeriodInSeconds , then Performance Insights will choose a value for you, with a goal of returning roughly 100-200 data points in the response.\n \n\n :type GroupBy: dict\n :param GroupBy: [REQUIRED]\n A specification for how to aggregate the data points from a query result. You must specify a valid dimension group. Performance Insights will return all of the dimensions within that group, unless you provide the names of specific dimensions within that group. You can also request that Performance Insights return a limited number of values for a dimension.\n Group (string) -- [REQUIRED]The name of the dimension group. Valid values are:\n db.user\n db.host\n db.sql\n db.sql_tokenized\n db.wait_event\n db.wait_event_type\n Dimensions (list) --A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.\n Valid values for elements in the Dimensions array are:\n db.user.id\n db.user.name\n db.host.id\n db.host.name\n db.sql.id\n db.sql.db_id\n db.sql.statement\n db.sql.tokenized_id\n db.sql_tokenized.id\n db.sql_tokenized.db_id\n db.sql_tokenized.statement\n db.wait_event.name\n db.wait_event.type\n db.wait_event_type.name\n (string) --\n Limit (integer) --The maximum number of items to fetch for this dimension group.\n \n\n :type PartitionBy: dict\n :param PartitionBy: For each dimension specified in GroupBy , specify a secondary dimension to further subdivide the partition keys in the response.\n Group (string) -- [REQUIRED]The name of the dimension group. Valid values are:\n db.user\n db.host\n db.sql\n db.sql_tokenized\n db.wait_event\n db.wait_event_type\n Dimensions (list) --A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.\n Valid values for elements in the Dimensions array are:\n db.user.id\n db.user.name\n db.host.id\n db.host.name\n db.sql.id\n db.sql.db_id\n db.sql.statement\n db.sql.tokenized_id\n db.sql_tokenized.id\n db.sql_tokenized.db_id\n db.sql_tokenized.statement\n db.wait_event.name\n db.wait_event.type\n db.wait_event_type.name\n (string) --\n Limit (integer) --The maximum number of items to fetch for this dimension group.\n \n\n :type Filter: dict\n :param Filter: One or more filters to apply in the request. Restrictions:\n Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters.\n A single filter for any other dimension in this dimension group.\n (string) --\n (string) --\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved.\n\n :type NextToken: string\n :param NextToken: An optional pagination token provided by a previous request. 
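Putting those parameters together, a hedged end-to-end sketch that asks for the top wait events over the last hour; the DbiResourceId is a placeholder:

import datetime
import boto3

client = boto3.client('pi')
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)
response = client.describe_dimension_keys(
    ServiceType='RDS',
    Identifier='db-EXAMPLERESOURCEID',  # placeholder DbiResourceId
    StartTime=start,
    EndTime=end,
    Metric='db.load.avg',
    PeriodInSeconds=60,
    GroupBy={'Group': 'db.wait_event', 'Limit': 10},
)
for key in response.get('Keys', []):
    print(key['Dimensions'], key['Total'])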
If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'AlignedStartTime': datetime(2015, 1, 1),\n 'AlignedEndTime': datetime(2015, 1, 1),\n 'PartitionKeys': [\n {\n 'Dimensions': {\n 'string': 'string'\n }\n },\n ],\n 'Keys': [\n {\n 'Dimensions': {\n 'string': 'string'\n },\n 'Total': 123.0,\n 'Partitions': [\n 123.0,\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_resource_metrics(ServiceType=None, Identifier=None, MetricQueries=None, StartTime=None, EndTime=None, PeriodInSeconds=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieve Performance Insights metrics for a set of data sources, over a time period. You can provide specific dimension groups and dimensions, and provide aggregation and filtering criteria for each group.\n See also: AWS API Documentation\n \n \n :example: response = client.get_resource_metrics(\n ServiceType='RDS',\n Identifier='string',\n MetricQueries=[\n {\n 'Metric': 'string',\n 'GroupBy': {\n 'Group': 'string',\n 'Dimensions': [\n 'string',\n ],\n 'Limit': 123\n },\n 'Filter': {\n 'string': 'string'\n }\n },\n ],\n StartTime=datetime(2015, 1, 1),\n EndTime=datetime(2015, 1, 1),\n PeriodInSeconds=123,\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type ServiceType: string\n :param ServiceType: [REQUIRED]\n The AWS service for which Performance Insights will return metrics. The only valid value for ServiceType is: RDS\n \n\n :type Identifier: string\n :param Identifier: [REQUIRED]\n An immutable, AWS Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.\n To use an Amazon RDS instance as a data source, you specify its DbiResourceId value - for example: db-FAIHNTYBKTGAUSUZQYPDS2GW4A\n \n\n :type MetricQueries: list\n :param MetricQueries: [REQUIRED]\n An array of one or more queries to perform. Each query must specify a Performance Insights metric, and can optionally specify aggregation and filtering criteria.\n (dict) --A single query to be processed. You must provide the metric to query. If no other parameters are specified, Performance Insights returns all of the data points for that metric. 
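For instance, a sketch of a single grouped query through get_resource_metrics, mirroring the shape shown in the example above; the identifier is a placeholder:

import datetime
import boto3

client = boto3.client('pi')
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)
response = client.get_resource_metrics(
    ServiceType='RDS',
    Identifier='db-EXAMPLERESOURCEID',
    MetricQueries=[
        # One query: average load, broken down by wait event.
        {'Metric': 'db.load.avg', 'GroupBy': {'Group': 'db.wait_event'}},
    ],
    StartTime=start,
    EndTime=end,
    PeriodInSeconds=300,
)
for series in response.get('MetricList', []):
    print(series['Key'], len(series.get('DataPoints', [])))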
You can optionally request that the data points be aggregated by dimension group ( GroupBy ), and return only those data points that match your criteria (Filter ).\n Metric (string) -- [REQUIRED]The name of a Performance Insights metric to be measured.\n Valid values for Metric are:\n db.load.avg - a scaled representation of the number of active sessions for the database engine.\n db.sampledload.avg - the raw number of active sessions for the database engine.\n GroupBy (dict) --A specification for how to aggregate the data points from a query result. You must specify a valid dimension group. Performance Insights will return all of the dimensions within that group, unless you provide the names of specific dimensions within that group. You can also request that Performance Insights return a limited number of values for a dimension.\n Group (string) -- [REQUIRED]The name of the dimension group. Valid values are:\n db.user\n db.host\n db.sql\n db.sql_tokenized\n db.wait_event\n db.wait_event_type\n Dimensions (list) --A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.\n Valid values for elements in the Dimensions array are:\n db.user.id\n db.user.name\n db.host.id\n db.host.name\n db.sql.id\n db.sql.db_id\n db.sql.statement\n db.sql.tokenized_id\n db.sql_tokenized.id\n db.sql_tokenized.db_id\n db.sql_tokenized.statement\n db.wait_event.name\n db.wait_event.type\n db.wait_event_type.name\n (string) --\n Limit (integer) --The maximum number of items to fetch for this dimension group.\n Filter (dict) --One or more filters to apply in the request. Restrictions:\n Any number of filters by the same dimension, as specified in the GroupBy parameter.\n A single filter for any other dimension in this dimension group.\n (string) --\n (string) --\n \n \n\n :type StartTime: datetime\n :param StartTime: [REQUIRED]\n The date and time specifying the beginning of the requested time series data. You can't specify a StartTime that's earlier than 7 days ago. The value specified is inclusive - data points equal to or greater than StartTime will be returned.\n The value for StartTime must be earlier than the value for EndTime .\n \n\n :type EndTime: datetime\n :param EndTime: [REQUIRED]\n The date and time specifiying the end of the requested time series data. The value specified is exclusive - data points less than (but not equal to) EndTime will be returned.\n The value for EndTime must be later than the value for StartTime .\n \n\n :type PeriodInSeconds: integer\n :param PeriodInSeconds: The granularity, in seconds, of the data points returned from Performance Insights. A period can be as short as one second, or as long as one day (86400 seconds). Valid values are:\n 1 (one second)\n 60 (one minute)\n 300 (five minutes)\n 3600 (one hour)\n 86400 (twenty-four hours)\n If you don't specify PeriodInSeconds , then Performance Insights will choose a value for you, with a goal of returning roughly 100-200 data points in the response.\n \n\n :type MaxResults: integer\n :param MaxResults: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved.\n\n :type NextToken: string\n :param NextToken: An optional pagination token provided by a previous request. 
If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords .\n\n :rtype: dict\n :return: {\n 'AlignedStartTime': datetime(2015, 1, 1),\n 'AlignedEndTime': datetime(2015, 1, 1),\n 'Identifier': 'string',\n 'MetricList': [\n {\n 'Key': {\n 'Metric': 'string',\n 'Dimensions': {\n 'string': 'string'\n }\n },\n 'DataPoints': [\n {\n 'Timestamp': datetime(2015, 1, 1),\n 'Value': 123.0\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n db.load.avg - a scaled representation of the number of active sessions for the database engine.\n db.sampledload.avg - the raw number of active sessions for the database engine.\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5939329266548157, "alphanum_fraction": 0.602816641330719, "avg_line_length": 33.09928894042969, "blob_id": "b01c974071c92ce91941515d8811c4ca157244f2", "content_id": "a60874575e8f7dc40e885edb4e9fcabddd9c24c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33657, "license_type": "permissive", "max_line_length": 940, "num_lines": 987, "path": "/pyboto3/sfn.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_activity(name=None):\n \"\"\"\n Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. 
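A minimal worker sketch under that poll-and-respond model; the activity ARN is a placeholder, and a production worker would also need error handling and send_task_heartbeat calls:

import json
import boto3

sfn = boto3.client('stepfunctions')
while True:
    task = sfn.get_activity_task(
        activityArn='arn:aws:states:us-east-1:123456789012:activity:example-activity',
        workerName='example-worker',
    )
    token = task.get('taskToken')
    if not token:
        continue  # the 60-second long poll expired with no work; poll again
    payload = json.loads(task['input'])
    # Do the real work here, then report success with a JSON output.
    sfn.send_task_success(taskToken=token, output=json.dumps({'echo': payload}))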
This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.\n See also: AWS API Documentation\n \n \n :example: response = client.create_activity(\n name='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide .\n A name must not contain:\n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters ' # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n\n :rtype: dict\n :return: {\n 'activityArn': 'string',\n 'creationDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef create_state_machine(name=None, definition=None, roleArn=None):\n \"\"\"\n Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.\n See also: AWS API Documentation\n \n \n :example: response = client.create_state_machine(\n name='string',\n definition='string',\n roleArn='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the state machine.\n A name must not contain:\n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters ' # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n\n :type definition: string\n :param definition: [REQUIRED]\n The Amazon States Language definition of the state machine. See Amazon States Language .\n \n\n :type roleArn: string\n :param roleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role to use for this state machine.\n \n\n :rtype: dict\n :return: {\n 'stateMachineArn': 'string',\n 'creationDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef delete_activity(activityArn=None):\n \"\"\"\n Deletes an activity.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_activity(\n activityArn='string'\n )\n \n \n :type activityArn: string\n :param activityArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the activity to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_state_machine(stateMachineArn=None):\n \"\"\"\n Deletes a state machine. This is an asynchronous operation: It sets the state machine's status to DELETING and begins the deletion process. 
Each state machine execution is deleted the next time it makes a state transition.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_state_machine(\n stateMachineArn='string'\n )\n \n \n :type stateMachineArn: string\n :param stateMachineArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the state machine to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_activity(activityArn=None):\n \"\"\"\n Describes an activity.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_activity(\n activityArn='string'\n )\n \n \n :type activityArn: string\n :param activityArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the activity to describe.\n \n\n :rtype: dict\n :return: {\n 'activityArn': 'string',\n 'name': 'string',\n 'creationDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_execution(executionArn=None):\n \"\"\"\n Describes an execution.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_execution(\n executionArn='string'\n )\n \n \n :type executionArn: string\n :param executionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the execution to describe.\n \n\n :rtype: dict\n :return: {\n 'executionArn': 'string',\n 'stateMachineArn': 'string',\n 'name': 'string',\n 'status': 'RUNNING'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'ABORTED',\n 'startDate': datetime(2015, 1, 1),\n 'stopDate': datetime(2015, 1, 1),\n 'input': 'string',\n 'output': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_state_machine(stateMachineArn=None):\n \"\"\"\n Describes a state machine.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_state_machine(\n stateMachineArn='string'\n )\n \n \n :type stateMachineArn: string\n :param stateMachineArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the state machine to describe.\n \n\n :rtype: dict\n :return: {\n 'stateMachineArn': 'string',\n 'name': 'string',\n 'status': 'ACTIVE'|'DELETING',\n 'definition': 'string',\n 'roleArn': 'string',\n 'creationDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef describe_state_machine_for_execution(executionArn=None):\n \"\"\"\n Describes the state machine associated with a specific execution.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_state_machine_for_execution(\n executionArn='string'\n )\n \n \n :type executionArn: string\n :param executionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the execution you want state machine information for.\n \n\n :rtype: dict\n :return: {\n 'stateMachineArn': 'string',\n 'name': 'string',\n 'definition': 'string',\n 'roleArn': 'string',\n 'updateDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_activity_task(activityArn=None, workerName=None):\n \"\"\"\n Used by workers to retrieve a task (with the specified activity ARN) which has been scheduled for execution by a running state machine. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available (i.e. an execution of a task of this type is needed.) The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns a taskToken with a null string.\n See also: AWS API Documentation\n \n \n :example: response = client.get_activity_task(\n activityArn='string',\n workerName='string'\n )\n \n \n :type activityArn: string\n :param activityArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the activity to retrieve tasks from (assigned when you create the task using CreateActivity .)\n \n\n :type workerName: string\n :param workerName: You can provide an arbitrary name in order to identify the worker that the task is assigned to. This name is used when it is logged in the execution history.\n\n :rtype: dict\n :return: {\n 'taskToken': 'string',\n 'input': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_execution_history(executionArn=None, maxResults=None, reverseOrder=None, nextToken=None):\n \"\"\"\n Returns the history of the specified execution as a list of events. By default, the results are returned in ascending order of the timeStamp of the events. Use the reverseOrder parameter to get the latest events first.\n If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.\n See also: AWS API Documentation\n \n \n :example: response = client.get_execution_history(\n executionArn='string',\n maxResults=123,\n reverseOrder=True|False,\n nextToken='string'\n )\n \n \n :type executionArn: string\n :param executionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the execution.\n \n\n :type maxResults: integer\n :param maxResults: The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.\n This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.\n \n\n :type reverseOrder: boolean\n :param reverseOrder: Lists events in descending order of their timeStamp .\n\n :type nextToken: string\n :param nextToken: If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. 
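One way to avoid handling nextToken and its 24-hour expiry by hand is the paginator interface from get_paginator, documented later in this module — assuming, as for most list-style operations, that a paginator is registered for this call; the execution ARN is a placeholder:

import boto3

sfn = boto3.client('stepfunctions')
paginator = sfn.get_paginator('get_execution_history')
pages = paginator.paginate(
    executionArn='arn:aws:states:us-east-1:123456789012:execution:example-machine:example-run'
)
for page in pages:
    for event in page['events']:
        print(event['id'], event['type'], event['timestamp'])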
Using an expired pagination token will return an HTTP 400 InvalidToken error.\n\n :rtype: dict\n :return: {\n 'events': [\n {\n 'timestamp': datetime(2015, 1, 1),\n 'type': 'ActivityFailed'|'ActivityScheduleFailed'|'ActivityScheduled'|'ActivityStarted'|'ActivitySucceeded'|'ActivityTimedOut'|'ChoiceStateEntered'|'ChoiceStateExited'|'TaskFailed'|'TaskScheduled'|'TaskStartFailed'|'TaskStarted'|'TaskSubmitFailed'|'TaskSubmitted'|'TaskSucceeded'|'TaskTimedOut'|'ExecutionFailed'|'ExecutionStarted'|'ExecutionSucceeded'|'ExecutionAborted'|'ExecutionTimedOut'|'FailStateEntered'|'LambdaFunctionFailed'|'LambdaFunctionScheduleFailed'|'LambdaFunctionScheduled'|'LambdaFunctionStartFailed'|'LambdaFunctionStarted'|'LambdaFunctionSucceeded'|'LambdaFunctionTimedOut'|'SucceedStateEntered'|'SucceedStateExited'|'TaskStateAborted'|'TaskStateEntered'|'TaskStateExited'|'PassStateEntered'|'PassStateExited'|'ParallelStateAborted'|'ParallelStateEntered'|'ParallelStateExited'|'ParallelStateFailed'|'ParallelStateStarted'|'ParallelStateSucceeded'|'WaitStateAborted'|'WaitStateEntered'|'WaitStateExited',\n 'id': 123,\n 'previousEventId': 123,\n 'activityFailedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'activityScheduleFailedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'activityScheduledEventDetails': {\n 'resource': 'string',\n 'input': 'string',\n 'timeoutInSeconds': 123,\n 'heartbeatInSeconds': 123\n },\n 'activityStartedEventDetails': {\n 'workerName': 'string'\n },\n 'activitySucceededEventDetails': {\n 'output': 'string'\n },\n 'activityTimedOutEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'taskFailedEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'error': 'string',\n 'cause': 'string'\n },\n 'taskScheduledEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'region': 'string',\n 'parameters': 'string',\n 'timeoutInSeconds': 123\n },\n 'taskStartFailedEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'error': 'string',\n 'cause': 'string'\n },\n 'taskStartedEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string'\n },\n 'taskSubmitFailedEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'error': 'string',\n 'cause': 'string'\n },\n 'taskSubmittedEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'output': 'string'\n },\n 'taskSucceededEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'output': 'string'\n },\n 'taskTimedOutEventDetails': {\n 'resourceType': 'string',\n 'resource': 'string',\n 'error': 'string',\n 'cause': 'string'\n },\n 'executionFailedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'executionStartedEventDetails': {\n 'input': 'string',\n 'roleArn': 'string'\n },\n 'executionSucceededEventDetails': {\n 'output': 'string'\n },\n 'executionAbortedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'executionTimedOutEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'lambdaFunctionFailedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'lambdaFunctionScheduleFailedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'lambdaFunctionScheduledEventDetails': {\n 'resource': 'string',\n 'input': 'string',\n 'timeoutInSeconds': 123\n },\n 'lambdaFunctionStartFailedEventDetails': {\n 'error': 'string',\n 'cause': 'string'\n },\n 'lambdaFunctionSucceededEventDetails': {\n 'output': 'string'\n },\n 'lambdaFunctionTimedOutEventDetails': {\n 'error': 
'string',\n 'cause': 'string'\n },\n 'stateEnteredEventDetails': {\n 'name': 'string',\n 'input': 'string'\n },\n 'stateExitedEventDetails': {\n 'name': 'string',\n 'output': 'string'\n }\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters \" # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_activities(maxResults=None, nextToken=None):\n \"\"\"\n Lists the existing activities.\n If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.\n See also: AWS API Documentation\n \n \n :example: response = client.list_activities(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.\n This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.\n \n\n :type nextToken: string\n :param nextToken: If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.\n\n :rtype: dict\n :return: {\n 'activities': [\n {\n 'activityArn': 'string',\n 'name': 'string',\n 'creationDate': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters \" # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n \"\"\"\n pass\n\ndef list_executions(stateMachineArn=None, statusFilter=None, maxResults=None, nextToken=None):\n \"\"\"\n Lists the executions of a state machine that meet the filtering criteria. Results are sorted by time, with the most recent execution first.\n If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. 
Using an expired pagination token will return an HTTP 400 InvalidToken error.\n See also: AWS API Documentation\n \n \n :example: response = client.list_executions(\n stateMachineArn='string',\n statusFilter='RUNNING'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'ABORTED',\n maxResults=123,\n nextToken='string'\n )\n \n \n :type stateMachineArn: string\n :param stateMachineArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the state machine whose executions is listed.\n \n\n :type statusFilter: string\n :param statusFilter: If specified, only list the executions whose current execution status matches the given filter.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.\n This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.\n \n\n :type nextToken: string\n :param nextToken: If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.\n\n :rtype: dict\n :return: {\n 'executions': [\n {\n 'executionArn': 'string',\n 'stateMachineArn': 'string',\n 'name': 'string',\n 'status': 'RUNNING'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'ABORTED',\n 'startDate': datetime(2015, 1, 1),\n 'stopDate': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters \" # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n \"\"\"\n pass\n\ndef list_state_machines(maxResults=None, nextToken=None):\n \"\"\"\n Lists the existing state machines.\n If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.\n See also: AWS API Documentation\n \n \n :example: response = client.list_state_machines(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.\n This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.\n \n\n :type nextToken: string\n :param nextToken: If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. 
Using an expired pagination token will return an HTTP 400 InvalidToken error.\n\n :rtype: dict\n :return: {\n 'stateMachines': [\n {\n 'stateMachineArn': 'string',\n 'name': 'string',\n 'creationDate': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters \" # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n \"\"\"\n pass\n\ndef list_tags_for_resource(resourceArn=None):\n \"\"\"\n List tags for a given resource.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tags_for_resource(\n resourceArn='string'\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) for the Step Functions state machine or activity.\n \n\n :rtype: dict\n :return: {\n 'tags': [\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef send_task_failure(taskToken=None, error=None, cause=None):\n \"\"\"\n Used by workers to report that the task identified by the taskToken failed.\n See also: AWS API Documentation\n \n \n :example: response = client.send_task_failure(\n taskToken='string',\n error='string',\n cause='string'\n )\n \n \n :type taskToken: string\n :param taskToken: [REQUIRED]\n The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTask::taskToken).\n \n\n :type error: string\n :param error: The error code of the failure.\n\n :type cause: string\n :param cause: A more detailed explanation of the cause of the failure.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef send_task_heartbeat(taskToken=None):\n \"\"\"\n Used by workers to report to the service that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition. This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut event.\n See also: AWS API Documentation\n \n \n :example: response = client.send_task_heartbeat(\n taskToken='string'\n )\n \n \n :type taskToken: string\n :param taskToken: [REQUIRED]\n The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken ).\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef send_task_success(taskToken=None, output=None):\n \"\"\"\n Used by workers to report that the task identified by the taskToken completed successfully.\n See also: AWS API Documentation\n \n \n :example: response = client.send_task_success(\n taskToken='string',\n output='string'\n )\n \n \n :type taskToken: string\n :param taskToken: [REQUIRED]\n The token that represents this task. 
Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken ).\n \n\n :type output: string\n :param output: [REQUIRED]\n The JSON output of the task.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef start_execution(stateMachineArn=None, name=None, input=None):\n \"\"\"\n Starts a state machine execution.\n See also: AWS API Documentation\n \n \n :example: response = client.start_execution(\n stateMachineArn='string',\n name='string',\n input='string'\n )\n \n \n :type stateMachineArn: string\n :param stateMachineArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the state machine to execute.\n \n\n :type name: string\n :param name: The name of the execution. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide .\n A name must not contain:\n whitespace\n brackets < > { } [ ]\n wildcard characters ? *\n special characters ' # % \\ ^ | ~ ` $ & , ; : /\n control characters (U+0000-001F , U+007F-009F )\n \n\n :type input: string\n :param input: The string that contains the JSON input data for the execution, for example:\n 'input': '{\\'first_name\\' : \\'test\\'}'\n Note\n If you don't include any JSON input data, you still must include the two braces, for example: 'input': '{}'\n \n\n :rtype: dict\n :return: {\n 'executionArn': 'string',\n 'startDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef stop_execution(executionArn=None, error=None, cause=None):\n \"\"\"\n Stops an execution.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_execution(\n executionArn='string',\n error='string',\n cause='string'\n )\n \n \n :type executionArn: string\n :param executionArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the execution to stop.\n \n\n :type error: string\n :param error: The error code of the failure.\n\n :type cause: string\n :param cause: A more detailed explanation of the cause of the failure.\n\n :rtype: dict\n :return: {\n 'stopDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef tag_resource(resourceArn=None, tags=None):\n \"\"\"\n Add a tag to a Step Functions resource.\n See also: AWS API Documentation\n \n \n :example: response = client.tag_resource(\n resourceArn='string',\n tags=[\n {\n 'key': 'string',\n 'value': 'string'\n },\n ]\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) for the Step Functions state machine or activity.\n \n\n :type tags: list\n :param tags: [REQUIRED]\n The list of tags to add to a resource.\n Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . 
: / = + - @ .\n (dict) --Tags are key-value pairs that can be associated with Step Functions state machines and activities.\n key (string) --The key of a tag.\n value (string) --The value of a tag.\n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(resourceArn=None, tagKeys=None):\n \"\"\"\n Remove a tag from a Step Functions resource\n See also: AWS API Documentation\n \n \n :example: response = client.untag_resource(\n resourceArn='string',\n tagKeys=[\n 'string',\n ]\n )\n \n \n :type resourceArn: string\n :param resourceArn: [REQUIRED]\n The Amazon Resource Name (ARN) for the Step Functions state machine or activity.\n \n\n :type tagKeys: list\n :param tagKeys: [REQUIRED]\n The list of tags to remove from the resource.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_state_machine(stateMachineArn=None, definition=None, roleArn=None):\n \"\"\"\n Updates an existing state machine by modifying its definition and/or roleArn . Running executions will continue to use the previous definition and roleArn . You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error.\n See also: AWS API Documentation\n \n \n :example: response = client.update_state_machine(\n stateMachineArn='string',\n definition='string',\n roleArn='string'\n )\n \n \n :type stateMachineArn: string\n :param stateMachineArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the state machine.\n \n\n :type definition: string\n :param definition: The Amazon States Language definition of the state machine. See Amazon States Language .\n\n :type roleArn: string\n :param roleArn: The Amazon Resource Name (ARN) of the IAM role of the state machine.\n\n :rtype: dict\n :return: {\n 'updateDate': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.49791377782821655, "alphanum_fraction": 0.5022425651550293, "avg_line_length": 43.710933685302734, "blob_id": "f79693b1705d2abcc90d71fb0ee018eb51e063a5", "content_id": "3ed8757063a8f7f5bd89cb0204ed339b83928d22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268669, "license_type": "permissive", "max_line_length": 398, "num_lines": 6009, "path": "/pyboto3/securityhub.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef accept_invitation(MasterId=None, InvitationId=None):\n \"\"\"\n Accepts the invitation to be monitored by a master SecurityHub account.\n See also: AWS API Documentation\n \n \n :example: response = client.accept_invitation(\n MasterId='string',\n InvitationId='string'\n )\n \n \n :type MasterId: string\n :param MasterId: The account ID of the master Security Hub account whose invitation you're accepting.\n\n :type InvitationId: string\n :param InvitationId: The ID of the invitation that is sent to the AWS account by the Security Hub master account.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef batch_disable_standards(StandardsSubscriptionArns=None):\n \"\"\"\n Disables the standards specified by the standards subscription ARNs. In the context of Security Hub, supported standards (for example, CIS AWS Foundations) are automated and continuous checks that help determine your compliance status against security industry (including AWS) best practices.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_disable_standards(\n StandardsSubscriptionArns=[\n 'string',\n ]\n )\n \n \n :type StandardsSubscriptionArns: list\n :param StandardsSubscriptionArns: [REQUIRED]\n The ARNs of the standards subscriptions that you want to disable.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'StandardsSubscriptions': [\n {\n 'StandardsSubscriptionArn': 'string',\n 'StandardsArn': 'string',\n 'StandardsInput': {\n 'string': 'string'\n },\n 'StandardsStatus': 'PENDING'|'READY'|'FAILED'|'DELETING'\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef batch_enable_standards(StandardsSubscriptionRequests=None):\n \"\"\"\n Enables the standards specified by the standards ARNs. 
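# A hedged sketch of enabling a standard and later disabling it via the
# subscription ARNs returned by batch_enable_standards, as documented above.
# The CIS ruleset ARN below is illustrative; check the Security Hub docs for
# the current value in your partition.
import boto3

securityhub = boto3.client('securityhub')

enabled = securityhub.batch_enable_standards(
    StandardsSubscriptionRequests=[
        {'StandardsArn': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0'},
    ]
)

subscription_arns = [s['StandardsSubscriptionArn']
                     for s in enabled['StandardsSubscriptions']]

# Note that disabling takes the *subscription* ARNs, not the standards ARNs.
securityhub.batch_disable_standards(StandardsSubscriptionArns=subscription_arns)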
In the context of Security Hub, supported standards (for example, CIS AWS Foundations) are automated and continuous checks that help determine your compliance status against security industry (including AWS) best practices.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_enable_standards(\n StandardsSubscriptionRequests=[\n {\n 'StandardsArn': 'string',\n 'StandardsInput': {\n 'string': 'string'\n }\n },\n ]\n )\n \n \n :type StandardsSubscriptionRequests: list\n :param StandardsSubscriptionRequests: [REQUIRED]\n The list of standards that you want to enable.\n (dict) --The standard that you want to enable.\n StandardsArn (string) -- [REQUIRED]The ARN of the standard that you want to enable.\n StandardsInput (dict) --\n (string) --\n (string) --\n \n \n\n :rtype: dict\n :return: {\n 'StandardsSubscriptions': [\n {\n 'StandardsSubscriptionArn': 'string',\n 'StandardsArn': 'string',\n 'StandardsInput': {\n 'string': 'string'\n },\n 'StandardsStatus': 'PENDING'|'READY'|'FAILED'|'DELETING'\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef batch_import_findings(Findings=None):\n \"\"\"\n Imports security findings that are generated by the integrated third-party products into Security Hub.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_import_findings(\n Findings=[\n {\n 'SchemaVersion': 'string',\n 'Id': 'string',\n 'ProductArn': 'string',\n 'GeneratorId': 'string',\n 'AwsAccountId': 'string',\n 'Types': [\n 'string',\n ],\n 'FirstObservedAt': 'string',\n 'LastObservedAt': 'string',\n 'CreatedAt': 'string',\n 'UpdatedAt': 'string',\n 'Severity': {\n 'Product': 123.0,\n 'Normalized': 123\n },\n 'Confidence': 123,\n 'Criticality': 123,\n 'Title': 'string',\n 'Description': 'string',\n 'Remediation': {\n 'Recommendation': {\n 'Text': 'string',\n 'Url': 'string'\n }\n },\n 'SourceUrl': 'string',\n 'ProductFields': {\n 'string': 'string'\n },\n 'UserDefinedFields': {\n 'string': 'string'\n },\n 'Malware': [\n {\n 'Name': 'string',\n 'Type': 'ADWARE'|'BLENDED_THREAT'|'BOTNET_AGENT'|'COIN_MINER'|'EXPLOIT_KIT'|'KEYLOGGER'|'MACRO'|'POTENTIALLY_UNWANTED'|'SPYWARE'|'RANSOMWARE'|'REMOTE_ACCESS'|'ROOTKIT'|'TROJAN'|'VIRUS'|'WORM',\n 'Path': 'string',\n 'State': 'OBSERVED'|'REMOVAL_FAILED'|'REMOVED'\n },\n ],\n 'Network': {\n 'Direction': 'IN'|'OUT',\n 'Protocol': 'string',\n 'SourceIpV4': 'string',\n 'SourceIpV6': 'string',\n 'SourcePort': 123,\n 'SourceDomain': 'string',\n 'SourceMac': 'string',\n 'DestinationIpV4': 'string',\n 'DestinationIpV6': 'string',\n 'DestinationPort': 123,\n 'DestinationDomain': 'string'\n },\n 'Process': {\n 'Name': 'string',\n 'Path': 'string',\n 'Pid': 123,\n 'ParentPid': 123,\n 'LaunchedAt': 'string',\n 'TerminatedAt': 'string'\n },\n 'ThreatIntelIndicators': [\n {\n 'Type': 'DOMAIN'|'EMAIL_ADDRESS'|'HASH_MD5'|'HASH_SHA1'|'HASH_SHA256'|'HASH_SHA512'|'IPV4_ADDRESS'|'IPV6_ADDRESS'|'MUTEX'|'PROCESS'|'URL',\n 'Value': 'string',\n 'Category': 'BACKDOOR'|'CARD_STEALER'|'COMMAND_AND_CONTROL'|'DROP_SITE'|'EXPLOIT_SITE'|'KEYLOGGER',\n 'LastObservedAt': 'string',\n 'Source': 'string',\n 'SourceUrl': 'string'\n },\n ],\n 'Resources': [\n {\n 'Type': 'string',\n 'Id': 'string',\n 'Partition': 'aws'|'aws-cn'|'aws-us-gov',\n 'Region': 'string',\n 'Tags': {\n 'string': 'string'\n },\n 'Details': {\n 'AwsEc2Instance': {\n 'Type': 'string',\n 'ImageId': 'string',\n 'IpV4Addresses': [\n 'string',\n ],\n 'IpV6Addresses': [\n 'string',\n ],\n 'KeyName': 'string',\n 'IamInstanceProfileArn': 
'string',\n 'VpcId': 'string',\n 'SubnetId': 'string',\n 'LaunchedAt': 'string'\n },\n 'AwsS3Bucket': {\n 'OwnerId': 'string',\n 'OwnerName': 'string'\n },\n 'AwsIamAccessKey': {\n 'UserName': 'string',\n 'Status': 'Active'|'Inactive',\n 'CreatedAt': 'string'\n },\n 'Container': {\n 'Name': 'string',\n 'ImageId': 'string',\n 'ImageName': 'string',\n 'LaunchedAt': 'string'\n },\n 'Other': {\n 'string': 'string'\n }\n }\n },\n ],\n 'Compliance': {\n 'Status': 'PASSED'|'WARNING'|'FAILED'|'NOT_AVAILABLE'\n },\n 'VerificationState': 'UNKNOWN'|'TRUE_POSITIVE'|'FALSE_POSITIVE'|'BENIGN_POSITIVE',\n 'WorkflowState': 'NEW'|'ASSIGNED'|'IN_PROGRESS'|'DEFERRED'|'RESOLVED',\n 'RecordState': 'ACTIVE'|'ARCHIVED',\n 'RelatedFindings': [\n {\n 'ProductArn': 'string',\n 'Id': 'string'\n },\n ],\n 'Note': {\n 'Text': 'string',\n 'UpdatedBy': 'string',\n 'UpdatedAt': 'string'\n }\n },\n ]\n )\n \n \n :type Findings: list\n :param Findings: [REQUIRED]\n A list of findings that you want to import. Must be submitted in the AWSSecurityFinding format.\n (dict) --Provides consistent format for the contents of the Security Hub-aggregated findings. AwsSecurityFinding format enables you to share findings between AWS security services and third-party solutions, and compliance checks.\n Note\n A finding is a potential security issue generated either by AWS services (GuardDuty, Inspector, Macie) or by the integrated third-party solutions and compliance checks.\n SchemaVersion (string) -- [REQUIRED]The schema version for which a finding is formatted.\n Id (string) -- [REQUIRED]The security findings provider-specific identifier for a finding.\n ProductArn (string) -- [REQUIRED]The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) once this provider's product (solution that generates findings) is registered with Security Hub.\n GeneratorId (string) -- [REQUIRED]This is the identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings provider's solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.\n AwsAccountId (string) -- [REQUIRED]The AWS account ID in which a finding is generated.\n Types (list) -- [REQUIRED]One or more finding types in the format of 'namespace/category/classifier' that classify a finding.\n Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications\n (string) --\n FirstObservedAt (string) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings provider.\n LastObservedAt (string) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings provider.\n CreatedAt (string) -- [REQUIRED]An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was created by the security findings provider.\n UpdatedAt (string) -- [REQUIRED]An ISO8601-formatted timestamp that indicates when the finding record was last updated by the security findings provider.\n Severity (dict) -- [REQUIRED]A finding's severity.\n Product (float) --The native severity as defined by the security findings provider's solution that generated the finding.\n Normalized (integer) -- [REQUIRED]The normalized severity of a finding.\n Confidence (integer) --A finding's confidence. 
Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero percent confidence and 100 equates to 100 percent confidence.\n Criticality (integer) --The level of importance assigned to the resources associated with the finding. A score of 0 means the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.\n Title (string) --A finding's title.\n Description (string) --A finding's description.\n Remediation (dict) --An data type that describes the remediation options for a finding.\n Recommendation (dict) --Provides a recommendation on how to remediate the issue identified within a finding.\n Text (string) --The recommendation of what to do about the issue described in a finding.\n Url (string) --A URL to link to general remediation information for the finding type of a finding.\n \n SourceUrl (string) --A URL that links to a page about the current finding in the security findings provider's solution.\n ProductFields (dict) --A data type where security findings providers can include additional solution-specific details that are not part of the defined AwsSecurityFinding format.\n (string) --\n (string) --\n \n UserDefinedFields (dict) --A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.\n (string) --\n (string) --\n \n Malware (list) --A list of malware related to a finding.\n (dict) --A list of malware related to a finding.\n Name (string) -- [REQUIRED]The name of the malware that was observed.\n Type (string) --The type of the malware that was observed.\n Path (string) --The filesystem path of the malware that was observed.\n State (string) --The state of the malware that was observed.\n \n Network (dict) --The details of network-related information about a finding.\n Direction (string) --Indicates the direction of network traffic associated with a finding.\n Protocol (string) --The protocol of network-related information about a finding.\n SourceIpV4 (string) --The source IPv4 address of network-related information about a finding.\n SourceIpV6 (string) --The source IPv6 address of network-related information about a finding.\n SourcePort (integer) --The source port of network-related information about a finding.\n SourceDomain (string) --The source domain of network-related information about a finding.\n SourceMac (string) --The source media access control (MAC) address of network-related information about a finding.\n DestinationIpV4 (string) --The destination IPv4 address of network-related information about a finding.\n DestinationIpV6 (string) --The destination IPv6 address of network-related information about a finding.\n DestinationPort (integer) --The destination port of network-related information about a finding.\n DestinationDomain (string) --The destination domain of network-related information about a finding.\n Process (dict) --The details of process-related information about a finding.\n Name (string) --The name of the process.\n Path (string) --The path to the process executable.\n Pid (integer) --The process ID.\n ParentPid (integer) --The parent process ID.\n LaunchedAt (string) --The date/time that the process was launched.\n TerminatedAt (string) --The date/time that the process was terminated.\n ThreatIntelIndicators (list) --Threat intel details related to a finding.\n (dict) --Threat intel details related 
to a finding.\n Type (string) --The type of a threat intel indicator.\n Value (string) --The value of a threat intel indicator.\n Category (string) --The category of a threat intel indicator.\n LastObservedAt (string) --The date/time of the last observation of a threat intel indicator.\n Source (string) --The source of the threat intel.\n SourceUrl (string) --The URL for more details from the source of the threat intel.\n \n Resources (list) -- [REQUIRED]A set of resource data types that describe the resources to which the finding refers.\n (dict) --A resource data type that describes a resource to which the finding refers.\n Type (string) -- [REQUIRED]Specifies the type of the resource for which details are provided.\n Id (string) -- [REQUIRED]The canonical identifier for the given resource type.\n Partition (string) --The canonical AWS partition name to which the region is assigned.\n Region (string) --The canonical AWS external region name where this resource is located.\n Tags (dict) --A list of AWS tags associated with a resource at the time the finding was processed.\n (string) --\n (string) --\n \n Details (dict) --Provides additional details about the resource.\n AwsEc2Instance (dict) --The details of an AWS EC2 instance.\n Type (string) --The instance type of the instance.\n ImageId (string) --The Amazon Machine Image (AMI) ID of the instance.\n IpV4Addresses (list) --The IPv4 addresses associated with the instance.\n (string) --\n IpV6Addresses (list) --The IPv6 addresses associated with the instance.\n (string) --\n KeyName (string) --The key name associated with the instance.\n IamInstanceProfileArn (string) --The IAM profile ARN of the instance.\n VpcId (string) --The identifier of the VPC in which the instance was launched.\n SubnetId (string) --The identifier of the subnet in which the instance was launched.\n LaunchedAt (string) --The date/time the instance was launched.\n AwsS3Bucket (dict) --The details of an AWS S3 Bucket.\n OwnerId (string) --The canonical user ID of the owner of the S3 bucket.\n OwnerName (string) --The display name of the owner of the S3 bucket.\n AwsIamAccessKey (dict) --AWS IAM access key details related to a finding.\n UserName (string) --The user associated with the IAM access key related to a finding.\n Status (string) --The status of the IAM access key related to a finding.\n CreatedAt (string) --The creation date/time of the IAM access key related to a finding.\n Container (dict) --Container details related to a finding.\n Name (string) --The name of the container related to a finding.\n ImageId (string) --The identifier of the image related to a finding.\n ImageName (string) --The name of the image related to a finding.\n LaunchedAt (string) --The date/time that the container was started.\n Other (dict) --The details of a resource that does not have a specific sub-field for the resource type defined.\n (string) --\n (string) --\n \n \n Compliance (dict) --This data type is exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, AWS CIS Foundations). 
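# A minimal hand-rolled finding in the AwsSecurityFinding format whose fields
# are documented in this parameter list. Every concrete value here (account
# ID, product ARN, resource ARN) is a placeholder; only fields marked
# [REQUIRED] above are populated.
import boto3
from datetime import datetime, timezone

securityhub = boto3.client('securityhub')
now = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')

response = securityhub.batch_import_findings(
    Findings=[{
        'SchemaVersion': '2018-10-08',
        'Id': 'example-finding-001',
        'ProductArn': 'arn:aws:securityhub:us-east-1:123456789012:product/123456789012/default',
        'GeneratorId': 'example-generator',
        'AwsAccountId': '123456789012',
        'Types': ['Software and Configuration Checks/Vulnerabilities/CVE'],
        'CreatedAt': now,
        'UpdatedAt': now,
        'Severity': {'Normalized': 40},
        'Resources': [{
            'Type': 'AwsEc2Instance',
            'Id': 'arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0',
        }],
    }]
)
print(response['SuccessCount'], response['FailedCount'])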
Contains compliance-related finding details.\n Status (string) --Indicates the result of a compliance check.\n VerificationState (string) --Indicates the veracity of a finding.\n WorkflowState (string) --The workflow state of a finding.\n RecordState (string) --The record state of a finding.\n RelatedFindings (list) --A list of related findings.\n (dict) --Related finding's details.\n ProductArn (string) -- [REQUIRED]The ARN of the solution that generated a related finding.\n Id (string) -- [REQUIRED]The solution-generated identifier for a related finding.\n \n Note (dict) --A user-defined note added to a finding.\n Text (string) -- [REQUIRED]The text of a note.\n UpdatedBy (string) -- [REQUIRED]The principal that created a note.\n UpdatedAt (string) -- [REQUIRED]The timestamp of when the note was updated.\n \n \n\n :rtype: dict\n :return: {\n 'FailedCount': 123,\n 'SuccessCount': 123,\n 'FailedFindings': [\n {\n 'Id': 'string',\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_insight(Name=None, Filters=None, GroupByAttribute=None):\n \"\"\"\n Creates an insight, which is a consolidation of findings that identifies a security area that requires attention or intervention.\n See also: AWS API Documentation\n \n \n :example: response = client.create_insight(\n Name='string',\n Filters={\n 'ProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'AwsAccountId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Id': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'GeneratorId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Type': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'FirstObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'LastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'CreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'UpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'SeverityProduct': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityNormalized': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityLabel': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Confidence': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Criticality': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Title': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Description': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecommendationText': [\n {\n 'Value': 'string',\n 
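# An aside on the can_paginate helper documented above: probe whether an
# operation supports built-in pagination before asking for a paginator. The
# operation name 'get_findings' is only an example; fall back to manual
# NextToken loops when can_paginate returns False.
import boto3

securityhub = boto3.client('securityhub')
if securityhub.can_paginate('get_findings'):
    paginator = securityhub.get_paginator('get_findings')
else:
    paginator = None  # iterate with NextToken by hand instead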
'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'SourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProductFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ProductName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'CompanyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'UserDefinedFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'MalwareName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwarePath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDirection': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkProtocol': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourceIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourcePort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkSourceDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceMac': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDestinationIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationPort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkDestinationDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessParentPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ProcessTerminatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorValue': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorCategory': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorLastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorSource': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorSourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourcePartition': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 
'ResourceRegion': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ResourceAwsEc2InstanceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIpV4Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceIpV6Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceKeyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIamInstanceProfileArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceVpcId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceSubnetId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceAwsS3BucketOwnerId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsS3BucketOwnerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyUserName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyCreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceContainerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceDetailsOther': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ComplianceStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'VerificationState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'WorkflowState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecordState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteUpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'NoteUpdatedBy': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Keyword': [\n {\n 'Value': 'string'\n },\n ]\n },\n GroupByAttribute='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The user-defined name that identifies the 
insight that you want to create.\n \n\n :type Filters: dict\n :param Filters: [REQUIRED]\n A collection of attributes that are applied to all active Security Hub-aggregated findings and that result in a subset of findings that are included in this insight.\n ProductArn (list) --The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) once this provider's product (solution that generates findings) is registered with Security Hub.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n AwsAccountId (list) --The AWS account ID in which a finding is generated.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Id (list) --The security findings provider-specific identifier for a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n GeneratorId (list) --This is the identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings provider's solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Type (list) --A finding type in the format of 'namespace/category/classifier' that classifies a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n FirstObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n LastObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n CreatedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was created by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date 
filter.\n \n UpdatedAt (list) --An ISO8601-formatted timestamp that indicates when the finding record was last updated by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n SeverityProduct (list) --The native severity as defined by the security findings provider's solution that generated the finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityNormalized (list) --The normalized severity of a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityLabel (list) --The label of a finding's severity.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Confidence (list) --A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero percent confidence and 100 equates to 100 percent confidence.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Criticality (list) --The level of importance assigned to the resources associated with the finding. 
A score of 0 means the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Title (list) --A finding's title.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Description (list) --A finding's description.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecommendationText (list) --The recommendation of what to do about the issue described in a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n SourceUrl (list) --A URL that links to a page about the current finding in the security findings provider's solution.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProductFields (list) --A data type where security findings providers can include additional solution-specific details that are not part of the defined AwsSecurityFinding format.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ProductName (list) --The name of the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n CompanyName (list) --The name of the findings provider (company) that owns the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n UserDefinedFields (list) --A list of name/value string pairs associated with the finding. 
These are custom, user-defined fields added to a finding.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n MalwareName (list) --The name of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareType (list) --The type of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwarePath (list) --The filesystem path of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareState (list) --The state of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDirection (list) --Indicates the direction of network traffic associated with a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkProtocol (list) --The protocol of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceIpV4 (list) --The source IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourceIpV6 (list) --The source IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourcePort (list) --The source port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkSourceDomain (list) --The source domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceMac (list) --The source media access control (MAC) address of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n 
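# The Filters argument is a dict of lists like the ones documented here; each
# filter family has its own shape. A small illustrative combination, assuming
# nothing beyond the shapes shown in these parameter docs (all values are
# placeholders):
example_filters = {
    'AwsAccountId': [{'Value': '123456789012', 'Comparison': 'EQUALS'}],  # string filter
    'SeverityNormalized': [{'Gte': 70.0}],                                # number filter
    'NetworkSourceIpV4': [{'Cidr': '198.51.100.0/24'}],                   # IP filter
    'CreatedAt': [{'DateRange': {'Value': 7, 'Unit': 'DAYS'}}],           # date filter
}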
NetworkDestinationIpV4 (list) --The destination IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationIpV6 (list) --The destination IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationPort (list) --The destination port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkDestinationDomain (list) --The destination domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessName (list) --The name of the process.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPath (list) --The path to the process executable.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPid (list) --The process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessParentPid (list) --The parent process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessLaunchedAt (list) --The date/time that the process was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ProcessTerminatedAt (list) --The date/time that the process was terminated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorType (list) --The type of a threat intel indicator.\n (dict) --A string filter for 
querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorValue (list) --The value of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorCategory (list) --The category of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorLastObservedAt (list) --The date/time of the last observation of a threat intel indicator.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorSource (list) --The source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorSourceUrl (list) --The URL for more details from the source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceType (list) --Specifies the type of the resource for which details are provided.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceId (list) --The canonical identifier for the given resource type.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourcePartition (list) --The canonical AWS partition name to which the region is assigned.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceRegion (list) --The canonical AWS external region name where this resource is located.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceTags (list) --A list of AWS tags associated with a resource at the time the finding was processed.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ResourceAwsEc2InstanceType (list) --The instance type of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the 
condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceImageId (list) --The Amazon Machine Image (AMI) ID of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIpV4Addresses (list) --The IPv4 addresses associated with the instance.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceIpV6Addresses (list) --The IPv6 addresses associated with the instance.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceKeyName (list) --The key name associated with the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIamInstanceProfileArn (list) --The IAM profile ARN of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceVpcId (list) --The identifier of the VPC in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceSubnetId (list) --The identifier of the subnet in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceLaunchedAt (list) --The date/time the instance was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceAwsS3BucketOwnerId (list) --The canonical user ID of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsS3BucketOwnerName (list) --The display name of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyUserName (list) --The user associated with the IAM access key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyStatus (list) --The status of the IAM access key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when 
querying for findings.\n \n ResourceAwsIamAccessKeyCreatedAt (list) --The creation date/time of the IAM access key related to a finding.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceContainerName (list) --The name of the container related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageId (list) --The identifier of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageName (list) --The name of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerLaunchedAt (list) --The date/time that the container was started.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceDetailsOther (list) --The details of a resource that does not have a specific sub-field for the resource type defined.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ComplianceStatus (list) --Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, AWS CIS Foundations). 
Contains compliance-related finding details.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n VerificationState (list) --Indicates the veracity of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n WorkflowState (list) --The workflow state of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecordState (list) --The updated record state for the finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsProductArn (list) --The ARN of the solution that generated a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsId (list) --The solution-generated identifier for a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteText (list) --The text of a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteUpdatedAt (list) --The timestamp of when the note was updated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n NoteUpdatedBy (list) --The principal that created a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Keyword (list) --A keyword for a finding.\n (dict) --A keyword filter for querying findings.\n Value (string) --A value for the keyword.\n \n \n\n :type GroupByAttribute: string\n :param GroupByAttribute: [REQUIRED]\n The attribute by which the insight's findings are grouped. 
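# A sketch of a complete create_insight call assembled from the pieces
# documented above: a name, a Filters dict, and a GroupByAttribute. The
# grouping attribute 'ResourceId' follows the field names used in these docs;
# the filter values are placeholders.
import boto3

securityhub = boto3.client('securityhub')
response = securityhub.create_insight(
    Name='High severity findings by resource',
    Filters={
        'SeverityNormalized': [{'Gte': 70.0}],
        'RecordState': [{'Value': 'ACTIVE', 'Comparison': 'EQUALS'}],
    },
    GroupByAttribute='ResourceId',
)
print(response['InsightArn'])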
This attribute is used as a findings aggregator for the purposes of viewing and managing multiple related findings under a single operand.\n \n\n :rtype: dict\n :return: {\n 'InsightArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_members(AccountDetails=None):\n \"\"\"\n Creates member Security Hub accounts in the current AWS account (which becomes the master Security Hub account) that has Security Hub enabled.\n See also: AWS API Documentation\n \n \n :example: response = client.create_members(\n AccountDetails=[\n {\n 'AccountId': 'string',\n 'Email': 'string'\n },\n ]\n )\n \n \n :type AccountDetails: list\n :param AccountDetails: A list of account ID and email address pairs of the accounts that you want to associate with the master Security Hub account.\n (dict) --The details of an AWS account.\n AccountId (string) --The ID of an AWS account.\n Email (string) --The email of an AWS account.\n \n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'ProcessingResult': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef decline_invitations(AccountIds=None):\n \"\"\"\n Declines invitations that are sent to this AWS account (invitee) by the AWS accounts (inviters) that are specified by the account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.decline_invitations(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: A list of account IDs specifying accounts whose invitations to Security Hub you want to decline.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'ProcessingResult': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef delete_insight(InsightArn=None):\n \"\"\"\n Deletes an insight that is specified by the insight ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_insight(\n InsightArn='string'\n )\n \n \n :type InsightArn: string\n :param InsightArn: [REQUIRED]\n The ARN of the insight that you want to delete.\n \n\n :rtype: dict\n :return: {\n 'InsightArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_invitations(AccountIds=None):\n \"\"\"\n Deletes invitations that are sent to this AWS account (invitee) by the AWS accounts (inviters) that are specified by their account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_invitations(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: A list of account IDs specifying accounts whose invitations to Security Hub you want to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'ProcessingResult': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef delete_members(AccountIds=None):\n \"\"\"\n Deletes the Security Hub member accounts that are specified by the account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_members(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: A list of account IDs of the Security Hub member accounts that you want to delete.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'ProcessingResult': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef disable_import_findings_for_product(ProductSubscriptionArn=None):\n \"\"\"\n Stops you from being able to import findings generated by integrated third-party providers into Security Hub.\n See also: AWS API Documentation\n \n \n 
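# A hedged sketch of the master/member lifecycle covered by the calls above:
# create a member account under the current (master) account, inspect any
# failures, then tear the association back down. The account ID and email
# address are placeholders.
import boto3

securityhub = boto3.client('securityhub')

created = securityhub.create_members(
    AccountDetails=[{'AccountId': '111122223333', 'Email': 'security@example.com'}]
)
for failure in created['UnprocessedAccounts']:
    print(failure['AccountId'], failure['ProcessingResult'])

# Later, remove the member relationship entirely.
securityhub.delete_members(AccountIds=['111122223333'])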
:example: response = client.disable_import_findings_for_product(\n ProductSubscriptionArn='string'\n )\n \n \n :type ProductSubscriptionArn: string\n :param ProductSubscriptionArn: [REQUIRED]\n The ARN of a resource that represents your subscription to a supported product.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disable_security_hub():\n \"\"\"\n Disables the AWS Security Hub Service.\n See also: AWS API Documentation\n \n \n :example: response = client.disable_security_hub()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disassociate_from_master_account():\n \"\"\"\n Disassociates the current Security Hub member account from its master account.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_from_master_account()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disassociate_members(AccountIds=None):\n \"\"\"\n Disassociates the Security Hub member accounts that are specified by the account IDs from their master account.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_members(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: The account IDs of the member accounts that you want to disassociate from the master account.\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef enable_import_findings_for_product(ProductArn=None):\n \"\"\"\n Enables you to import findings generated by integrated third-party providers into Security Hub.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_import_findings_for_product(\n ProductArn='string'\n )\n \n \n :type ProductArn: string\n :param ProductArn: [REQUIRED]\n The ARN of the product that generates findings that you want to import into Security Hub.\n \n\n :rtype: dict\n :return: {\n 'ProductSubscriptionArn': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef enable_security_hub():\n \"\"\"\n Enables the AWS Security Hub service.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_security_hub()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_enabled_standards(StandardsSubscriptionArns=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists and describes enabled standards.\n See also: AWS API Documentation\n \n \n :example: response = client.get_enabled_standards(\n StandardsSubscriptionArns=[\n 'string',\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type StandardsSubscriptionArns: list\n :param StandardsSubscriptionArns: The list of standards subscription ARNS that you want to list and describe.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the GetEnabledStandards operation. 
For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response.\n\n :rtype: dict\n :return: {\n 'StandardsSubscriptions': [\n {\n 'StandardsSubscriptionArn': 'string',\n 'StandardsArn': 'string',\n 'StandardsInput': {\n 'string': 'string'\n },\n 'StandardsStatus': 'PENDING'|'READY'|'FAILED'|'DELETING'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_findings(Filters=None, SortCriteria=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists and describes Security Hub-aggregated findings that are specified by filter attributes.\n See also: AWS API Documentation\n \n \n :example: response = client.get_findings(\n Filters={\n 'ProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'AwsAccountId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Id': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'GeneratorId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Type': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'FirstObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'LastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'CreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'UpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'SeverityProduct': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityNormalized': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityLabel': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Confidence': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Criticality': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Title': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Description': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecommendationText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'SourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProductFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ProductName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'CompanyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'UserDefinedFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'MalwareName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwarePath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDirection': [\n {\n 
'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkProtocol': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourceIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourcePort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkSourceDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceMac': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDestinationIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationPort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkDestinationDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessParentPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ProcessTerminatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorValue': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorCategory': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorLastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorSource': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorSourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourcePartition': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceRegion': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ResourceAwsEc2InstanceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIpV4Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceIpV6Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceKeyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIamInstanceProfileArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceVpcId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceSubnetId': 
[\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceAwsS3BucketOwnerId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsS3BucketOwnerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyUserName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyCreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceContainerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceDetailsOther': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ComplianceStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'VerificationState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'WorkflowState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecordState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteUpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'NoteUpdatedBy': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Keyword': [\n {\n 'Value': 'string'\n },\n ]\n },\n SortCriteria=[\n {\n 'Field': 'string',\n 'SortOrder': 'asc'|'desc'\n },\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Filters: dict\n :param Filters: A collection of attributes that is use for querying findings.\n ProductArn (list) --The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) once this provider's product (solution that generates findings) is registered with Security Hub.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n AwsAccountId (list) --The AWS account ID in which a finding is generated.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Id (list) --The security findings provider-specific identifier for a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter 
value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n GeneratorId (list) --This is the identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings provider's solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Type (list) --A finding type in the format of 'namespace/category/classifier' that classifies a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n FirstObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n LastObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n CreatedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was created by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n UpdatedAt (list) --An ISO8601-formatted timestamp that indicates when the finding record was last updated by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n SeverityProduct (list) --The native severity as defined by the security findings provider's solution that generated the finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityNormalized (list) --The normalized severity of a finding.\n (dict) --A number filter for querying findings.\n Gte 
(float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityLabel (list) --The label of a finding's severity.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Confidence (list) --A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero percent confidence and 100 equates to 100 percent confidence.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Criticality (list) --The level of importance assigned to the resources associated with the finding. A score of 0 means the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Title (list) --A finding's title.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Description (list) --A finding's description.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecommendationText (list) --The recommendation of what to do about the issue described in a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n SourceUrl (list) --A URL that links to a page about the current finding in the security findings provider's solution.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProductFields (list) --A data type where security findings providers can include additional solution-specific details that are not part of the defined AwsSecurityFinding format.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map 
filter.\n \n ProductName (list) --The name of the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n CompanyName (list) --The name of the findings provider (company) that owns the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n UserDefinedFields (list) --A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n MalwareName (list) --The name of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareType (list) --The type of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwarePath (list) --The filesystem path of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareState (list) --The state of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDirection (list) --Indicates the direction of network traffic associated with a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkProtocol (list) --The protocol of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceIpV4 (list) --The source IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourceIpV6 (list) --The source IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourcePort (list) --The source port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) 
--Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkSourceDomain (list) --The source domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceMac (list) --The source media access control (MAC) address of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDestinationIpV4 (list) --The destination IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationIpV6 (list) --The destination IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationPort (list) --The destination port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkDestinationDomain (list) --The destination domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessName (list) --The name of the process.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPath (list) --The path to the process executable.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPid (list) --The process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessParentPid (list) --The parent process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessLaunchedAt (list) --The date/time that the process was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n 
End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ProcessTerminatedAt (list) --The date/time that the process was terminated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorType (list) --The type of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorValue (list) --The value of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorCategory (list) --The category of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorLastObservedAt (list) --The date/time of the last observation of a threat intel indicator.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorSource (list) --The source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorSourceUrl (list) --The URL for more details from the source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceType (list) --Specifies the type of the resource for which details are provided.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceId (list) --The canonical identifier for the given resource type.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourcePartition (list) --The canonical AWS partition name to which the region is assigned.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceRegion (list) --The canonical AWS external region name where this resource is located.\n (dict) --A string filter for querying findings.\n Value (string) --The 
string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceTags (list) --A list of AWS tags associated with a resource at the time the finding was processed.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ResourceAwsEc2InstanceType (list) --The instance type of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceImageId (list) --The Amazon Machine Image (AMI) ID of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIpV4Addresses (list) --The IPv4 addresses associated with the instance.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceIpV6Addresses (list) --The IPv6 addresses associated with the instance.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceKeyName (list) --The key name associated with the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIamInstanceProfileArn (list) --The IAM profile ARN of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceVpcId (list) --The identifier of the VPC in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceSubnetId (list) --The identifier of the subnet in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceLaunchedAt (list) --The date/time the instance was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceAwsS3BucketOwnerId (list) --The canonical user ID of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsS3BucketOwnerName (list) --The display name of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The 
string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyUserName (list) --The user associated with the IAM access key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyStatus (list) --The status of the IAM access key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyCreatedAt (list) --The creation date/time of the IAM access key related to a finding.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceContainerName (list) --The name of the container related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageId (list) --The identifier of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageName (list) --The name of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerLaunchedAt (list) --The date/time that the container was started.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceDetailsOther (list) --The details of a resource that does not have a specific sub-field for the resource type defined.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ComplianceStatus (list) --Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, AWS CIS Foundations). 
Contains compliance-related finding details.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n VerificationState (list) --Indicates the veracity of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n WorkflowState (list) --The workflow state of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecordState (list) --The updated record state for the finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsProductArn (list) --The ARN of the solution that generated a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsId (list) --The solution-generated identifier for a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteText (list) --The text of a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteUpdatedAt (list) --The timestamp of when the note was updated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n NoteUpdatedBy (list) --The principal that created a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Keyword (list) --A keyword for a finding.\n (dict) --A keyword filter for querying findings.\n Value (string) --A value for the keyword.\n \n \n\n :type SortCriteria: list\n :param SortCriteria: A collection of attributes used for sorting findings.\n (dict) --A collection of attributes used for sorting findings.\n Field (string) --The finding attribute used for sorting findings.\n SortOrder (string) --The order used for sorting findings.\n \n \n\n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the GetFindings operation. 
For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response.\n\n :rtype: dict\n :return: {\n 'Findings': [\n {\n 'SchemaVersion': 'string',\n 'Id': 'string',\n 'ProductArn': 'string',\n 'GeneratorId': 'string',\n 'AwsAccountId': 'string',\n 'Types': [\n 'string',\n ],\n 'FirstObservedAt': 'string',\n 'LastObservedAt': 'string',\n 'CreatedAt': 'string',\n 'UpdatedAt': 'string',\n 'Severity': {\n 'Product': 123.0,\n 'Normalized': 123\n },\n 'Confidence': 123,\n 'Criticality': 123,\n 'Title': 'string',\n 'Description': 'string',\n 'Remediation': {\n 'Recommendation': {\n 'Text': 'string',\n 'Url': 'string'\n }\n },\n 'SourceUrl': 'string',\n 'ProductFields': {\n 'string': 'string'\n },\n 'UserDefinedFields': {\n 'string': 'string'\n },\n 'Malware': [\n {\n 'Name': 'string',\n 'Type': 'ADWARE'|'BLENDED_THREAT'|'BOTNET_AGENT'|'COIN_MINER'|'EXPLOIT_KIT'|'KEYLOGGER'|'MACRO'|'POTENTIALLY_UNWANTED'|'SPYWARE'|'RANSOMWARE'|'REMOTE_ACCESS'|'ROOTKIT'|'TROJAN'|'VIRUS'|'WORM',\n 'Path': 'string',\n 'State': 'OBSERVED'|'REMOVAL_FAILED'|'REMOVED'\n },\n ],\n 'Network': {\n 'Direction': 'IN'|'OUT',\n 'Protocol': 'string',\n 'SourceIpV4': 'string',\n 'SourceIpV6': 'string',\n 'SourcePort': 123,\n 'SourceDomain': 'string',\n 'SourceMac': 'string',\n 'DestinationIpV4': 'string',\n 'DestinationIpV6': 'string',\n 'DestinationPort': 123,\n 'DestinationDomain': 'string'\n },\n 'Process': {\n 'Name': 'string',\n 'Path': 'string',\n 'Pid': 123,\n 'ParentPid': 123,\n 'LaunchedAt': 'string',\n 'TerminatedAt': 'string'\n },\n 'ThreatIntelIndicators': [\n {\n 'Type': 'DOMAIN'|'EMAIL_ADDRESS'|'HASH_MD5'|'HASH_SHA1'|'HASH_SHA256'|'HASH_SHA512'|'IPV4_ADDRESS'|'IPV6_ADDRESS'|'MUTEX'|'PROCESS'|'URL',\n 'Value': 'string',\n 'Category': 'BACKDOOR'|'CARD_STEALER'|'COMMAND_AND_CONTROL'|'DROP_SITE'|'EXPLOIT_SITE'|'KEYLOGGER',\n 'LastObservedAt': 'string',\n 'Source': 'string',\n 'SourceUrl': 'string'\n },\n ],\n 'Resources': [\n {\n 'Type': 'string',\n 'Id': 'string',\n 'Partition': 'aws'|'aws-cn'|'aws-us-gov',\n 'Region': 'string',\n 'Tags': {\n 'string': 'string'\n },\n 'Details': {\n 'AwsEc2Instance': {\n 'Type': 'string',\n 'ImageId': 'string',\n 'IpV4Addresses': [\n 'string',\n ],\n 'IpV6Addresses': [\n 'string',\n ],\n 'KeyName': 'string',\n 'IamInstanceProfileArn': 'string',\n 'VpcId': 'string',\n 'SubnetId': 'string',\n 'LaunchedAt': 'string'\n },\n 'AwsS3Bucket': {\n 'OwnerId': 'string',\n 'OwnerName': 'string'\n },\n 'AwsIamAccessKey': {\n 'UserName': 'string',\n 'Status': 'Active'|'Inactive',\n 'CreatedAt': 'string'\n },\n 'Container': {\n 'Name': 'string',\n 'ImageId': 'string',\n 'ImageName': 'string',\n 'LaunchedAt': 'string'\n },\n 'Other': {\n 'string': 'string'\n }\n }\n },\n ],\n 'Compliance': {\n 'Status': 'PASSED'|'WARNING'|'FAILED'|'NOT_AVAILABLE'\n },\n 'VerificationState': 'UNKNOWN'|'TRUE_POSITIVE'|'FALSE_POSITIVE'|'BENIGN_POSITIVE',\n 'WorkflowState': 'NEW'|'ASSIGNED'|'IN_PROGRESS'|'DEFERRED'|'RESOLVED',\n 'RecordState': 'ACTIVE'|'ARCHIVED',\n 'RelatedFindings': [\n {\n 'ProductArn': 'string',\n 'Id': 'string'\n },\n ],\n 'Note': {\n 'Text': 'string',\n 'UpdatedBy': 'string',\n 'UpdatedAt': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_insight_results(InsightArn=None):\n \"\"\"\n Lists the results of the Security Hub insight specified 
by the insight ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.get_insight_results(\n InsightArn='string'\n )\n \n \n :type InsightArn: string\n :param InsightArn: [REQUIRED]\n The ARN of the insight whose results you want to see.\n \n\n :rtype: dict\n :return: {\n 'InsightResults': {\n 'InsightArn': 'string',\n 'GroupByAttribute': 'string',\n 'ResultValues': [\n {\n 'GroupByAttributeValue': 'string',\n 'Count': 123\n },\n ]\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_insights(InsightArns=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists and describes insights that are specified by insight ARNs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_insights(\n InsightArns=[\n 'string',\n ],\n NextToken='string',\n MaxResults=123\n )\n \n \n :type InsightArns: list\n :param InsightArns: The ARNS of the insights that you want to describe.\n (string) --\n \n\n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the GetInsights operation. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response.\n\n :rtype: dict\n :return: {\n 'Insights': [\n {\n 'InsightArn': 'string',\n 'Name': 'string',\n 'Filters': {\n 'ProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'AwsAccountId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Id': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'GeneratorId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Type': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'FirstObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'LastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'CreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'UpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'SeverityProduct': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityNormalized': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityLabel': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Confidence': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Criticality': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Title': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Description': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecommendationText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'SourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProductFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ProductName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'CompanyName': [\n {\n 'Value': 'string',\n 'Comparison': 
'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'UserDefinedFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'MalwareName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwarePath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDirection': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkProtocol': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourceIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourcePort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkSourceDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceMac': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDestinationIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationPort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkDestinationDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessParentPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ProcessTerminatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorValue': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorCategory': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorLastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorSource': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorSourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourcePartition': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceRegion': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ResourceAwsEc2InstanceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceImageId': [\n {\n 'Value': 'string',\n 'Comparison': 
'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIpV4Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceIpV6Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceKeyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIamInstanceProfileArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceVpcId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceSubnetId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceAwsS3BucketOwnerId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsS3BucketOwnerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyUserName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyCreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceContainerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceDetailsOther': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ComplianceStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'VerificationState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'WorkflowState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecordState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteUpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'NoteUpdatedBy': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Keyword': [\n {\n 'Value': 'string'\n },\n ]\n },\n 'GroupByAttribute': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_invitations_count():\n \"\"\"\n Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_invitations_count()\n \n \n :rtype: dict\n :return: {\n 'InvitationsCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef get_master_account():\n \"\"\"\n Provides the 
details for the Security Hub master account to the current member account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_master_account()\n \n \n :rtype: dict\n :return: {\n 'Master': {\n 'AccountId': 'string',\n 'InvitationId': 'string',\n 'InvitedAt': datetime(2015, 1, 1),\n 'MemberStatus': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_members(AccountIds=None):\n \"\"\"\n Returns the details on the Security Hub member accounts that are specified by the account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_members(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED]\n A list of account IDs for the Security Hub member accounts on which you want to return the details.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'Members': [\n {\n 'AccountId': 'string',\n 'Email': 'string',\n 'MasterId': 'string',\n 'MemberStatus': 'string',\n 'InvitedAt': datetime(2015, 1, 1),\n 'UpdatedAt': datetime(2015, 1, 1)\n },\n ],\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'ProcessingResult': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef invite_members(AccountIds=None):\n \"\"\"\n Invites other AWS accounts to enable Security Hub and become Security Hub member accounts. When an account accepts the invitation and becomes a member account, the master account can view Security Hub findings of the member account.\n See also: AWS API Documentation\n \n \n :example: response = client.invite_members(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: A list of IDs of the AWS accounts that you want to invite to Security Hub as members.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'ProcessingResult': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef list_enabled_products_for_import(NextToken=None, MaxResults=None):\n \"\"\"\n Lists all Security Hub-integrated third-party findings providers.\n See also: AWS API Documentation\n \n \n :example: response = client.list_enabled_products_for_import(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the ListEnabledProductsForImport operation. 
For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response.\n\n :rtype: dict\n :return: {\n 'ProductSubscriptions': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_invitations(MaxResults=None, NextToken=None):\n \"\"\"\n Lists all Security Hub membership invitations that were sent to the current AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_invitations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response.\n\n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the ListInvitations operation. For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n\n :rtype: dict\n :return: {\n 'Invitations': [\n {\n 'AccountId': 'string',\n 'InvitationId': 'string',\n 'InvitedAt': datetime(2015, 1, 1),\n 'MemberStatus': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_members(OnlyAssociated=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists details about all member accounts for the current Security Hub master account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_members(\n OnlyAssociated=True|False,\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type OnlyAssociated: boolean\n :param OnlyAssociated: Specifies what member accounts the response includes based on their relationship status with the master account. The default value is TRUE. If onlyAssociated is set to TRUE, the response includes member accounts whose relationship status with the master is set to ENABLED or DISABLED. If onlyAssociated is set to FALSE, the response includes all existing member accounts.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response.\n\n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the ListMembers operation. 
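\n        Where boto3 provides a paginator for this operation (an assumption of this sketch), the same token handling can be delegated to it:\n            import boto3\n            client = boto3.client('securityhub')\n            # get_paginator wraps the NextToken loop; OnlyAssociated is the documented request parameter.\n            paginator = client.get_paginator('list_members')\n            for page in paginator.paginate(OnlyAssociated=True):\n                for member in page.get('Members', []):\n                    print(member['AccountId'], member.get('MemberStatus'))\n        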
For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n\n :rtype: dict\n :return: {\n 'Members': [\n {\n 'AccountId': 'string',\n 'Email': 'string',\n 'MasterId': 'string',\n 'MemberStatus': 'string',\n 'InvitedAt': datetime(2015, 1, 1),\n 'UpdatedAt': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_findings(Filters=None, Note=None, RecordState=None):\n \"\"\"\n Updates the AWS Security Hub-aggregated findings specified by the filter attributes.\n See also: AWS API Documentation\n \n \n :example: response = client.update_findings(\n Filters={\n 'ProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'AwsAccountId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Id': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'GeneratorId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Type': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'FirstObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'LastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'CreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'UpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'SeverityProduct': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityNormalized': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityLabel': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Confidence': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Criticality': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Title': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Description': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecommendationText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'SourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProductFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ProductName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'CompanyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'UserDefinedFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'MalwareName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwarePath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDirection': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkProtocol': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceIpV4': [\n {\n 'Cidr': 
'string'\n },\n ],\n 'NetworkSourceIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourcePort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkSourceDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceMac': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDestinationIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationPort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkDestinationDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessParentPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ProcessTerminatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorValue': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorCategory': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorLastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorSource': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorSourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourcePartition': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceRegion': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ResourceAwsEc2InstanceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIpV4Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceIpV6Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceKeyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIamInstanceProfileArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceVpcId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceSubnetId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 
'DAYS'\n }\n },\n ],\n 'ResourceAwsS3BucketOwnerId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsS3BucketOwnerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyUserName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyCreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceContainerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceDetailsOther': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ComplianceStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'VerificationState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'WorkflowState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecordState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteUpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'NoteUpdatedBy': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Keyword': [\n {\n 'Value': 'string'\n },\n ]\n },\n Note={\n 'Text': 'string',\n 'UpdatedBy': 'string'\n },\n RecordState='ACTIVE'|'ARCHIVED'\n )\n \n \n :type Filters: dict\n :param Filters: [REQUIRED]\n A collection of attributes that specify what findings you want to update.\n ProductArn (list) --The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) once this provider's product (solution that generates findings) is registered with Security Hub.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n AwsAccountId (list) --The AWS account ID in which a finding is generated.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Id (list) --The security findings provider-specific identifier for a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n GeneratorId (list) --This is the identifier for the solution-specific component (a discrete 
unit of logic) that generated a finding. In various security findings provider's solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Type (list) --A finding type in the format of 'namespace/category/classifier' that classifies a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n FirstObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n LastObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n CreatedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was created by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n UpdatedAt (list) --An ISO8601-formatted timestamp that indicates when the finding record was last updated by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n SeverityProduct (list) --The native severity as defined by the security findings provider's solution that generated the finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityNormalized (list) --The normalized severity of a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when 
querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityLabel (list) --The label of a finding's severity.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Confidence (list) --A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero percent confidence and 100 equates to 100 percent confidence.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Criticality (list) --The level of importance assigned to the resources associated with the finding. A score of 0 means the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Title (list) --A finding's title.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Description (list) --A finding's description.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecommendationText (list) --The recommendation of what to do about the issue described in a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n SourceUrl (list) --A URL that links to a page about the current finding in the security findings provider's solution.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProductFields (list) --A data type where security findings providers can include additional solution-specific details that are not part of the defined AwsSecurityFinding format.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ProductName (list) --The name of the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) 
--Represents the condition to be applied to a string value when querying for findings.\n \n CompanyName (list) --The name of the findings provider (company) that owns the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n UserDefinedFields (list) --A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n MalwareName (list) --The name of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareType (list) --The type of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwarePath (list) --The filesystem path of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareState (list) --The state of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDirection (list) --Indicates the direction of network traffic associated with a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkProtocol (list) --The protocol of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceIpV4 (list) --The source IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourceIpV6 (list) --The source IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourcePort (list) --The source port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkSourceDomain (list) --The source domain of network-related information about a finding.\n (dict) --A string 
filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceMac (list) --The source media access control (MAC) address of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDestinationIpV4 (list) --The destination IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationIpV6 (list) --The destination IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationPort (list) --The destination port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkDestinationDomain (list) --The destination domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessName (list) --The name of the process.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPath (list) --The path to the process executable.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPid (list) --The process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessParentPid (list) --The parent process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessLaunchedAt (list) --The date/time that the process was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date 
filter.\n \n ProcessTerminatedAt (list) --The date/time that the process was terminated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorType (list) --The type of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorValue (list) --The value of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorCategory (list) --The category of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorLastObservedAt (list) --The date/time of the last observation of a threat intel indicator.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorSource (list) --The source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorSourceUrl (list) --The URL for more details from the source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceType (list) --Specifies the type of the resource for which details are provided.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceId (list) --The canonical identifier for the given resource type.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourcePartition (list) --The canonical AWS partition name to which the region is assigned.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceRegion (list) --The canonical AWS external region name where this resource is located.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceTags (list) --A list of AWS tags associated with a resource at the time 
the finding was processed.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ResourceAwsEc2InstanceType (list) --The instance type of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceImageId (list) --The Amazon Machine Image (AMI) ID of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIpV4Addresses (list) --The IPv4 addresses associated with the instance.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceIpV6Addresses (list) --The IPv6 addresses associated with the instance.\n (dict) --The IP filter for querying findings.>\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceKeyName (list) --The key name associated with the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIamInstanceProfileArn (list) --The IAM profile ARN of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceVpcId (list) --The identifier of the VPC in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceSubnetId (list) --The identifier of the subnet in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceLaunchedAt (list) --The date/time the instance was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceAwsS3BucketOwnerId (list) --The canonical user ID of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsS3BucketOwnerName (list) --The display name of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyUserName (list) --The user associated with the IAM access 
key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyStatus (list) --The status of the IAM access key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyCreatedAt (list) --The creation date/time of the IAM access key related to a finding.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceContainerName (list) --The name of the container related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageId (list) --The identifier of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageName (list) --The name of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerLaunchedAt (list) --The date/time that the container was started.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceDetailsOther (list) --The details of a resource that does not have a specific sub-field for the resource type defined.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ComplianceStatus (list) --Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, AWS CIS Foundations). 
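\n            By way of illustration only, a minimal update_findings call that archives findings matched on this field might look like the following sketch (every field value here is a hypothetical placeholder):\n                import boto3\n                client = boto3.client('securityhub')\n                # Archive all findings whose compliance status equals the placeholder value 'PASSED'.\n                client.update_findings(\n                    Filters={\n                        'ComplianceStatus': [\n                            {'Value': 'PASSED', 'Comparison': 'EQUALS'}\n                        ]\n                    },\n                    Note={'Text': 'Archived by scheduled cleanup', 'UpdatedBy': 'security-admin'},\n                    RecordState='ARCHIVED'\n                )\n            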
Contains compliance-related finding details.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n VerificationState (list) --Indicates the veracity of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n WorkflowState (list) --The workflow state of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecordState (list) --The updated record state for the finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsProductArn (list) --The ARN of the solution that generated a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsId (list) --The solution-generated identifier for a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteText (list) --The text of a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteUpdatedAt (list) --The timestamp of when the note was updated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n NoteUpdatedBy (list) --The principal that created a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Keyword (list) --A keyword for a finding.\n (dict) --A keyword filter for querying findings.\n Value (string) --A value for the keyword.\n \n \n\n :type Note: dict\n :param Note: The updated note for the finding.\n Text (string) -- [REQUIRED]The updated note text.\n UpdatedBy (string) -- [REQUIRED]The principal that updated the note.\n \n\n :type RecordState: string\n :param RecordState: The updated record state for the finding.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_insight(InsightArn=None, Name=None, Filters=None, GroupByAttribute=None):\n \"\"\"\n Updates the AWS Security Hub insight specified by the insight ARN.\n See also: AWS API Documentation\n \n \n :example: response = client.update_insight(\n InsightArn='string',\n Name='string',\n Filters={\n 'ProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'AwsAccountId': [\n {\n 'Value': 'string',\n 'Comparison': 
'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Id': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'GeneratorId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Type': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'FirstObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'LastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'CreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'UpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'SeverityProduct': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityNormalized': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'SeverityLabel': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Confidence': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Criticality': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'Title': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Description': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecommendationText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'SourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProductFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ProductName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'CompanyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'UserDefinedFields': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'MalwareName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwarePath': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'MalwareState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDirection': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkProtocol': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourceIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkSourcePort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkSourceDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkSourceMac': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NetworkDestinationIpV4': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationIpV6': [\n {\n 'Cidr': 'string'\n },\n ],\n 'NetworkDestinationPort': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'NetworkDestinationDomain': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPath': [\n {\n 'Value': 'string',\n 
'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ProcessPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessParentPid': [\n {\n 'Gte': 123.0,\n 'Lte': 123.0,\n 'Eq': 123.0\n },\n ],\n 'ProcessLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ProcessTerminatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorValue': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorCategory': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorLastObservedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ThreatIntelIndicatorSource': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ThreatIntelIndicatorSourceUrl': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourcePartition': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceRegion': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceTags': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ResourceAwsEc2InstanceType': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIpV4Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceIpV6Addresses': [\n {\n 'Cidr': 'string'\n },\n ],\n 'ResourceAwsEc2InstanceKeyName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceIamInstanceProfileArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceVpcId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceSubnetId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsEc2InstanceLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceAwsS3BucketOwnerId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsS3BucketOwnerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyUserName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceAwsIamAccessKeyCreatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceContainerName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerImageId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n 
],\n 'ResourceContainerImageName': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'ResourceContainerLaunchedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'ResourceDetailsOther': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparison': 'CONTAINS'\n },\n ],\n 'ComplianceStatus': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'VerificationState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'WorkflowState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RecordState': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsProductArn': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'RelatedFindingsId': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteText': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'NoteUpdatedAt': [\n {\n 'Start': 'string',\n 'End': 'string',\n 'DateRange': {\n 'Value': 123,\n 'Unit': 'DAYS'\n }\n },\n ],\n 'NoteUpdatedBy': [\n {\n 'Value': 'string',\n 'Comparison': 'EQUALS'|'CONTAINS'|'PREFIX'\n },\n ],\n 'Keyword': [\n {\n 'Value': 'string'\n },\n ]\n },\n GroupByAttribute='string'\n )\n \n \n :type InsightArn: string\n :param InsightArn: [REQUIRED]\n The ARN of the insight that you want to update.\n \n\n :type Name: string\n :param Name: The updated name for the insight.\n\n :type Filters: dict\n :param Filters: The updated filters that define this insight.\n ProductArn (list) --The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) once this provider's product (solution that generates findings) is registered with Security Hub.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n AwsAccountId (list) --The AWS account ID in which a finding is generated.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Id (list) --The security findings provider-specific identifier for a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n GeneratorId (list) --This is the identifier for the solution-specific component (a discrete unit of logic) that generated a finding. 
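\n            As an illustrative aside, narrowing an insight with update_insight might look like this sketch (the insight ARN and all filter values are placeholders, not real identifiers):\n                import boto3\n                client = boto3.client('securityhub')\n                # Rename the insight, filter it to critical findings, and regroup it by resource ID.\n                client.update_insight(\n                    InsightArn='arn:aws:securityhub:us-west-2:123456789012:insight/123456789012/custom/example',\n                    Name='Critical findings by resource',\n                    Filters={\n                        'SeverityLabel': [\n                            {'Value': 'CRITICAL', 'Comparison': 'EQUALS'}\n                        ]\n                    },\n                    GroupByAttribute='ResourceId'\n                )\n            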
In various security findings provider's solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Type (list) --A finding type in the format of 'namespace/category/classifier' that classifies a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n FirstObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n LastObservedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n CreatedAt (list) --An ISO8601-formatted timestamp that indicates when the potential security issue captured by a finding was created by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n UpdatedAt (list) --An ISO8601-formatted timestamp that indicates when the finding record was last updated by the security findings provider.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n SeverityProduct (list) --The native severity as defined by the security findings provider's solution that generated the finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityNormalized (list) --The normalized severity of a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents 
the 'equal to' condition to be applied to a single field when querying for findings.\n \n SeverityLabel (list) --The label of a finding's severity.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Confidence (list) --A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero percent confidence and 100 equates to 100 percent confidence.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Criticality (list) --The level of importance assigned to the resources associated with the finding. A score of 0 means the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n Title (list) --A finding's title.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Description (list) --A finding's description.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecommendationText (list) --The recommendation of what to do about the issue described in a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n SourceUrl (list) --A URL that links to a page about the current finding in the security findings provider's solution.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProductFields (list) --A data type where security findings providers can include additional solution-specific details that are not part of the defined AwsSecurityFinding format.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ProductName (list) --The name of the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string 
value when querying for findings.\n \n CompanyName (list) --The name of the findings provider (company) that owns the solution (product) that generates findings.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n UserDefinedFields (list) --A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n MalwareName (list) --The name of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareType (list) --The type of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwarePath (list) --The filesystem path of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n MalwareState (list) --The state of the malware that was observed.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDirection (list) --Indicates the direction of network traffic associated with a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkProtocol (list) --The protocol of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceIpV4 (list) --The source IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourceIpV6 (list) --The source IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.\n Cidr (string) --Finding's CIDR value.\n \n NetworkSourcePort (list) --The source port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkSourceDomain (list) --The source domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The 
string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkSourceMac (list) --The source media access control (MAC) address of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NetworkDestinationIpV4 (list) --The destination IPv4 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationIpV6 (list) --The destination IPv6 address of network-related information about a finding.\n (dict) --The IP filter for querying findings.\n Cidr (string) --Finding's CIDR value.\n \n NetworkDestinationPort (list) --The destination port of network-related information about a finding.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n NetworkDestinationDomain (list) --The destination domain of network-related information about a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessName (list) --The name of the process.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPath (list) --The path to the process executable.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ProcessPid (list) --The process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessParentPid (list) --The parent process ID.\n (dict) --A number filter for querying findings.\n Gte (float) --Represents the 'greater than equal' condition to be applied to a single field when querying for findings.\n Lte (float) --Represents the 'less than equal' condition to be applied to a single field when querying for findings.\n Eq (float) --Represents the 'equal to' condition to be applied to a single field when querying for findings.\n \n ProcessLaunchedAt (list) --The date/time that the process was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ProcessTerminatedAt (list) --The date/time 
that the process was terminated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorType (list) --The type of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorValue (list) --The value of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorCategory (list) --The category of a threat intel indicator.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorLastObservedAt (list) --The date/time of the last observation of a threat intel indicator.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ThreatIntelIndicatorSource (list) --The source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ThreatIntelIndicatorSourceUrl (list) --The URL for more details from the source of the threat intel.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceType (list) --Specifies the type of the resource for which details are provided.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceId (list) --The canonical identifier for the given resource type.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourcePartition (list) --The canonical AWS partition name to which the region is assigned.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceRegion (list) --The canonical AWS external region name where this resource is located.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceTags (list) --A list of AWS tags associated with a resource at the time the finding was processed.\n (dict) --The map filter for 
querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ResourceAwsEc2InstanceType (list) --The instance type of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceImageId (list) --The Amazon Machine Image (AMI) ID of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIpV4Addresses (list) --The IPv4 addresses associated with the instance.\n (dict) --The IP filter for querying findings.\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceIpV6Addresses (list) --The IPv6 addresses associated with the instance.\n (dict) --The IP filter for querying findings.\n Cidr (string) --Finding's CIDR value.\n \n ResourceAwsEc2InstanceKeyName (list) --The key name associated with the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceIamInstanceProfileArn (list) --The IAM profile ARN of the instance.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceVpcId (list) --The identifier of the VPC in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceSubnetId (list) --The identifier of the subnet in which the instance was launched.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsEc2InstanceLaunchedAt (list) --The date/time the instance was launched.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceAwsS3BucketOwnerId (list) --The canonical user ID of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsS3BucketOwnerName (list) --The display name of the owner of the S3 bucket.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyUserName (list) --The user associated with the IAM access key related to a finding.\n (dict) --A string filter for 
querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyStatus (list) --The status of the IAM access key related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceAwsIamAccessKeyCreatedAt (list) --The creation date/time of the IAM access key related to a finding.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceContainerName (list) --The name of the container related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageId (list) --The identifier of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerImageName (list) --The name of the image related to a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n ResourceContainerLaunchedAt (list) --The date/time that the container was started.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n ResourceDetailsOther (list) --The details of a resource that does not have a specific sub-field for the resource type defined.\n (dict) --The map filter for querying findings.\n Key (string) --The key of the map filter.\n Value (string) --The value for the key in the map filter.\n Comparison (string) --Represents the condition to be applied to a key value when querying for findings with a map filter.\n \n ComplianceStatus (list) --Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, AWS CIS Foundations). 
Contains compliance-related finding details.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n VerificationState (list) --Indicates the veracity of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n WorkflowState (list) --The workflow state of a finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RecordState (list) --The updated record state for the finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsProductArn (list) --The ARN of the solution that generated a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n RelatedFindingsId (list) --The solution-generated identifier for a related finding.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteText (list) --The text of a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n NoteUpdatedAt (list) --The timestamp of when the note was updated.\n (dict) --A date filter for querying findings.\n Start (string) --A start date for the date filter.\n End (string) --An end date for the date filter.\n DateRange (dict) --A date range for the date filter.\n Value (integer) --A date range value for the date filter.\n Unit (string) --A date range unit for the date filter.\n \n NoteUpdatedBy (list) --The principal that created a note.\n (dict) --A string filter for querying findings.\n Value (string) --The string filter value.\n Comparison (string) --Represents the condition to be applied to a string value when querying for findings.\n \n Keyword (list) --A keyword for a finding.\n (dict) --A keyword filter for querying findings.\n Value (string) --A value for the keyword.\n \n \n\n :type GroupByAttribute: string\n :param GroupByAttribute: The updated GroupBy attribute that defines this insight.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5881333947181702, "alphanum_fraction": 0.5911542773246765, "avg_line_length": 31.166913986206055, "blob_id": "4704dc9e0e9dcf03a283146e6d6b02d0d5431ffc", "content_id": "c0545b103e37415c1c83323c9e240296297cdea3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151282, "license_type": "permissive", "max_line_length": 481, "num_lines": 4703, "path": "/pyboto3/servicecatalog.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to 
any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef accept_portfolio_share(AcceptLanguage=None, PortfolioId=None, PortfolioShareType=None):\n \"\"\"\n Accepts an offer to share the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.accept_portfolio_share(\n AcceptLanguage='string',\n PortfolioId='string',\n PortfolioShareType='IMPORTED'|'AWS_SERVICECATALOG'|'AWS_ORGANIZATIONS'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type PortfolioShareType: string\n :param PortfolioShareType: The type of shared portfolios to accept. The default is to accept imported portfolios.\n AWS_ORGANIZATIONS - Accept portfolios shared by the master account of your organization.\n IMPORTED - Accept imported portfolios.\n AWS_SERVICECATALOG - Not supported. (Throws ResourceNotFoundException.)\n For example, aws servicecatalog accept-portfolio-share --portfolio-id 'port-2qwzkwxt3y5fk' --portfolio-share-type AWS_ORGANIZATIONS\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_principal_with_portfolio(AcceptLanguage=None, PortfolioId=None, PrincipalARN=None, PrincipalType=None):\n \"\"\"\n Associates the specified principal ARN with the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_principal_with_portfolio(\n AcceptLanguage='string',\n PortfolioId='string',\n PrincipalARN='string',\n PrincipalType='IAM'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type PrincipalARN: string\n :param PrincipalARN: [REQUIRED]\n The ARN of the principal (IAM user, role, or group).\n \n\n :type PrincipalType: string\n :param PrincipalType: [REQUIRED]\n The principal type. 
The supported value is IAM .\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_product_with_portfolio(AcceptLanguage=None, ProductId=None, PortfolioId=None, SourcePortfolioId=None):\n \"\"\"\n Associates the specified product with the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_product_with_portfolio(\n AcceptLanguage='string',\n ProductId='string',\n PortfolioId='string',\n SourcePortfolioId='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type SourcePortfolioId: string\n :param SourcePortfolioId: The identifier of the source portfolio.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_service_action_with_provisioning_artifact(ProductId=None, ProvisioningArtifactId=None, ServiceActionId=None, AcceptLanguage=None):\n \"\"\"\n Associates a self-service action with a provisioning artifact.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_service_action_with_provisioning_artifact(\n ProductId='string',\n ProvisioningArtifactId='string',\n ServiceActionId='string',\n AcceptLanguage='string'\n )\n \n \n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier. For example, prod-abcdzk7xy33qa .\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne .\n \n\n :type ServiceActionId: string\n :param ServiceActionId: [REQUIRED]\n The self-service action identifier. 
For example, act-fs7abcd89wxyz .\n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef associate_tag_option_with_resource(ResourceId=None, TagOptionId=None):\n \"\"\"\n Associate the specified TagOption with the specified portfolio or product.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_tag_option_with_resource(\n ResourceId='string',\n TagOptionId='string'\n )\n \n \n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The resource identifier.\n \n\n :type TagOptionId: string\n :param TagOptionId: [REQUIRED]\n The TagOption identifier.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef batch_associate_service_action_with_provisioning_artifact(ServiceActionAssociations=None, AcceptLanguage=None):\n \"\"\"\n Associates multiple self-service actions with provisioning artifacts.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_associate_service_action_with_provisioning_artifact(\n ServiceActionAssociations=[\n {\n 'ServiceActionId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string'\n },\n ],\n AcceptLanguage='string'\n )\n \n \n :type ServiceActionAssociations: list\n :param ServiceActionAssociations: [REQUIRED]\n One or more associations, each consisting of the Action ID, the Product ID, and the Provisioning Artifact ID.\n (dict) --A self-service action association consisting of the Action ID, the Product ID, and the Provisioning Artifact ID.\n ServiceActionId (string) -- [REQUIRED]The self-service action identifier. For example, act-fs7abcd89wxyz .\n ProductId (string) -- [REQUIRED]The product identifier. For example, prod-abcdzk7xy33qa .\n ProvisioningArtifactId (string) -- [REQUIRED]The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne .\n \n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'FailedServiceActionAssociations': [\n {\n 'ServiceActionId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'ErrorCode': 'DUPLICATE_RESOURCE'|'INTERNAL_FAILURE'|'LIMIT_EXCEEDED'|'RESOURCE_NOT_FOUND'|'THROTTLING',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef batch_disassociate_service_action_from_provisioning_artifact(ServiceActionAssociations=None, AcceptLanguage=None):\n \"\"\"\n Disassociates a batch of self-service actions from the specified provisioning artifact.\n See also: AWS API Documentation\n \n \n :example: response = client.batch_disassociate_service_action_from_provisioning_artifact(\n ServiceActionAssociations=[\n {\n 'ServiceActionId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string'\n },\n ],\n AcceptLanguage='string'\n )\n \n \n :type ServiceActionAssociations: list\n :param ServiceActionAssociations: [REQUIRED]\n One or more associations, each consisting of the Action ID, the Product ID, and the Provisioning Artifact ID.\n (dict) --A self-service action association consisting of the Action ID, the Product ID, and the Provisioning Artifact ID.\n ServiceActionId (string) -- [REQUIRED]The self-service action identifier. For example, act-fs7abcd89wxyz .\n ProductId (string) -- [REQUIRED]The product identifier. 
For example, prod-abcdzk7xy33qa .\n ProvisioningArtifactId (string) -- [REQUIRED]The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne .\n \n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'FailedServiceActionAssociations': [\n {\n 'ServiceActionId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'ErrorCode': 'DUPLICATE_RESOURCE'|'INTERNAL_FAILURE'|'LIMIT_EXCEEDED'|'RESOURCE_NOT_FOUND'|'THROTTLING',\n 'ErrorMessage': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef copy_product(AcceptLanguage=None, SourceProductArn=None, TargetProductId=None, TargetProductName=None, SourceProvisioningArtifactIdentifiers=None, CopyOptions=None, IdempotencyToken=None):\n \"\"\"\n Copies the specified source product to the specified target product or a new product.\n You can copy a product to the same account or another account. You can copy a product to the same region or another region.\n This operation is performed asynchronously. To track the progress of the operation, use DescribeCopyProductStatus .\n See also: AWS API Documentation\n \n \n :example: response = client.copy_product(\n AcceptLanguage='string',\n SourceProductArn='string',\n TargetProductId='string',\n TargetProductName='string',\n SourceProvisioningArtifactIdentifiers=[\n {\n 'string': 'string'\n },\n ],\n CopyOptions=[\n 'CopyTags',\n ],\n IdempotencyToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type SourceProductArn: string\n :param SourceProductArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the source product.\n \n\n :type TargetProductId: string\n :param TargetProductId: The identifier of the target product. By default, a new product is created.\n\n :type TargetProductName: string\n :param TargetProductName: A name for the target product. The default is the name of the source product.\n\n :type SourceProvisioningArtifactIdentifiers: list\n :param SourceProvisioningArtifactIdentifiers: The identifiers of the provisioning artifacts (also known as versions) of the product to copy. By default, all provisioning artifacts are copied.\n (dict) --\n (string) --\n (string) --\n \n \n\n :type CopyOptions: list\n :param CopyOptions: The copy options. If the value is CopyTags , the tags from the source product are copied to the target product.\n (string) --\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. 
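\n Usage sketch (an illustrative aside, not AWS sample code; the ARN, account ID, and token values are placeholders):\n import boto3\n client = boto3.client('servicecatalog')\n response = client.copy_product(\n SourceProductArn='arn:aws:catalog:us-east-1:123456789012:product/prod-abcdzk7xy33qa',\n CopyOptions=['CopyTags'],\n IdempotencyToken='copy-attempt-1'\n )\n # The copy runs asynchronously; track it with DescribeCopyProductStatus.\n status = client.describe_copy_product_status(CopyProductToken=response['CopyProductToken'])\n 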
If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'CopyProductToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_constraint(AcceptLanguage=None, PortfolioId=None, ProductId=None, Parameters=None, Type=None, Description=None, IdempotencyToken=None):\n \"\"\"\n Creates a constraint.\n See also: AWS API Documentation\n \n \n :example: response = client.create_constraint(\n AcceptLanguage='string',\n PortfolioId='string',\n ProductId='string',\n Parameters='string',\n Type='string',\n Description='string',\n IdempotencyToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type Parameters: string\n :param Parameters: [REQUIRED]\n The constraint parameters, in JSON format. The syntax depends on the constraint type as follows:\n LAUNCH\n Specify the RoleArn property as follows:\n {'RoleArn' : 'arn:aws:iam::123456789012:role/LaunchRole'}\n You cannot have both a LAUNCH and a STACKSET constraint.\n You also cannot have more than one LAUNCH constraint on a product and portfolio.\n NOTIFICATION\n Specify the NotificationArns property as follows:\n {'NotificationArns' : ['arn:aws:sns:us-east-1:123456789012:Topic']}\n STACKSET\n Specify the Parameters property as follows:\n {'Version': 'String', 'Properties': {'AccountList': [ 'String' ], 'RegionList': [ 'String' ], 'AdminRole': 'String', 'ExecutionRole': 'String'}}\n You cannot have both a LAUNCH and a STACKSET constraint.\n You also cannot have more than one STACKSET constraint on a product and portfolio.\n Products with a STACKSET constraint will launch an AWS CloudFormation stack set.\n TEMPLATE\n Specify the Rules property. For more information, see Template Constraint Rules .\n \n\n :type Type: string\n :param Type: [REQUIRED]\n The type of constraint.\n LAUNCH\n NOTIFICATION\n STACKSET\n TEMPLATE\n \n\n :type Description: string\n :param Description: The description of the constraint.\n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. 
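\n Usage sketch (illustrative only; the portfolio and product IDs reuse placeholder values from this reference, and the role ARN is hypothetical):\n import json\n import boto3\n client = boto3.client('servicecatalog')\n response = client.create_constraint(\n PortfolioId='port-2qwzkwxt3y5fk',\n ProductId='prod-abcdzk7xy33qa',\n Type='LAUNCH',\n # Parameters is a JSON string whose shape depends on Type, as described above.\n Parameters=json.dumps({'RoleArn': 'arn:aws:iam::123456789012:role/LaunchRole'}),\n IdempotencyToken='constraint-attempt-1'\n )\n 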
If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ConstraintDetail': {\n 'ConstraintId': 'string',\n 'Type': 'string',\n 'Description': 'string',\n 'Owner': 'string'\n },\n 'ConstraintParameters': 'string',\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED'\n }\n \n \n :returns: \n LAUNCH\n NOTIFICATION\n STACKSET\n TEMPLATE\n \n \"\"\"\n pass\n\ndef create_portfolio(AcceptLanguage=None, DisplayName=None, Description=None, ProviderName=None, Tags=None, IdempotencyToken=None):\n \"\"\"\n Creates a portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.create_portfolio(\n AcceptLanguage='string',\n DisplayName='string',\n Description='string',\n ProviderName='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n IdempotencyToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type DisplayName: string\n :param DisplayName: [REQUIRED]\n The name to use for display purposes.\n \n\n :type Description: string\n :param Description: The description of the portfolio.\n\n :type ProviderName: string\n :param ProviderName: [REQUIRED]\n The name of the portfolio provider.\n \n\n :type Tags: list\n :param Tags: One or more tags.\n (dict) --Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The value for this key.\n \n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'PortfolioDetail': {\n 'Id': 'string',\n 'ARN': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProviderName': 'string'\n },\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef create_portfolio_share(AcceptLanguage=None, PortfolioId=None, AccountId=None, OrganizationNode=None):\n \"\"\"\n Shares the specified portfolio with the specified account or organization node. Shares to an organization node can only be created by the master account of an Organization. AWSOrganizationsAccess must be enabled in order to create a portfolio share to an organization node.\n See also: AWS API Documentation\n \n \n :example: response = client.create_portfolio_share(\n AcceptLanguage='string',\n PortfolioId='string',\n AccountId='string',\n OrganizationNode={\n 'Type': 'ORGANIZATION'|'ORGANIZATIONAL_UNIT'|'ACCOUNT',\n 'Value': 'string'\n }\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type AccountId: string\n :param AccountId: The AWS account ID. For example, 123456789012 .\n\n :type OrganizationNode: dict\n :param OrganizationNode: The organization node with which you want to share the portfolio. 
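\n Usage sketch (illustrative only; the organizational unit ID is a made-up placeholder):\n import boto3\n client = boto3.client('servicecatalog')\n response = client.create_portfolio_share(\n PortfolioId='port-2qwzkwxt3y5fk',\n OrganizationNode={'Type': 'ORGANIZATIONAL_UNIT', 'Value': 'ou-example-id'}\n )\n # Organization shares are created asynchronously; poll the returned token.\n status = client.describe_portfolio_share_status(PortfolioShareToken=response['PortfolioShareToken'])\n 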
If OrganizationNode is passed in, PortfolioShare will be created for the node and its children (when applicable), and a PortfolioShareToken will be returned in the output in order for the administrator to monitor the status of the PortfolioShare creation process.\n Type (string) --The organization node type.\n Value (string) --The identifier of the organization node.\n \n\n :rtype: dict\n :return: {\n 'PortfolioShareToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_product(AcceptLanguage=None, Name=None, Owner=None, Description=None, Distributor=None, SupportDescription=None, SupportEmail=None, SupportUrl=None, ProductType=None, Tags=None, ProvisioningArtifactParameters=None, IdempotencyToken=None):\n \"\"\"\n Creates a product.\n See also: AWS API Documentation\n \n \n :example: response = client.create_product(\n AcceptLanguage='string',\n Name='string',\n Owner='string',\n Description='string',\n Distributor='string',\n SupportDescription='string',\n SupportEmail='string',\n SupportUrl='string',\n ProductType='CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n ProvisioningArtifactParameters={\n 'Name': 'string',\n 'Description': 'string',\n 'Info': {\n 'string': 'string'\n },\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR'\n },\n IdempotencyToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Name: string\n :param Name: [REQUIRED]\n The name of the product.\n \n\n :type Owner: string\n :param Owner: [REQUIRED]\n The owner of the product.\n \n\n :type Description: string\n :param Description: The description of the product.\n\n :type Distributor: string\n :param Distributor: The distributor of the product.\n\n :type SupportDescription: string\n :param SupportDescription: The support information about the product.\n\n :type SupportEmail: string\n :param SupportEmail: The contact email for product support.\n\n :type SupportUrl: string\n :param SupportUrl: The contact URL for product support.\n\n :type ProductType: string\n :param ProductType: [REQUIRED]\n The type of product.\n \n\n :type Tags: list\n :param Tags: One or more tags.\n (dict) --Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The value for this key.\n \n \n\n :type ProvisioningArtifactParameters: dict\n :param ProvisioningArtifactParameters: [REQUIRED]\n The configuration of the provisioning artifact.\n Name (string) --The name of the provisioning artifact (for example, v1 or v2beta). No spaces are allowed.\n Description (string) --The description of the provisioning artifact, including how it differs from the previous provisioning artifact.\n Info (dict) -- [REQUIRED]The URL of the CloudFormation template in Amazon S3. Specify the URL in JSON format as follows:\n 'LoadTemplateFromURL': 'https://s3.amazonaws.com/cf-templates-ozkq9d3hgiq2-us-east-1/...'\n (string) --\n (string) --\n \n Type (string) --The type of provisioning artifact.\n CLOUD_FORMATION_TEMPLATE - AWS CloudFormation template\n MARKETPLACE_AMI - AWS Marketplace AMI\n MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. 
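\n Usage sketch (illustrative only; the product name, bucket, and template URL are placeholders):\n import boto3\n client = boto3.client('servicecatalog')\n response = client.create_product(\n Name='ExampleWebServer',\n Owner='IT Services',\n ProductType='CLOUD_FORMATION_TEMPLATE',\n ProvisioningArtifactParameters={\n 'Name': 'v1',\n 'Info': {'LoadTemplateFromURL': 'https://s3.amazonaws.com/example-bucket/template.json'},\n 'Type': 'CLOUD_FORMATION_TEMPLATE'\n },\n IdempotencyToken='create-product-attempt-1'\n )\n product_id = response['ProductViewDetail']['ProductViewSummary']['ProductId']\n 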
If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ProductViewDetail': {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED',\n 'ProductARN': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n 'ProvisioningArtifactDetail': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR',\n 'CreatedTime': datetime(2015, 1, 1),\n 'Active': True|False\n },\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n :returns: \n AVAILABLE - The product is ready for use.\n CREATING - Product creation has started; the product is not ready for use.\n FAILED - An action failed.\n \n \"\"\"\n pass\n\ndef create_provisioned_product_plan(AcceptLanguage=None, PlanName=None, PlanType=None, NotificationArns=None, PathId=None, ProductId=None, ProvisionedProductName=None, ProvisioningArtifactId=None, ProvisioningParameters=None, IdempotencyToken=None, Tags=None):\n \"\"\"\n Creates a plan. A plan includes the list of resources to be created (when provisioning a new product) or modified (when updating a provisioned product) when the plan is executed.\n You can create one plan per provisioned product. To create a plan for an existing provisioned product, the product status must be AVAILABLE or TAINTED.\n To view the resource changes in the change set, use DescribeProvisionedProductPlan . To create or modify the provisioned product, use ExecuteProvisionedProductPlan .\n See also: AWS API Documentation\n \n \n :example: response = client.create_provisioned_product_plan(\n AcceptLanguage='string',\n PlanName='string',\n PlanType='CLOUDFORMATION',\n NotificationArns=[\n 'string',\n ],\n PathId='string',\n ProductId='string',\n ProvisionedProductName='string',\n ProvisioningArtifactId='string',\n ProvisioningParameters=[\n {\n 'Key': 'string',\n 'Value': 'string',\n 'UsePreviousValue': True|False\n },\n ],\n IdempotencyToken='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PlanName: string\n :param PlanName: [REQUIRED]\n The name of the plan.\n \n\n :type PlanType: string\n :param PlanType: [REQUIRED]\n The plan type.\n \n\n :type NotificationArns: list\n :param NotificationArns: Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events.\n (string) --\n \n\n :type PathId: string\n :param PathId: The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path. To list the paths for a product, use ListLaunchPaths .\n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type ProvisionedProductName: string\n :param ProvisionedProductName: [REQUIRED]\n A user-friendly name for the provisioned product. 
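\n Usage sketch of the plan workflow (illustrative only; all identifiers are placeholders):\n import boto3\n client = boto3.client('servicecatalog')\n plan = client.create_provisioned_product_plan(\n PlanName='example-plan',\n PlanType='CLOUDFORMATION',\n ProductId='prod-abcdzk7xy33qa',\n ProvisionedProductName='example-instance',\n ProvisioningArtifactId='pa-4abcdjnxjj6ne',\n IdempotencyToken='plan-attempt-1'\n )\n # Inspect the pending resource changes, then apply them.\n changes = client.describe_provisioned_product_plan(PlanId=plan['PlanId'])\n client.execute_provisioned_product_plan(PlanId=plan['PlanId'], IdempotencyToken='execute-attempt-1')\n 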
This value must be unique for the AWS account and cannot be updated after the product is provisioned.\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact.\n \n\n :type ProvisioningParameters: list\n :param ProvisioningParameters: Parameters specified by the administrator that are required for provisioning the product.\n (dict) --The parameter key-value pair used to update a provisioned product.\n Key (string) --The parameter key.\n Value (string) --The parameter value.\n UsePreviousValue (boolean) --If set to true, Value is ignored and the previous parameter value is kept.\n \n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :type Tags: list\n :param Tags: One or more tags.\n (dict) --Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The value for this key.\n \n \n\n :rtype: dict\n :return: {\n 'PlanName': 'string',\n 'PlanId': 'string',\n 'ProvisionProductId': 'string',\n 'ProvisionedProductName': 'string',\n 'ProvisioningArtifactId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef create_provisioning_artifact(AcceptLanguage=None, ProductId=None, Parameters=None, IdempotencyToken=None):\n \"\"\"\n Creates a provisioning artifact (also known as a version) for the specified product.\n You cannot create a provisioning artifact for a product that was shared with you.\n See also: AWS API Documentation\n \n \n :example: response = client.create_provisioning_artifact(\n AcceptLanguage='string',\n ProductId='string',\n Parameters={\n 'Name': 'string',\n 'Description': 'string',\n 'Info': {\n 'string': 'string'\n },\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR'\n },\n IdempotencyToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type Parameters: dict\n :param Parameters: [REQUIRED]\n The configuration for the provisioning artifact.\n Name (string) --The name of the provisioning artifact (for example, v1 v2beta). No spaces are allowed.\n Description (string) --The description of the provisioning artifact, including how it differs from the previous provisioning artifact.\n Info (dict) -- [REQUIRED]The URL of the CloudFormation template in Amazon S3. Specify the URL in JSON format as follows:\n 'LoadTemplateFromURL': 'https://s3.amazonaws.com/cf-templates-ozkq9d3hgiq2-us-east-1/...'\n (string) --\n (string) --\n \n Type (string) --The type of provisioning artifact.\n CLOUD_FORMATION_TEMPLATE - AWS CloudFormation template\n MARKETPLACE_AMI - AWS Marketplace AMI\n MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. 
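\n Usage sketch (illustrative only; the template URL is a placeholder) showing how a new version might be added to an existing product:\n import boto3\n client = boto3.client('servicecatalog')\n response = client.create_provisioning_artifact(\n ProductId='prod-abcdzk7xy33qa',\n Parameters={\n 'Name': 'v2',\n 'Description': 'Second revision of the template',\n 'Info': {'LoadTemplateFromURL': 'https://s3.amazonaws.com/example-bucket/template-v2.json'},\n 'Type': 'CLOUD_FORMATION_TEMPLATE'\n },\n IdempotencyToken='new-version-attempt-1'\n )\n 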
If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ProvisioningArtifactDetail': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR',\n 'CreatedTime': datetime(2015, 1, 1),\n 'Active': True|False\n },\n 'Info': {\n 'string': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED'\n }\n \n \n :returns: \n CLOUD_FORMATION_TEMPLATE - AWS CloudFormation template\n MARKETPLACE_AMI - AWS Marketplace AMI\n MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources\n \n \"\"\"\n pass\n\ndef create_service_action(Name=None, DefinitionType=None, Definition=None, Description=None, AcceptLanguage=None, IdempotencyToken=None):\n \"\"\"\n Creates a self-service action.\n See also: AWS API Documentation\n \n \n :example: response = client.create_service_action(\n Name='string',\n DefinitionType='SSM_AUTOMATION',\n Definition={\n 'string': 'string'\n },\n Description='string',\n AcceptLanguage='string',\n IdempotencyToken='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\n The self-service action name.\n \n\n :type DefinitionType: string\n :param DefinitionType: [REQUIRED]\n The service action definition type. For example, SSM_AUTOMATION .\n \n\n :type Definition: dict\n :param Definition: [REQUIRED]\n The self-service action definition. Can be one of the following:\n Name\n The name of the AWS Systems Manager Document. For example, AWS-RestartEC2Instance .\n Version\n The AWS Systems Manager automation document version. For example, 'Version': '1'\n AssumeRole\n The Amazon Resource Name (ARN) of the role that performs the self-service actions on your behalf. For example, 'AssumeRole': 'arn:aws:iam::12345678910:role/ActionRole' .\n To reuse the provisioned product launch role, set to 'AssumeRole': 'LAUNCH_ROLE' .\n Parameters\n The list of parameters in JSON format.\n For example: [{\\'Name\\':\\'InstanceId\\',\\'Type\\':\\'TARGET\\'}] .\n (string) --\n (string) --\n \n\n :type Description: string\n :param Description: The self-service action description.\n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. 
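\n Usage sketch (illustrative only; the role ARN is hypothetical, and AWS-RestartEC2Instance is the document name used in the description above):\n import json\n import boto3\n client = boto3.client('servicecatalog')\n response = client.create_service_action(\n Name='RestartInstance',\n DefinitionType='SSM_AUTOMATION',\n Definition={\n 'Name': 'AWS-RestartEC2Instance',\n 'Version': '1',\n 'AssumeRole': 'arn:aws:iam::123456789012:role/ActionRole',\n # Per the description above, the Parameters entry is itself a JSON-encoded string.\n 'Parameters': json.dumps([{'Name': 'InstanceId', 'Type': 'TARGET'}])\n },\n IdempotencyToken='action-attempt-1'\n )\n 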
If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'ServiceActionDetail': {\n 'ServiceActionSummary': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DefinitionType': 'SSM_AUTOMATION'\n },\n 'Definition': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef create_tag_option(Key=None, Value=None):\n \"\"\"\n Creates a TagOption.\n See also: AWS API Documentation\n \n \n :example: response = client.create_tag_option(\n Key='string',\n Value='string'\n )\n \n \n :type Key: string\n :param Key: [REQUIRED]\n The TagOption key.\n \n\n :type Value: string\n :param Value: [REQUIRED]\n The TagOption value.\n \n\n :rtype: dict\n :return: {\n 'TagOptionDetail': {\n 'Key': 'string',\n 'Value': 'string',\n 'Active': True|False,\n 'Id': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef delete_constraint(AcceptLanguage=None, Id=None):\n \"\"\"\n Deletes the specified constraint.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_constraint(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier of the constraint.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_portfolio(AcceptLanguage=None, Id=None):\n \"\"\"\n Deletes the specified portfolio.\n You cannot delete a portfolio if it was shared with you or if it has associated products, users, constraints, or shared accounts.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_portfolio(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The portfolio identifier.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_portfolio_share(AcceptLanguage=None, PortfolioId=None, AccountId=None, OrganizationNode=None):\n \"\"\"\n Stops sharing the specified portfolio with the specified account or organization node. 
Shares to an organization node can only be deleted by the master account of an Organization.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_portfolio_share(\n AcceptLanguage='string',\n PortfolioId='string',\n AccountId='string',\n OrganizationNode={\n 'Type': 'ORGANIZATION'|'ORGANIZATIONAL_UNIT'|'ACCOUNT',\n 'Value': 'string'\n }\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type AccountId: string\n :param AccountId: The AWS account ID.\n\n :type OrganizationNode: dict\n :param OrganizationNode: The organization node with which you want to stop sharing the portfolio.\n Type (string) --The organization node type.\n Value (string) --The identifier of the organization node.\n \n\n :rtype: dict\n :return: {\n 'PortfolioShareToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_product(AcceptLanguage=None, Id=None):\n \"\"\"\n Deletes the specified product.\n You cannot delete a product if it was shared with you or is associated with a portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_product(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The product identifier.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_provisioned_product_plan(AcceptLanguage=None, PlanId=None, IgnoreErrors=None):\n \"\"\"\n Deletes the specified plan.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_provisioned_product_plan(\n AcceptLanguage='string',\n PlanId='string',\n IgnoreErrors=True|False\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PlanId: string\n :param PlanId: [REQUIRED]\n The plan identifier.\n \n\n :type IgnoreErrors: boolean\n :param IgnoreErrors: If set to true, AWS Service Catalog stops managing the specified provisioned product even if it cannot delete the underlying resources.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_provisioning_artifact(AcceptLanguage=None, ProductId=None, ProvisioningArtifactId=None):\n \"\"\"\n Deletes the specified provisioning artifact (also known as a version) for the specified product.\n You cannot delete a provisioning artifact associated with a product that was shared with you. 
You cannot delete the last provisioning artifact for a product, because a product must have at least one provisioning artifact.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_provisioning_artifact(\n AcceptLanguage='string',\n ProductId='string',\n ProvisioningArtifactId='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_service_action(Id=None, AcceptLanguage=None):\n \"\"\"\n Deletes a self-service action.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_service_action(\n Id='string',\n AcceptLanguage='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The self-service action identifier. For example, act-fs7abcd89wxyz .\n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_tag_option(Id=None):\n \"\"\"\n Deletes the specified TagOption.\n You cannot delete a TagOption if it is associated with a product or portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_tag_option(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The TagOption identifier.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_constraint(AcceptLanguage=None, Id=None):\n \"\"\"\n Gets information about the specified constraint.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_constraint(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier of the constraint.\n \n\n :rtype: dict\n :return: {\n 'ConstraintDetail': {\n 'ConstraintId': 'string',\n 'Type': 'string',\n 'Description': 'string',\n 'Owner': 'string'\n },\n 'ConstraintParameters': 'string',\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED'\n }\n \n \n :returns: \n LAUNCH\n NOTIFICATION\n STACKSET\n TEMPLATE\n \n \"\"\"\n pass\n\ndef describe_copy_product_status(AcceptLanguage=None, CopyProductToken=None):\n \"\"\"\n Gets the status of the specified copy product operation.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_copy_product_status(\n AcceptLanguage='string',\n CopyProductToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type CopyProductToken: string\n :param CopyProductToken: [REQUIRED]\n The token for the copy product operation. 
This token is returned by CopyProduct .\n \n\n :rtype: dict\n :return: {\n 'CopyProductStatus': 'SUCCEEDED'|'IN_PROGRESS'|'FAILED',\n 'TargetProductId': 'string',\n 'StatusDetail': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_portfolio(AcceptLanguage=None, Id=None):\n \"\"\"\n Gets information about the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_portfolio(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The portfolio identifier.\n \n\n :rtype: dict\n :return: {\n 'PortfolioDetail': {\n 'Id': 'string',\n 'ARN': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProviderName': 'string'\n },\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'TagOptions': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Active': True|False,\n 'Id': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_portfolio_share_status(PortfolioShareToken=None):\n \"\"\"\n Gets the status of the specified portfolio share operation. This API can only be called by the master account in the organization.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_portfolio_share_status(\n PortfolioShareToken='string'\n )\n \n \n :type PortfolioShareToken: string\n :param PortfolioShareToken: [REQUIRED]\n The token for the portfolio share operation. This token is returned either by CreatePortfolioShare or by DeletePortfolioShare.\n \n\n :rtype: dict\n :return: {\n 'PortfolioShareToken': 'string',\n 'PortfolioId': 'string',\n 'OrganizationNodeValue': 'string',\n 'Status': 'NOT_STARTED'|'IN_PROGRESS'|'COMPLETED'|'COMPLETED_WITH_ERRORS'|'ERROR',\n 'ShareDetails': {\n 'SuccessfulShares': [\n 'string',\n ],\n 'ShareErrors': [\n {\n 'Accounts': [\n 'string',\n ],\n 'Message': 'string',\n 'Error': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_product(AcceptLanguage=None, Id=None):\n \"\"\"\n Gets information about the specified product.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_product(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The product identifier.\n \n\n :rtype: dict\n :return: {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'ProvisioningArtifacts': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_product_as_admin(AcceptLanguage=None, Id=None):\n \"\"\"\n Gets information about the specified product. 
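For example, a short sketch (product id assumed) that prints each version summary:\n detail = client.describe_product_as_admin(Id='prod-abcdzk7xy33qa')\n for artifact in detail['ProvisioningArtifactSummaries']: # one entry per version\n print(artifact['Id'], artifact['Name'])\n 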
This operation is run with administrator access.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_product_as_admin(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The product identifier.\n \n\n :rtype: dict\n :return: {\n 'ProductViewDetail': {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED',\n 'ProductARN': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n 'ProvisioningArtifactSummaries': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProvisioningArtifactMetadata': {\n 'string': 'string'\n }\n },\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'TagOptions': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Active': True|False,\n 'Id': 'string'\n },\n ]\n }\n \n \n :returns: \n AVAILABLE - The product is ready for use.\n CREATING - Product creation has started; the product is not ready for use.\n FAILED - An action failed.\n \n \"\"\"\n pass\n\ndef describe_product_view(AcceptLanguage=None, Id=None):\n \"\"\"\n Gets information about the specified product.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_product_view(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The product view identifier.\n \n\n :rtype: dict\n :return: {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'ProvisioningArtifacts': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_provisioned_product(AcceptLanguage=None, Id=None):\n \"\"\"\n Gets information about the specified provisioned product.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_provisioned_product(\n AcceptLanguage='string',\n Id='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The provisioned product identifier.\n \n\n :rtype: dict\n :return: {\n 'ProvisionedProductDetail': {\n 'Name': 'string',\n 'Arn': 'string',\n 'Type': 'string',\n 'Id': 'string',\n 'Status': 'AVAILABLE'|'UNDER_CHANGE'|'TAINTED'|'ERROR'|'PLAN_IN_PROGRESS',\n 'StatusMessage': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'IdempotencyToken': 'string',\n 'LastRecordId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string'\n },\n 'CloudWatchDashboards': [\n {\n 'Name': 'string'\n },\n ]\n }\n \n \n :returns: \n AVAILABLE - Stable state, 
ready to perform any operation. The most recent operation succeeded and completed.\n UNDER_CHANGE - Transitive state, operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.\n TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.\n ERROR - An unexpected error occurred, the provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.\n \n \"\"\"\n pass\n\ndef describe_provisioned_product_plan(AcceptLanguage=None, PlanId=None, PageSize=None, PageToken=None):\n \"\"\"\n Gets information about the resource changes for the specified plan.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_provisioned_product_plan(\n AcceptLanguage='string',\n PlanId='string',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PlanId: string\n :param PlanId: [REQUIRED]\n The plan identifier.\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ProvisionedProductPlanDetails': {\n 'CreatedTime': datetime(2015, 1, 1),\n 'PathId': 'string',\n 'ProductId': 'string',\n 'PlanName': 'string',\n 'PlanId': 'string',\n 'ProvisionProductId': 'string',\n 'ProvisionProductName': 'string',\n 'PlanType': 'CLOUDFORMATION',\n 'ProvisioningArtifactId': 'string',\n 'Status': 'CREATE_IN_PROGRESS'|'CREATE_SUCCESS'|'CREATE_FAILED'|'EXECUTE_IN_PROGRESS'|'EXECUTE_SUCCESS'|'EXECUTE_FAILED',\n 'UpdatedTime': datetime(2015, 1, 1),\n 'NotificationArns': [\n 'string',\n ],\n 'ProvisioningParameters': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'UsePreviousValue': True|False\n },\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'StatusMessage': 'string'\n },\n 'ResourceChanges': [\n {\n 'Action': 'ADD'|'MODIFY'|'REMOVE',\n 'LogicalResourceId': 'string',\n 'PhysicalResourceId': 'string',\n 'ResourceType': 'string',\n 'Replacement': 'TRUE'|'FALSE'|'CONDITIONAL',\n 'Scope': [\n 'PROPERTIES'|'METADATA'|'CREATIONPOLICY'|'UPDATEPOLICY'|'DELETIONPOLICY'|'TAGS',\n ],\n 'Details': [\n {\n 'Target': {\n 'Attribute': 'PROPERTIES'|'METADATA'|'CREATIONPOLICY'|'UPDATEPOLICY'|'DELETIONPOLICY'|'TAGS',\n 'Name': 'string',\n 'RequiresRecreation': 'NEVER'|'CONDITIONALLY'|'ALWAYS'\n },\n 'Evaluation': 'STATIC'|'DYNAMIC',\n 'CausingEntity': 'string'\n },\n ]\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_provisioning_artifact(AcceptLanguage=None, ProvisioningArtifactId=None, ProductId=None, Verbose=None):\n \"\"\"\n Gets information about the specified provisioning artifact (also known as a version) for the specified product.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_provisioning_artifact(\n AcceptLanguage='string',\n ProvisioningArtifactId='string',\n ProductId='string',\n Verbose=True|False\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - 
Japanese\n zh - Chinese\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact.\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type Verbose: boolean\n :param Verbose: Indicates whether a verbose level of detail is enabled.\n\n :rtype: dict\n :return: {\n 'ProvisioningArtifactDetail': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR',\n 'CreatedTime': datetime(2015, 1, 1),\n 'Active': True|False\n },\n 'Info': {\n 'string': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED'\n }\n \n \n :returns: \n CLOUD_FORMATION_TEMPLATE - AWS CloudFormation template\n MARKETPLACE_AMI - AWS Marketplace AMI\n MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources\n \n \"\"\"\n pass\n\ndef describe_provisioning_parameters(AcceptLanguage=None, ProductId=None, ProvisioningArtifactId=None, PathId=None):\n \"\"\"\n Gets information about the configuration required to provision the specified product using the specified provisioning artifact.\n If the output contains a TagOption key with an empty list of values, there is a TagOption conflict for that key. The end user cannot take action to fix the conflict, and launch is not blocked. In subsequent calls to ProvisionProduct , do not include conflicted TagOption keys as tags, or this causes the error \"Parameter validation failed: Missing required parameter in Tags[N ]:Value \". Tag the provisioned product with the value sc-tagoption-conflict-portfolioId-productId .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_provisioning_parameters(\n AcceptLanguage='string',\n ProductId='string',\n ProvisioningArtifactId='string',\n PathId='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact.\n \n\n :type PathId: string\n :param PathId: The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path. 
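A sketch (identifiers assumed) that resolves the first available path before requesting parameters:\n paths = client.list_launch_paths(ProductId='prod-abcdzk7xy33qa')['LaunchPathSummaries']\n params = client.describe_provisioning_parameters(ProductId='prod-abcdzk7xy33qa', ProvisioningArtifactId='pa-4abcdjnxjj6ne', PathId=paths[0]['Id'])\n 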
To list the paths for a product, use ListLaunchPaths .\n\n :rtype: dict\n :return: {\n 'ProvisioningArtifactParameters': [\n {\n 'ParameterKey': 'string',\n 'DefaultValue': 'string',\n 'ParameterType': 'string',\n 'IsNoEcho': True|False,\n 'Description': 'string',\n 'ParameterConstraints': {\n 'AllowedValues': [\n 'string',\n ]\n }\n },\n ],\n 'ConstraintSummaries': [\n {\n 'Type': 'string',\n 'Description': 'string'\n },\n ],\n 'UsageInstructions': [\n {\n 'Type': 'string',\n 'Value': 'string'\n },\n ],\n 'TagOptions': [\n {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n },\n ],\n 'ProvisioningArtifactPreferences': {\n 'StackSetAccounts': [\n 'string',\n ],\n 'StackSetRegions': [\n 'string',\n ]\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_record(AcceptLanguage=None, Id=None, PageToken=None, PageSize=None):\n \"\"\"\n Gets information about the specified request operation.\n Use this operation after calling a request operation (for example, ProvisionProduct , TerminateProvisionedProduct , or UpdateProvisionedProduct ).\n See also: AWS API Documentation\n \n \n :example: response = client.describe_record(\n AcceptLanguage='string',\n Id='string',\n PageToken='string',\n PageSize=123\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The record identifier of the provisioned product. This identifier is returned by the request operation.\n \n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :rtype: dict\n :return: {\n 'RecordDetail': {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'RecordOutputs': [\n {\n 'OutputKey': 'string',\n 'OutputValue': 'string',\n 'Description': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation has unsuccessfully completed. 
Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef describe_service_action(Id=None, AcceptLanguage=None):\n \"\"\"\n Describes a self-service action.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_service_action(\n Id='string',\n AcceptLanguage='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The self-service action identifier.\n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'ServiceActionDetail': {\n 'ServiceActionSummary': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DefinitionType': 'SSM_AUTOMATION'\n },\n 'Definition': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_tag_option(Id=None):\n \"\"\"\n Gets information about the specified TagOption.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_tag_option(\n Id='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The TagOption identifier.\n \n\n :rtype: dict\n :return: {\n 'TagOptionDetail': {\n 'Key': 'string',\n 'Value': 'string',\n 'Active': True|False,\n 'Id': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef disable_aws_organizations_access():\n \"\"\"\n Disable portfolio sharing through AWS Organizations feature. This feature will not delete your current shares but it will prevent you from creating new shares throughout your organization. Current shares will not be in sync with your organization structure if it changes after calling this API. This API can only be called by the master account in the organization.\n See also: AWS API Documentation\n \n \n :example: response = client.disable_aws_organizations_access()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disassociate_principal_from_portfolio(AcceptLanguage=None, PortfolioId=None, PrincipalARN=None):\n \"\"\"\n Disassociates a previously associated principal ARN from a specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_principal_from_portfolio(\n AcceptLanguage='string',\n PortfolioId='string',\n PrincipalARN='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type PrincipalARN: string\n :param PrincipalARN: [REQUIRED]\n The ARN of the principal (IAM user, role, or group).\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_product_from_portfolio(AcceptLanguage=None, ProductId=None, PortfolioId=None):\n \"\"\"\n Disassociates the specified product from the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_product_from_portfolio(\n AcceptLanguage='string',\n ProductId='string',\n PortfolioId='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_service_action_from_provisioning_artifact(ProductId=None, 
ProvisioningArtifactId=None, ServiceActionId=None, AcceptLanguage=None):\n \"\"\"\n Disassociates the specified self-service action association from the specified provisioning artifact.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_service_action_from_provisioning_artifact(\n ProductId='string',\n ProvisioningArtifactId='string',\n ServiceActionId='string',\n AcceptLanguage='string'\n )\n \n \n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier. For example, prod-abcdzk7xy33qa .\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne .\n \n\n :type ServiceActionId: string\n :param ServiceActionId: [REQUIRED]\n The self-service action identifier. For example, act-fs7abcd89wxyz .\n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef disassociate_tag_option_from_resource(ResourceId=None, TagOptionId=None):\n \"\"\"\n Disassociates the specified TagOption from the specified resource.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_tag_option_from_resource(\n ResourceId='string',\n TagOptionId='string'\n )\n \n \n :type ResourceId: string\n :param ResourceId: [REQUIRED]\n The resource identifier.\n \n\n :type TagOptionId: string\n :param TagOptionId: [REQUIRED]\n The TagOption identifier.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef enable_aws_organizations_access():\n \"\"\"\n Enable portfolio sharing feature through AWS Organizations. This API will allow Service Catalog to receive updates on your organization in order to sync your shares with the current structure. This API can only be called by the master account in the organization.\n By calling this API Service Catalog will make a call to organizations:EnableAWSServiceAccess on your behalf so that your shares can be in sync with any changes in your AWS Organizations structure.\n See also: AWS API Documentation\n \n \n :example: response = client.enable_aws_organizations_access()\n \n \n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef execute_provisioned_product_plan(AcceptLanguage=None, PlanId=None, IdempotencyToken=None):\n \"\"\"\n Provisions or modifies a product based on the resource changes for the specified plan.\n See also: AWS API Documentation\n \n \n :example: response = client.execute_provisioned_product_plan(\n AcceptLanguage='string',\n PlanId='string',\n IdempotencyToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PlanId: string\n :param PlanId: [REQUIRED]\n The plan identifier.\n \n\n :type IdempotencyToken: string\n :param IdempotencyToken: [REQUIRED]\n A unique identifier that you provide to ensure idempotency. 
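A common sketch (plan id assumed) generates the token once and reuses it on every retry:\n import uuid\n token = str(uuid.uuid4()) # create once, reuse across retries\n response = client.execute_provisioned_product_plan(PlanId='plan-abc123', IdempotencyToken=token)\n 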
If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'RecordDetail': {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation has unsuccessfully completed. Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef execute_provisioned_product_service_action(ProvisionedProductId=None, ServiceActionId=None, ExecuteToken=None, AcceptLanguage=None):\n \"\"\"\n Executes a self-service action against a provisioned product.\n See also: AWS API Documentation\n \n \n :example: response = client.execute_provisioned_product_service_action(\n ProvisionedProductId='string',\n ServiceActionId='string',\n ExecuteToken='string',\n AcceptLanguage='string'\n )\n \n \n :type ProvisionedProductId: string\n :param ProvisionedProductId: [REQUIRED]\n The identifier of the provisioned product.\n \n\n :type ServiceActionId: string\n :param ServiceActionId: [REQUIRED]\n The self-service action identifier. For example, act-fs7abcd89wxyz .\n \n\n :type ExecuteToken: string\n :param ExecuteToken: [REQUIRED]\n An idempotency token that uniquely identifies the execute request.\n This field is autopopulated if not provided.\n \n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'RecordDetail': {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation has unsuccessfully completed. 
Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_aws_organizations_access_status():\n \"\"\"\n Get the Access Status for AWS Organization portfolio share feature. This API can only be called by the master account in the organization.\n See also: AWS API Documentation\n \n \n :example: response = client.get_aws_organizations_access_status()\n \n \n :rtype: dict\n :return: {\n 'AccessStatus': 'ENABLED'|'UNDER_CHANGE'|'DISABLED'\n }\n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_accepted_portfolio_shares(AcceptLanguage=None, PageToken=None, PageSize=None, PortfolioShareType=None):\n \"\"\"\n Lists all portfolios for which sharing was accepted by this account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_accepted_portfolio_shares(\n AcceptLanguage='string',\n PageToken='string',\n PageSize=123,\n PortfolioShareType='IMPORTED'|'AWS_SERVICECATALOG'|'AWS_ORGANIZATIONS'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PortfolioShareType: string\n :param PortfolioShareType: The type of shared portfolios to list. 
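For example, a sketch listing only the shares that arrived through AWS Organizations:\n response = client.list_accepted_portfolio_shares(PortfolioShareType='AWS_ORGANIZATIONS')\n for portfolio in response['PortfolioDetails']:\n print(portfolio['Id'], portfolio['DisplayName'])\n 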
The default is to list imported portfolios.\n AWS_ORGANIZATIONS - List portfolios shared by the master account of your organization\n AWS_SERVICECATALOG - List default portfolios\n IMPORTED - List imported portfolios\n \n\n :rtype: dict\n :return: {\n 'PortfolioDetails': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProviderName': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_constraints_for_portfolio(AcceptLanguage=None, PortfolioId=None, ProductId=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists the constraints for the specified portfolio and product.\n See also: AWS API Documentation\n \n \n :example: response = client.list_constraints_for_portfolio(\n AcceptLanguage='string',\n PortfolioId='string',\n ProductId='string',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type ProductId: string\n :param ProductId: The product identifier.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ConstraintDetails': [\n {\n 'ConstraintId': 'string',\n 'Type': 'string',\n 'Description': 'string',\n 'Owner': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n LAUNCH\n NOTIFICATION\n STACKSET\n TEMPLATE\n \n \"\"\"\n pass\n\ndef list_launch_paths(AcceptLanguage=None, ProductId=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists the paths to the specified product. A path is how the user has access to a specified product, and is necessary when provisioning a product. A path also determines the constraints put on the product.\n See also: AWS API Documentation\n \n \n :example: response = client.list_launch_paths(\n AcceptLanguage='string',\n ProductId='string',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'LaunchPathSummaries': [\n {\n 'Id': 'string',\n 'ConstraintSummaries': [\n {\n 'Type': 'string',\n 'Description': 'string'\n },\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'Name': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n LAUNCH\n NOTIFICATION\n STACKSET\n TEMPLATE\n \n \"\"\"\n pass\n\ndef list_organization_portfolio_access(AcceptLanguage=None, PortfolioId=None, OrganizationNodeType=None, PageToken=None, PageSize=None):\n \"\"\"\n Lists the organization nodes that have access to the specified portfolio. 
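A minimal sketch (portfolio id assumed) that collects the organizational units with access:\n response = client.list_organization_portfolio_access(PortfolioId='port-2abcdext3y5fk', OrganizationNodeType='ORGANIZATIONAL_UNIT')\n units = [node['Value'] for node in response['OrganizationNodes']]\n 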
This API can only be called by the master account in the organization.\n See also: AWS API Documentation\n \n \n :example: response = client.list_organization_portfolio_access(\n AcceptLanguage='string',\n PortfolioId='string',\n OrganizationNodeType='ORGANIZATION'|'ORGANIZATIONAL_UNIT'|'ACCOUNT',\n PageToken='string',\n PageSize=123\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier. For example, port-2abcdext3y5fk .\n \n\n :type OrganizationNodeType: string\n :param OrganizationNodeType: [REQUIRED]\n The organization node type that will be returned in the output.\n ORGANIZATION - Organization that has access to the portfolio.\n ORGANIZATIONAL_UNIT - Organizational unit that has access to the portfolio within your organization.\n ACCOUNT - Account that has access to the portfolio within your organization.\n \n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :rtype: dict\n :return: {\n 'OrganizationNodes': [\n {\n 'Type': 'ORGANIZATION'|'ORGANIZATIONAL_UNIT'|'ACCOUNT',\n 'Value': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_portfolio_access(AcceptLanguage=None, PortfolioId=None):\n \"\"\"\n Lists the account IDs that have access to the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.list_portfolio_access(\n AcceptLanguage='string',\n PortfolioId='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :rtype: dict\n :return: {\n 'AccountIds': [\n 'string',\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_portfolios(AcceptLanguage=None, PageToken=None, PageSize=None):\n \"\"\"\n Lists all portfolios in the catalog.\n See also: AWS API Documentation\n \n \n :example: response = client.list_portfolios(\n AcceptLanguage='string',\n PageToken='string',\n PageSize=123\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. 
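A typical pagination sketch feeds NextPageToken from each response back into this parameter:\n portfolios, token = [], None\n while True:\n kwargs = {'PageSize': 20}\n if token:\n kwargs['PageToken'] = token\n page = client.list_portfolios(**kwargs)\n portfolios.extend(page['PortfolioDetails'])\n token = page.get('NextPageToken')\n if not token: # no token means the last page was reached\n break\n 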
To retrieve the first set of results, use null.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :rtype: dict\n :return: {\n 'PortfolioDetails': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProviderName': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_portfolios_for_product(AcceptLanguage=None, ProductId=None, PageToken=None, PageSize=None):\n \"\"\"\n Lists all portfolios that the specified product is associated with.\n See also: AWS API Documentation\n \n \n :example: response = client.list_portfolios_for_product(\n AcceptLanguage='string',\n ProductId='string',\n PageToken='string',\n PageSize=123\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :rtype: dict\n :return: {\n 'PortfolioDetails': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProviderName': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_principals_for_portfolio(AcceptLanguage=None, PortfolioId=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists all principal ARNs associated with the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.list_principals_for_portfolio(\n AcceptLanguage='string',\n PortfolioId='string',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'Principals': [\n {\n 'PrincipalARN': 'string',\n 'PrincipalType': 'IAM'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_provisioned_product_plans(AcceptLanguage=None, ProvisionProductId=None, PageSize=None, PageToken=None, AccessLevelFilter=None):\n \"\"\"\n Lists the plans for the specified provisioned product or all plans to which the user has access.\n See also: AWS API Documentation\n \n \n :example: response = client.list_provisioned_product_plans(\n AcceptLanguage='string',\n ProvisionProductId='string',\n PageSize=123,\n PageToken='string',\n AccessLevelFilter={\n 'Key': 'Account'|'Role'|'User',\n 'Value': 'string'\n }\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProvisionProductId: string\n :param ProvisionProductId: The product identifier.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. 
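For example, a sketch that combines paging with an account-wide scope (filter values per AccessLevelFilter below):\n response = client.list_provisioned_product_plans(AccessLevelFilter={'Key': 'Account', 'Value': 'Self'})\n 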
To retrieve the first set of results, use null.\n\n :type AccessLevelFilter: dict\n :param AccessLevelFilter: The access level to use to obtain results. The default is User .\n Key (string) --The access level.\n Account - Filter results based on the account.\n Role - Filter results based on the federated role of the specified user.\n User - Filter results based on the specified user.\n Value (string) --The user to which the access level applies. The only supported value is Self .\n \n\n :rtype: dict\n :return: {\n 'ProvisionedProductPlans': [\n {\n 'PlanName': 'string',\n 'PlanId': 'string',\n 'ProvisionProductId': 'string',\n 'ProvisionProductName': 'string',\n 'PlanType': 'CLOUDFORMATION',\n 'ProvisioningArtifactId': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_provisioning_artifacts(AcceptLanguage=None, ProductId=None):\n \"\"\"\n Lists all provisioning artifacts (also known as versions) for the specified product.\n See also: AWS API Documentation\n \n \n :example: response = client.list_provisioning_artifacts(\n AcceptLanguage='string',\n ProductId='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :rtype: dict\n :return: {\n 'ProvisioningArtifactDetails': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR',\n 'CreatedTime': datetime(2015, 1, 1),\n 'Active': True|False\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n CLOUD_FORMATION_TEMPLATE - AWS CloudFormation template\n MARKETPLACE_AMI - AWS Marketplace AMI\n MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources\n \n \"\"\"\n pass\n\ndef list_provisioning_artifacts_for_service_action(ServiceActionId=None, PageSize=None, PageToken=None, AcceptLanguage=None):\n \"\"\"\n Lists all provisioning artifacts (also known as versions) for the specified self-service action.\n See also: AWS API Documentation\n \n \n :example: response = client.list_provisioning_artifacts_for_service_action(\n ServiceActionId='string',\n PageSize=123,\n PageToken='string',\n AcceptLanguage='string'\n )\n \n \n :type ServiceActionId: string\n :param ServiceActionId: [REQUIRED]\n The self-service action identifier. For example, act-fs7abcd89wxyz .\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. 
To retrieve the first set of results, use null.\n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'ProvisioningArtifactViews': [\n {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'ProvisioningArtifact': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n }\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_record_history(AcceptLanguage=None, AccessLevelFilter=None, SearchFilter=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists the specified requests or all performed requests.\n See also: AWS API Documentation\n \n \n :example: response = client.list_record_history(\n AcceptLanguage='string',\n AccessLevelFilter={\n 'Key': 'Account'|'Role'|'User',\n 'Value': 'string'\n },\n SearchFilter={\n 'Key': 'string',\n 'Value': 'string'\n },\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type AccessLevelFilter: dict\n :param AccessLevelFilter: The access level to use to obtain results. The default is User .\n Key (string) --The access level.\n Account - Filter results based on the account.\n Role - Filter results based on the federated role of the specified user.\n User - Filter results based on the specified user.\n Value (string) --The user to which the access level applies. The only supported value is Self .\n \n\n :type SearchFilter: dict\n :param SearchFilter: The search filter to scope the results.\n Key (string) --The filter key.\n product - Filter results based on the specified product identifier.\n provisionedproduct - Filter results based on the provisioned product identifier.\n Value (string) --The filter value.\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'RecordDetails': [\n {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation has unsuccessfully completed. 
Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef list_resources_for_tag_option(TagOptionId=None, ResourceType=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists the resources associated with the specified TagOption.\n See also: AWS API Documentation\n \n \n :example: response = client.list_resources_for_tag_option(\n TagOptionId='string',\n ResourceType='string',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type TagOptionId: string\n :param TagOptionId: [REQUIRED]\n The TagOption identifier.\n \n\n :type ResourceType: string\n :param ResourceType: The resource type.\n Portfolio\n Product\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ResourceDetails': [\n {\n 'Id': 'string',\n 'ARN': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n ],\n 'PageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_service_actions(AcceptLanguage=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists all self-service actions.\n See also: AWS API Documentation\n \n \n :example: response = client.list_service_actions(\n AcceptLanguage='string',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ServiceActionSummaries': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DefinitionType': 'SSM_AUTOMATION'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_service_actions_for_provisioning_artifact(ProductId=None, ProvisioningArtifactId=None, PageSize=None, PageToken=None, AcceptLanguage=None):\n \"\"\"\n Returns a paginated list of self-service actions associated with the specified Product ID and Provisioning Artifact ID.\n See also: AWS API Documentation\n \n \n :example: response = client.list_service_actions_for_provisioning_artifact(\n ProductId='string',\n ProvisioningArtifactId='string',\n PageSize=123,\n PageToken='string',\n AcceptLanguage='string'\n )\n \n \n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier. For example, prod-abcdzk7xy33qa .\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne .\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. 
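For example, a sketch (identifiers assumed) that prints the actions attached to one product version:\n response = client.list_service_actions_for_provisioning_artifact(ProductId='prod-abcdzk7xy33qa', ProvisioningArtifactId='pa-4abcdjnxjj6ne')\n for action in response['ServiceActionSummaries']:\n print(action['Id'], action['Name'])\n 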
To retrieve the first set of results, use null.\n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'ServiceActionSummaries': [\n {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DefinitionType': 'SSM_AUTOMATION'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_tag_options(Filters=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists the specified TagOptions or all TagOptions.\n See also: AWS API Documentation\n \n \n :example: response = client.list_tag_options(\n Filters={\n 'Key': 'string',\n 'Value': 'string',\n 'Active': True|False\n },\n PageSize=123,\n PageToken='string'\n )\n \n \n :type Filters: dict\n :param Filters: The search filters. If no search filters are specified, the output includes all TagOptions.\n Key (string) --The TagOption key.\n Value (string) --The TagOption value.\n Active (boolean) --The active state.\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'TagOptionDetails': [\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Active': True|False,\n 'Id': 'string'\n },\n ],\n 'PageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef provision_product(AcceptLanguage=None, ProductId=None, ProvisioningArtifactId=None, PathId=None, ProvisionedProductName=None, ProvisioningParameters=None, ProvisioningPreferences=None, Tags=None, NotificationArns=None, ProvisionToken=None):\n \"\"\"\n Provisions the specified product.\n A provisioned product is a resourced instance of a product. For example, provisioning a product based on a CloudFormation template launches a CloudFormation stack and its underlying resources. You can check the status of this request using DescribeRecord .\n If the request contains a tag key with an empty list of values, there is a tag conflict for that key. Do not include conflicted keys as tags, or this causes the error \"Parameter validation failed: Missing required parameter in Tags[N ]:Value \".\n See also: AWS API Documentation\n \n \n :example: response = client.provision_product(\n AcceptLanguage='string',\n ProductId='string',\n ProvisioningArtifactId='string',\n PathId='string',\n ProvisionedProductName='string',\n ProvisioningParameters=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n ProvisioningPreferences={\n 'StackSetAccounts': [\n 'string',\n ],\n 'StackSetRegions': [\n 'string',\n ],\n 'StackSetFailureToleranceCount': 123,\n 'StackSetFailureTolerancePercentage': 123,\n 'StackSetMaxConcurrencyCount': 123,\n 'StackSetMaxConcurrencyPercentage': 123\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n NotificationArns=[\n 'string',\n ],\n ProvisionToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact.\n \n\n :type PathId: string\n :param PathId: The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path. 
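A minimal launch sketch (identifiers and the parameter name InstanceType are assumed) that supplies a generated idempotency token and then tracks the request:\n import uuid\n record = client.provision_product(ProductId='prod-abcdzk7xy33qa', ProvisioningArtifactId='pa-4abcdjnxjj6ne', ProvisionedProductName='my-instance', ProvisioningParameters=[{'Key': 'InstanceType', 'Value': 't3.micro'}], ProvisionToken=str(uuid.uuid4()))['RecordDetail']\n status = client.describe_record(Id=record['RecordId'])['RecordDetail']['Status'] # e.g. IN_PROGRESS\n 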
To list the paths for a product, use ListLaunchPaths .\n\n :type ProvisionedProductName: string\n :param ProvisionedProductName: [REQUIRED]\n A user-friendly name for the provisioned product. This value must be unique for the AWS account and cannot be updated after the product is provisioned.\n \n\n :type ProvisioningParameters: list\n :param ProvisioningParameters: Parameters specified by the administrator that are required for provisioning the product.\n (dict) --Information about a parameter used to provision a product.\n Key (string) --The parameter key.\n Value (string) --The parameter value.\n \n \n\n :type ProvisioningPreferences: dict\n :param ProvisioningPreferences: An object that contains information about the provisioning preferences for a stack set.\n StackSetAccounts (list) --One or more AWS accounts that will have access to the provisioned product.\n Applicable only to a CFN_STACKSET provisioned product type.\n The AWS accounts specified should be within the list of accounts in the STACKSET constraint. To get the list of accounts in the STACKSET constraint, use the DescribeProvisioningParameters operation.\n If no values are specified, the default value is all accounts from the STACKSET constraint.\n (string) --\n StackSetRegions (list) --One or more AWS Regions where the provisioned product will be available.\n Applicable only to a CFN_STACKSET provisioned product type.\n The specified regions should be within the list of regions from the STACKSET constraint. To get the list of regions in the STACKSET constraint, use the DescribeProvisioningParameters operation.\n If no values are specified, the default value is all regions from the STACKSET constraint.\n (string) --\n StackSetFailureToleranceCount (integer) --The number of accounts, per region, for which this operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetFailureToleranceCount or StackSetFailureTolerancePercentage , but not both.\n The default value is 0 if no value is specified.\n StackSetFailureTolerancePercentage (integer) --The percentage of accounts, per region, for which this stack operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions.\n When calculating the number of accounts based on the specified percentage, AWS Service Catalog rounds down to the next whole number.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetFailureToleranceCount or StackSetFailureTolerancePercentage , but not both.\n StackSetMaxConcurrencyCount (integer) --The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of StackSetFailureToleranceCount . StackSetMaxConcurrentCount is at most one more than the StackSetFailureToleranceCount .\n Note that this setting lets you specify the maximum for operations. 
For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetMaxConcurrencyCount or StackSetMaxConcurrencyPercentage , but not both.\n StackSetMaxConcurrencyPercentage (integer) --The maximum percentage of accounts in which to perform this operation at one time.\n When calculating the number of accounts based on the specified percentage, AWS Service Catalog rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, AWS Service Catalog sets the number as 1 instead.\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetMaxConcurrencyCount or StackSetMaxConcurrencyPercentage , but not both.\n \n\n :type Tags: list\n :param Tags: One or more tags.\n (dict) --Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The value for this key.\n \n \n\n :type NotificationArns: list\n :param NotificationArns: Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events.\n (string) --\n \n\n :type ProvisionToken: string\n :param ProvisionToken: [REQUIRED]\n An idempotency token that uniquely identifies the provisioning request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'RecordDetail': {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation has unsuccessfully completed. 
Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef reject_portfolio_share(AcceptLanguage=None, PortfolioId=None, PortfolioShareType=None):\n \"\"\"\n Rejects an offer to share the specified portfolio.\n See also: AWS API Documentation\n \n \n :example: response = client.reject_portfolio_share(\n AcceptLanguage='string',\n PortfolioId='string',\n PortfolioShareType='IMPORTED'|'AWS_SERVICECATALOG'|'AWS_ORGANIZATIONS'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: [REQUIRED]\n The portfolio identifier.\n \n\n :type PortfolioShareType: string\n :param PortfolioShareType: The type of shared portfolios to reject. The default is to reject imported portfolios.\n AWS_ORGANIZATIONS - Reject portfolios shared by the master account of your organization.\n IMPORTED - Reject imported portfolios.\n AWS_SERVICECATALOG - Not supported. (Throws ResourceNotFoundException.)\n For example, aws servicecatalog reject-portfolio-share --portfolio-id 'port-2qwzkwxt3y5fk' --portfolio-share-type AWS_ORGANIZATIONS\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef scan_provisioned_products(AcceptLanguage=None, AccessLevelFilter=None, PageSize=None, PageToken=None):\n \"\"\"\n Lists the provisioned products that are available (not terminated).\n To use additional filtering, see SearchProvisionedProducts .\n See also: AWS API Documentation\n \n \n :example: response = client.scan_provisioned_products(\n AcceptLanguage='string',\n AccessLevelFilter={\n 'Key': 'Account'|'Role'|'User',\n 'Value': 'string'\n },\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type AccessLevelFilter: dict\n :param AccessLevelFilter: The access level to use to obtain results. The default is User .\n Key (string) --The access level.\n Account - Filter results based on the account.\n Role - Filter results based on the federated role of the specified user.\n User - Filter results based on the specified user.\n Value (string) --The user to which the access level applies. The only supported value is Self .\n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ProvisionedProducts': [\n {\n 'Name': 'string',\n 'Arn': 'string',\n 'Type': 'string',\n 'Id': 'string',\n 'Status': 'AVAILABLE'|'UNDER_CHANGE'|'TAINTED'|'ERROR'|'PLAN_IN_PROGRESS',\n 'StatusMessage': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'IdempotencyToken': 'string',\n 'LastRecordId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string'\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.\n UNDER_CHANGE - Transitive state, operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.\n TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. 
For example, a request to update to a new version failed and the stack rolled back to the current version.\n ERROR - An unexpected error occurred, the provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.\n \n \"\"\"\n pass\n\ndef search_products(AcceptLanguage=None, Filters=None, PageSize=None, SortBy=None, SortOrder=None, PageToken=None):\n \"\"\"\n Gets information about the products to which the caller has access.\n See also: AWS API Documentation\n \n \n :example: response = client.search_products(\n AcceptLanguage='string',\n Filters={\n 'string': [\n 'string',\n ]\n },\n PageSize=123,\n SortBy='Title'|'VersionCount'|'CreationDate',\n SortOrder='ASCENDING'|'DESCENDING',\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Filters: dict\n :param Filters: The search filters. If no search filters are specified, the output includes all products to which the caller has access.\n (string) --\n (list) --\n (string) --\n \n \n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type SortBy: string\n :param SortBy: The sort field. If no value is specified, the results are not sorted.\n\n :type SortOrder: string\n :param SortOrder: The sort order. If no value is specified, the results are not sorted.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ProductViewSummaries': [\n {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n ],\n 'ProductViewAggregations': {\n 'string': [\n {\n 'Value': 'string',\n 'ApproximateCount': 123\n },\n ]\n },\n 'NextPageToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef search_products_as_admin(AcceptLanguage=None, PortfolioId=None, Filters=None, SortBy=None, SortOrder=None, PageToken=None, PageSize=None, ProductSource=None):\n \"\"\"\n Gets information about the products for the specified portfolio or all products.\n See also: AWS API Documentation\n \n \n :example: response = client.search_products_as_admin(\n AcceptLanguage='string',\n PortfolioId='string',\n Filters={\n 'string': [\n 'string',\n ]\n },\n SortBy='Title'|'VersionCount'|'CreationDate',\n SortOrder='ASCENDING'|'DESCENDING',\n PageToken='string',\n PageSize=123,\n ProductSource='ACCOUNT'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type PortfolioId: string\n :param PortfolioId: The portfolio identifier.\n\n :type Filters: dict\n :param Filters: The search filters. If no search filters are specified, the output includes all products to which the administrator has access.\n (string) --\n (list) --\n (string) --\n \n \n\n :type SortBy: string\n :param SortBy: The sort field. If no value is specified, the results are not sorted.\n\n :type SortOrder: string\n :param SortOrder: The sort order. If no value is specified, the results are not sorted.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. 
To retrieve the first set of results, use null.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type ProductSource: string\n :param ProductSource: Access level of the source of the product.\n\n :rtype: dict\n :return: {\n 'ProductViewDetails': [\n {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED',\n 'ProductARN': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n AVAILABLE - The product is ready for use.\n CREATING - Product creation has started; the product is not ready for use.\n FAILED - An action failed.\n \n \"\"\"\n pass\n\ndef search_provisioned_products(AcceptLanguage=None, AccessLevelFilter=None, Filters=None, SortBy=None, SortOrder=None, PageSize=None, PageToken=None):\n \"\"\"\n Gets information about the provisioned products that meet the specified criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.search_provisioned_products(\n AcceptLanguage='string',\n AccessLevelFilter={\n 'Key': 'Account'|'Role'|'User',\n 'Value': 'string'\n },\n Filters={\n 'string': [\n 'string',\n ]\n },\n SortBy='string',\n SortOrder='ASCENDING'|'DESCENDING',\n PageSize=123,\n PageToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type AccessLevelFilter: dict\n :param AccessLevelFilter: The access level to use to obtain results. The default is User .\n Key (string) --The access level.\n Account - Filter results based on the account.\n Role - Filter results based on the federated role of the specified user.\n User - Filter results based on the specified user.\n Value (string) --The user to which the access level applies. The only supported value is Self .\n \n\n :type Filters: dict\n :param Filters: The search filters.\n When the key is SearchQuery , the searchable fields are arn , createdTime , id , lastRecordId , idempotencyToken , name , physicalId , productId , provisioningArtifact , type , status , tags , userArn , and userArnSession .\n Example: 'SearchQuery':['status:AVAILABLE']\n (string) --\n (list) --\n (string) --\n \n \n\n :type SortBy: string\n :param SortBy: The sort field. If no value is specified, the results are not sorted. The valid values are arn , id , name , and lastRecordId .\n\n :type SortOrder: string\n :param SortOrder: The sort order. If no value is specified, the results are not sorted.\n\n :type PageSize: integer\n :param PageSize: The maximum number of items to return with this call.\n\n :type PageToken: string\n :param PageToken: The page token for the next set of results. 
To retrieve the first set of results, use null.\n\n :rtype: dict\n :return: {\n 'ProvisionedProducts': [\n {\n 'Name': 'string',\n 'Arn': 'string',\n 'Type': 'string',\n 'Id': 'string',\n 'Status': 'AVAILABLE'|'UNDER_CHANGE'|'TAINTED'|'ERROR'|'PLAN_IN_PROGRESS',\n 'StatusMessage': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'IdempotencyToken': 'string',\n 'LastRecordId': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'PhysicalId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'UserArn': 'string',\n 'UserArnSession': 'string'\n },\n ],\n 'TotalResultsCount': 123,\n 'NextPageToken': 'string'\n }\n \n \n :returns: \n AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.\n UNDER_CHANGE - Transitive state, operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.\n TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.\n ERROR - An unexpected error occurred, the provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.\n \n \"\"\"\n pass\n\ndef terminate_provisioned_product(ProvisionedProductName=None, ProvisionedProductId=None, TerminateToken=None, IgnoreErrors=None, AcceptLanguage=None):\n \"\"\"\n Terminates the specified provisioned product.\n This operation does not delete any records associated with the provisioned product.\n You can check the status of this request using DescribeRecord .\n See also: AWS API Documentation\n \n \n :example: response = client.terminate_provisioned_product(\n ProvisionedProductName='string',\n ProvisionedProductId='string',\n TerminateToken='string',\n IgnoreErrors=True|False,\n AcceptLanguage='string'\n )\n \n \n :type ProvisionedProductName: string\n :param ProvisionedProductName: The name of the provisioned product. You cannot specify both ProvisionedProductName and ProvisionedProductId .\n\n :type ProvisionedProductId: string\n :param ProvisionedProductId: The identifier of the provisioned product. You cannot specify both ProvisionedProductName and ProvisionedProductId .\n\n :type TerminateToken: string\n :param TerminateToken: [REQUIRED]\n An idempotency token that uniquely identifies the termination request. This token is only valid during the termination process. 
After the provisioned product is terminated, subsequent requests to terminate the same provisioned product always return ResourceNotFound .\n This field is autopopulated if not provided.\n \n\n :type IgnoreErrors: boolean\n :param IgnoreErrors: If set to true, AWS Service Catalog stops managing the specified provisioned product even if it cannot delete the underlying resources.\n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'RecordDetail': {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation failed to complete. Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef update_constraint(AcceptLanguage=None, Id=None, Description=None):\n \"\"\"\n Updates the specified constraint.\n See also: AWS API Documentation\n \n \n :example: response = client.update_constraint(\n AcceptLanguage='string',\n Id='string',\n Description='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The identifier of the constraint.\n \n\n :type Description: string\n :param Description: The updated description of the constraint.\n\n :rtype: dict\n :return: {\n 'ConstraintDetail': {\n 'ConstraintId': 'string',\n 'Type': 'string',\n 'Description': 'string',\n 'Owner': 'string'\n },\n 'ConstraintParameters': 'string',\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED'\n }\n \n \n :returns: \n LAUNCH\n NOTIFICATION\n STACKSET\n TEMPLATE\n \n \"\"\"\n pass\n\ndef update_portfolio(AcceptLanguage=None, Id=None, DisplayName=None, Description=None, ProviderName=None, AddTags=None, RemoveTags=None):\n \"\"\"\n Updates the specified portfolio.\n You cannot update a portfolio that was shared with you.\n See also: AWS API Documentation\n \n \n :example: response = client.update_portfolio(\n AcceptLanguage='string',\n Id='string',\n DisplayName='string',\n Description='string',\n ProviderName='string',\n AddTags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n RemoveTags=[\n 'string',\n ]\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The portfolio identifier.\n \n\n :type DisplayName: string\n :param DisplayName: The name to use for display purposes.\n\n :type Description: string\n :param Description: The updated description of the portfolio.\n\n :type ProviderName: string\n :param ProviderName: The 
updated name of the portfolio provider.\n\n :type AddTags: list\n :param AddTags: The tags to add.\n (dict) --Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The value for this key.\n \n \n\n :type RemoveTags: list\n :param RemoveTags: The tags to remove.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'PortfolioDetail': {\n 'Id': 'string',\n 'ARN': 'string',\n 'DisplayName': 'string',\n 'Description': 'string',\n 'CreatedTime': datetime(2015, 1, 1),\n 'ProviderName': 'string'\n },\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef update_product(AcceptLanguage=None, Id=None, Name=None, Owner=None, Description=None, Distributor=None, SupportDescription=None, SupportEmail=None, SupportUrl=None, AddTags=None, RemoveTags=None):\n \"\"\"\n Updates the specified product.\n See also: AWS API Documentation\n \n \n :example: response = client.update_product(\n AcceptLanguage='string',\n Id='string',\n Name='string',\n Owner='string',\n Description='string',\n Distributor='string',\n SupportDescription='string',\n SupportEmail='string',\n SupportUrl='string',\n AddTags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n RemoveTags=[\n 'string',\n ]\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type Id: string\n :param Id: [REQUIRED]\n The product identifier.\n \n\n :type Name: string\n :param Name: The updated product name.\n\n :type Owner: string\n :param Owner: The updated owner of the product.\n\n :type Description: string\n :param Description: The updated description of the product.\n\n :type Distributor: string\n :param Distributor: The updated distributor of the product.\n\n :type SupportDescription: string\n :param SupportDescription: The updated support description for the product.\n\n :type SupportEmail: string\n :param SupportEmail: The updated support email for the product.\n\n :type SupportUrl: string\n :param SupportUrl: The updated support URL for the product.\n\n :type AddTags: list\n :param AddTags: The tags to add to the product.\n (dict) --Information about a tag. A tag is a key-value pair. 
Tags are propagated to the resources created when provisioning a product.\n Key (string) -- [REQUIRED]The tag key.\n Value (string) -- [REQUIRED]The value for this key.\n \n \n\n :type RemoveTags: list\n :param RemoveTags: The tags to remove from the product.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'ProductViewDetail': {\n 'ProductViewSummary': {\n 'Id': 'string',\n 'ProductId': 'string',\n 'Name': 'string',\n 'Owner': 'string',\n 'ShortDescription': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',\n 'Distributor': 'string',\n 'HasDefaultPath': True|False,\n 'SupportEmail': 'string',\n 'SupportDescription': 'string',\n 'SupportUrl': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED',\n 'ProductARN': 'string',\n 'CreatedTime': datetime(2015, 1, 1)\n },\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n \n :returns: \n AVAILABLE - The product is ready for use.\n CREATING - Product creation has started; the product is not ready for use.\n FAILED - An action failed.\n \n \"\"\"\n pass\n\ndef update_provisioned_product(AcceptLanguage=None, ProvisionedProductName=None, ProvisionedProductId=None, ProductId=None, ProvisioningArtifactId=None, PathId=None, ProvisioningParameters=None, ProvisioningPreferences=None, UpdateToken=None):\n \"\"\"\n Requests updates to the configuration of the specified provisioned product.\n If there are tags associated with the object, they cannot be updated or added. Depending on the specific updates requested, this operation can update with no interruption, with some interruption, or replace the provisioned product entirely.\n You can check the status of this request using DescribeRecord .\n See also: AWS API Documentation\n \n \n :example: response = client.update_provisioned_product(\n AcceptLanguage='string',\n ProvisionedProductName='string',\n ProvisionedProductId='string',\n ProductId='string',\n ProvisioningArtifactId='string',\n PathId='string',\n ProvisioningParameters=[\n {\n 'Key': 'string',\n 'Value': 'string',\n 'UsePreviousValue': True|False\n },\n ],\n ProvisioningPreferences={\n 'StackSetAccounts': [\n 'string',\n ],\n 'StackSetRegions': [\n 'string',\n ],\n 'StackSetFailureToleranceCount': 123,\n 'StackSetFailureTolerancePercentage': 123,\n 'StackSetMaxConcurrencyCount': 123,\n 'StackSetMaxConcurrencyPercentage': 123,\n 'StackSetOperationType': 'CREATE'|'UPDATE'|'DELETE'\n },\n UpdateToken='string'\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProvisionedProductName: string\n :param ProvisionedProductName: The updated name of the provisioned product. You cannot specify both ProvisionedProductName and ProvisionedProductId .\n\n :type ProvisionedProductId: string\n :param ProvisionedProductId: The identifier of the provisioned product. You cannot specify both ProvisionedProductName and ProvisionedProductId .\n\n :type ProductId: string\n :param ProductId: The identifier of the product.\n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: The identifier of the provisioning artifact.\n\n :type PathId: string\n :param PathId: The new path identifier. 
This value is optional if the product has a default path, and required if the product has more than one path.\n\n :type ProvisioningParameters: list\n :param ProvisioningParameters: The new parameters.\n (dict) --The parameter key-value pair used to update a provisioned product.\n Key (string) --The parameter key.\n Value (string) --The parameter value.\n UsePreviousValue (boolean) --If set to true, Value is ignored and the previous parameter value is kept.\n \n \n\n :type ProvisioningPreferences: dict\n :param ProvisioningPreferences: An object that contains information about the provisioning preferences for a stack set.\n StackSetAccounts (list) --One or more AWS accounts that will have access to the provisioned product.\n Applicable only to a CFN_STACKSET provisioned product type.\n The AWS accounts specified should be within the list of accounts in the STACKSET constraint. To get the list of accounts in the STACKSET constraint, use the DescribeProvisioningParameters operation.\n If no values are specified, the default value is all accounts from the STACKSET constraint.\n (string) --\n StackSetRegions (list) --One or more AWS Regions where the provisioned product will be available.\n Applicable only to a CFN_STACKSET provisioned product type.\n The specified regions should be within the list of regions from the STACKSET constraint. To get the list of regions in the STACKSET constraint, use the DescribeProvisioningParameters operation.\n If no values are specified, the default value is all regions from the STACKSET constraint.\n (string) --\n StackSetFailureToleranceCount (integer) --The number of accounts, per region, for which this operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetFailureToleranceCount or StackSetFailureTolerancePercentage , but not both.\n The default value is 0 if no value is specified.\n StackSetFailureTolerancePercentage (integer) --The percentage of accounts, per region, for which this stack operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions.\n When calculating the number of accounts based on the specified percentage, AWS Service Catalog rounds down to the next whole number.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetFailureToleranceCount or StackSetFailureTolerancePercentage , but not both.\n StackSetMaxConcurrencyCount (integer) --The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of StackSetFailureToleranceCount . StackSetMaxConcurrencyCount is at most one more than the StackSetFailureToleranceCount .\n Note that this setting lets you specify the maximum for operations. 
For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetMaxConcurrencyCount or StackSetMaxConcurrencyPercentage , but not both.\n StackSetMaxConcurrencyPercentage (integer) --The maximum percentage of accounts in which to perform this operation at one time.\n When calculating the number of accounts based on the specified percentage, AWS Service Catalog rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, AWS Service Catalog sets the number to 1 instead.\n Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n Applicable only to a CFN_STACKSET provisioned product type.\n Conditional: You must specify either StackSetMaxConcurrencyCount or StackSetMaxConcurrencyPercentage , but not both.\n StackSetOperationType (string) --Determines what action AWS Service Catalog performs on a stack set or a stack instance represented by the provisioned product. The default value is UPDATE if nothing is specified.\n Applicable only to a CFN_STACKSET provisioned product type.\n CREATE\n Creates a new stack instance in the stack set represented by the provisioned product. In this case, only new stack instances are created based on accounts and regions; if a new ProductId or ProvisioningArtifactId is passed, it will be ignored.\n UPDATE\n Updates the stack set represented by the provisioned product and also its stack instances.\n DELETE\n Deletes a stack instance in the stack set represented by the provisioned product.\n \n\n :type UpdateToken: string\n :param UpdateToken: [REQUIRED]\n The idempotency token that uniquely identifies the provisioning update request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'RecordDetail': {\n 'RecordId': 'string',\n 'ProvisionedProductName': 'string',\n 'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',\n 'CreatedTime': datetime(2015, 1, 1),\n 'UpdatedTime': datetime(2015, 1, 1),\n 'ProvisionedProductType': 'string',\n 'RecordType': 'string',\n 'ProvisionedProductId': 'string',\n 'ProductId': 'string',\n 'ProvisioningArtifactId': 'string',\n 'PathId': 'string',\n 'RecordErrors': [\n {\n 'Code': 'string',\n 'Description': 'string'\n },\n ],\n 'RecordTags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n CREATED - The request was created but the operation has not started.\n IN_PROGRESS - The requested operation is in progress.\n IN_PROGRESS_IN_ERROR - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.\n SUCCEEDED - The requested operation has successfully completed.\n FAILED - The requested operation failed to complete. 
Investigate using the error messages returned.\n \n \"\"\"\n pass\n\ndef update_provisioning_artifact(AcceptLanguage=None, ProductId=None, ProvisioningArtifactId=None, Name=None, Description=None, Active=None):\n \"\"\"\n Updates the specified provisioning artifact (also known as a version) for the specified product.\n You cannot update a provisioning artifact for a product that was shared with you.\n See also: AWS API Documentation\n \n \n :example: response = client.update_provisioning_artifact(\n AcceptLanguage='string',\n ProductId='string',\n ProvisioningArtifactId='string',\n Name='string',\n Description='string',\n Active=True|False\n )\n \n \n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :type ProductId: string\n :param ProductId: [REQUIRED]\n The product identifier.\n \n\n :type ProvisioningArtifactId: string\n :param ProvisioningArtifactId: [REQUIRED]\n The identifier of the provisioning artifact.\n \n\n :type Name: string\n :param Name: The updated name of the provisioning artifact.\n\n :type Description: string\n :param Description: The updated description of the provisioning artifact.\n\n :type Active: boolean\n :param Active: Indicates whether the product version is active.\n\n :rtype: dict\n :return: {\n 'ProvisioningArtifactDetail': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR',\n 'CreatedTime': datetime(2015, 1, 1),\n 'Active': True|False\n },\n 'Info': {\n 'string': 'string'\n },\n 'Status': 'AVAILABLE'|'CREATING'|'FAILED'\n }\n \n \n :returns: \n CLOUD_FORMATION_TEMPLATE - AWS CloudFormation template\n MARKETPLACE_AMI - AWS Marketplace AMI\n MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources\n \n \"\"\"\n pass\n\ndef update_service_action(Id=None, Name=None, Definition=None, Description=None, AcceptLanguage=None):\n \"\"\"\n Updates a self-service action.\n See also: AWS API Documentation\n \n \n :example: response = client.update_service_action(\n Id='string',\n Name='string',\n Definition={\n 'string': 'string'\n },\n Description='string',\n AcceptLanguage='string'\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The self-service action identifier.\n \n\n :type Name: string\n :param Name: The self-service action name.\n\n :type Definition: dict\n :param Definition: A map that defines the self-service action.\n (string) --\n (string) --\n \n\n :type Description: string\n :param Description: The self-service action description.\n\n :type AcceptLanguage: string\n :param AcceptLanguage: The language code.\n en - English (default)\n jp - Japanese\n zh - Chinese\n \n\n :rtype: dict\n :return: {\n 'ServiceActionDetail': {\n 'ServiceActionSummary': {\n 'Id': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'DefinitionType': 'SSM_AUTOMATION'\n },\n 'Definition': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef update_tag_option(Id=None, Value=None, Active=None):\n \"\"\"\n Updates the specified TagOption.\n See also: AWS API Documentation\n \n \n :example: response = client.update_tag_option(\n Id='string',\n Value='string',\n Active=True|False\n )\n \n \n :type Id: string\n :param Id: [REQUIRED]\n The TagOption identifier.\n \n\n :type Value: string\n :param Value: The updated value.\n\n :type Active: boolean\n :param Active: The updated active state.\n\n :rtype: dict\n :return: {\n 'TagOptionDetail': {\n 
'Key': 'string',\n 'Value': 'string',\n 'Active': True|False,\n 'Id': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5927523970603943, "alphanum_fraction": 0.5967358350753784, "avg_line_length": 27.373626708984375, "blob_id": "70c5d9d00258ea95034f6e4e43ae40b7065a00a1", "content_id": "8b27415392be974c4ac2cb606a808e9da2a7ba04", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18075, "license_type": "permissive", "max_line_length": 332, "num_lines": 637, "path": "/pyboto3/iot1clickprojects.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_device_with_placement(projectName=None, placementName=None, deviceId=None, deviceTemplateName=None):\n \"\"\"\n Associates a physical device with a placement.\n See also: AWS API Documentation\n \n \n :example: response = client.associate_device_with_placement(\n projectName='string',\n placementName='string',\n deviceId='string',\n deviceTemplateName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project containing the placement in which to associate the device.\n \n\n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the placement in which to associate the device.\n \n\n :type deviceId: string\n :param deviceId: [REQUIRED]\n The ID of the physical device to be associated with the given placement in the project. Note that a mandatory 4 character prefix is required for all deviceId values.\n \n\n :type deviceTemplateName: string\n :param deviceTemplateName: [REQUIRED]\n The device template name to associate with the device ID.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
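The search and list operations above all share the PageToken/NextPageToken pagination contract described in their docstrings. A minimal usage sketch follows (not part of the generated stubs; it assumes boto3 is installed and AWS credentials are configured, and the 'status:AVAILABLE' query is just an illustrative SearchQuery value):

import boto3

def list_available_provisioned_products():
    # Hand-rolled pagination: omit PageToken on the first call (the
    # docstring's "use null"), then feed NextPageToken back until it
    # is absent from the response.
    client = boto3.client('servicecatalog')
    kwargs = {'Filters': {'SearchQuery': ['status:AVAILABLE']}, 'PageSize': 20}
    products = []
    while True:
        response = client.search_provisioned_products(**kwargs)
        products.extend(response.get('ProvisionedProducts', []))
        token = response.get('NextPageToken')
        if not token:
            return products
        kwargs['PageToken'] = token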
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_placement(placementName=None, projectName=None, attributes=None):\n \"\"\"\n Creates an empty placement.\n See also: AWS API Documentation\n \n \n :example: response = client.create_placement(\n placementName='string',\n projectName='string',\n attributes={\n 'string': 'string'\n }\n )\n \n \n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the placement to be created.\n \n\n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project in which to create the placement.\n \n\n :type attributes: dict\n :param attributes: Optional user-defined key/value pairs providing contextual data (such as location or function) for the placement.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_project(projectName=None, description=None, placementTemplate=None):\n \"\"\"\n Creates an empty project with a placement template. A project contains zero or more placements that adhere to the placement template defined in the project.\n See also: AWS API Documentation\n \n \n :example: response = client.create_project(\n projectName='string',\n description='string',\n placementTemplate={\n 'defaultAttributes': {\n 'string': 'string'\n },\n 'deviceTemplates': {\n 'string': {\n 'deviceType': 'string',\n 'callbackOverrides': {\n 'string': 'string'\n }\n }\n }\n }\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project to create.\n \n\n :type description: string\n :param description: An optional description for the project.\n\n :type placementTemplate: dict\n :param placementTemplate: The schema defining the placement to be created. A placement template defines placement default attributes and device templates. You cannot add or remove device templates after the project has been created. However, you can update callbackOverrides for the device templates using the UpdateProject API.\n defaultAttributes (dict) --The default attributes (key/value pairs) to be applied to all placements using this template.\n (string) --\n (string) --\n \n deviceTemplates (dict) --An object specifying the DeviceTemplate for all placements using this ( PlacementTemplate ) template.\n (string) --\n (dict) --An object representing a device for a placement template (see PlacementTemplate ).\n deviceType (string) --The device type, which currently must be 'button' .\n callbackOverrides (dict) --An optional Lambda function to invoke instead of the default Lambda function provided by the placement template.\n (string) --\n (string) --\n \n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_placement(placementName=None, projectName=None):\n \"\"\"\n Deletes a placement. 
To delete a placement, it must not have any devices associated with it.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_placement(\n placementName='string',\n projectName='string'\n )\n \n \n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the empty placement to delete.\n \n\n :type projectName: string\n :param projectName: [REQUIRED]\n The project containing the empty placement to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_project(projectName=None):\n \"\"\"\n Deletes a project. To delete a project, it must not have any placements associated with it.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_project(\n projectName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the empty project to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_placement(placementName=None, projectName=None):\n \"\"\"\n Describes a placement in a project.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_placement(\n placementName='string',\n projectName='string'\n )\n \n \n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the placement within a project.\n \n\n :type projectName: string\n :param projectName: [REQUIRED]\n The project containing the placement to be described.\n \n\n :rtype: dict\n :return: {\n 'placement': {\n 'projectName': 'string',\n 'placementName': 'string',\n 'attributes': {\n 'string': 'string'\n },\n 'createdDate': datetime(2015, 1, 1),\n 'updatedDate': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_project(projectName=None):\n \"\"\"\n Returns an object describing a project.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_project(\n projectName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project to be described.\n \n\n :rtype: dict\n :return: {\n 'project': {\n 'projectName': 'string',\n 'description': 'string',\n 'createdDate': datetime(2015, 1, 1),\n 'updatedDate': datetime(2015, 1, 1),\n 'placementTemplate': {\n 'defaultAttributes': {\n 'string': 'string'\n },\n 'deviceTemplates': {\n 'string': {\n 'deviceType': 'string',\n 'callbackOverrides': {\n 'string': 'string'\n }\n }\n }\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef disassociate_device_from_placement(projectName=None, placementName=None, deviceTemplateName=None):\n \"\"\"\n Removes a physical device from a placement.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_device_from_placement(\n projectName='string',\n placementName='string',\n deviceTemplateName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project that contains the placement.\n \n\n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the placement that the device should be removed from.\n \n\n :type deviceTemplateName: string\n :param deviceTemplateName: [REQUIRED]\n The device ID that should be removed from the placement.\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n 
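A minimal illustrative call, following the :example: convention used by the other docstrings in this module (the operation name and parameters below are placeholder assumptions, not part of the generated stub):\n \n :example: url = client.generate_presigned_url(\n ClientMethod='list_projects',\n Params={'maxResults': 50},\n ExpiresIn=3600\n )\n \n 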
:type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_devices_in_placement(projectName=None, placementName=None):\n \"\"\"\n Returns an object enumerating the devices in a placement.\n See also: AWS API Documentation\n \n \n :example: response = client.get_devices_in_placement(\n projectName='string',\n placementName='string'\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project containing the placement.\n \n\n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the placement to get the devices from.\n \n\n :rtype: dict\n :return: {\n 'devices': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_placements(projectName=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the placement(s) of a project.\n See also: AWS API Documentation\n \n \n :example: response = client.list_placements(\n projectName='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The project containing the placements to be listed.\n \n\n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used.\n\n :rtype: dict\n :return: {\n 'placements': [\n {\n 'projectName': 'string',\n 'placementName': 'string',\n 'createdDate': datetime(2015, 1, 1),\n 'updatedDate': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef list_projects(nextToken=None, maxResults=None):\n \"\"\"\n Lists the AWS IoT 1-Click project(s) associated with your AWS account and region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_projects(\n nextToken='string',\n maxResults=123\n )\n \n \n :type nextToken: string\n :param nextToken: The token to retrieve the next set of results.\n\n :type maxResults: integer\n :param maxResults: The maximum number of results to return per request. 
If not set, a default value of 100 is used.\n\n :rtype: dict\n :return: {\n 'projects': [\n {\n 'projectName': 'string',\n 'createdDate': datetime(2015, 1, 1),\n 'updatedDate': datetime(2015, 1, 1)\n },\n ],\n 'nextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef update_placement(placementName=None, projectName=None, attributes=None):\n \"\"\"\n Updates a placement with the given attributes. To clear an attribute, pass an empty value (i.e., \"\").\n See also: AWS API Documentation\n \n \n :example: response = client.update_placement(\n placementName='string',\n projectName='string',\n attributes={\n 'string': 'string'\n }\n )\n \n \n :type placementName: string\n :param placementName: [REQUIRED]\n The name of the placement to update.\n \n\n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project containing the placement to be updated.\n \n\n :type attributes: dict\n :param attributes: The user-defined object of attributes used to update the placement. The maximum number of key/value pairs is 50.\n (string) --\n (string) --\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_project(projectName=None, description=None, placementTemplate=None):\n \"\"\"\n Updates a project associated with your AWS account and region. With the exception of device template names, you can pass just the values that need to be updated because the update request will change only the values that are provided. To clear a value, pass the empty string (i.e., \"\" ).\n See also: AWS API Documentation\n \n \n :example: response = client.update_project(\n projectName='string',\n description='string',\n placementTemplate={\n 'defaultAttributes': {\n 'string': 'string'\n },\n 'deviceTemplates': {\n 'string': {\n 'deviceType': 'string',\n 'callbackOverrides': {\n 'string': 'string'\n }\n }\n }\n }\n )\n \n \n :type projectName: string\n :param projectName: [REQUIRED]\n The name of the project to be updated.\n \n\n :type description: string\n :param description: An optional user-defined description for the project.\n\n :type placementTemplate: dict\n :param placementTemplate: An object defining the project update. Once a project has been created, you cannot add device template names to the project. 
However, for a given placementTemplate , you can update the associated callbackOverrides for the device definition using this API.\n defaultAttributes (dict) --The default attributes (key/value pairs) to be applied to all placements using this template.\n (string) --\n (string) --\n \n deviceTemplates (dict) --An object specifying the DeviceTemplate for all placements using this ( PlacementTemplate ) template.\n (string) --\n (dict) --An object representing a device for a placement template (see PlacementTemplate ).\n deviceType (string) --The device type, which currently must be 'button' .\n callbackOverrides (dict) --An optional Lambda function to invoke instead of the default Lambda function provided by the placement template.\n (string) --\n (string) --\n \n \n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.5988505482673645, "alphanum_fraction": 0.6029643416404724, "avg_line_length": 33.870269775390625, "blob_id": "c23a8990f259c373f4727e1c659365d8659916e5", "content_id": "77e067ab10a725c405c5882a9098db0f73529cd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83864, "license_type": "permissive", "max_line_length": 409, "num_lines": 2405, "path": "/pyboto3/guardduty.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
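Taken together, the operations above form a small lifecycle: create a project with a device template, add a placement, then associate a physical device. A minimal sketch of that flow (illustrative only; the project, placement, and template names plus the device ID are hypothetical, and 'iot1click-projects' is the boto3 service name assumed here):

import boto3

client = boto3.client('iot1click-projects')

# 1. Create a project whose placement template declares one 'button' device.
client.create_project(
    projectName='demo-project',
    description='Illustrative project',
    placementTemplate={
        'defaultAttributes': {'location': 'unknown'},
        'deviceTemplates': {
            'alertButton': {
                'deviceType': 'button',  # currently the only supported type
                'callbackOverrides': {},
            },
        },
    },
)

# 2. Add a placement; its attributes start from the template defaults.
client.create_placement(
    projectName='demo-project',
    placementName='room-101',
    attributes={'location': 'Room 101'},
)

# 3. Bind a physical device to the placement (hypothetical device ID; note
#    the mandatory 4-character prefix mentioned in
#    associate_device_with_placement).
client.associate_device_with_placement(
    projectName='demo-project',
    placementName='room-101',
    deviceId='G030PM0123456789',
    deviceTemplateName='alertButton',
)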
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef accept_invitation(DetectorId=None, InvitationId=None, MasterId=None):\n \"\"\"\n Accepts the invitation to be monitored by a master GuardDuty account.\n See also: AWS API Documentation\n \n \n :example: response = client.accept_invitation(\n DetectorId='string',\n InvitationId='string',\n MasterId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty member account.\n\n :type InvitationId: string\n :param InvitationId: [REQUIRED] This value is used to validate the master account to the member account.\n\n :type MasterId: string\n :param MasterId: [REQUIRED] The account ID of the master GuardDuty account whose invitation you're accepting.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef archive_findings(DetectorId=None, FindingIds=None):\n \"\"\"\n Archives Amazon GuardDuty findings specified by the list of finding IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.archive_findings(\n DetectorId='string',\n FindingIds=[\n 'string',\n ]\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service whose findings you want to archive.\n\n :type FindingIds: list\n :param FindingIds: [REQUIRED] IDs of the findings that you want to archive.\n (string) -- The unique identifier for the Finding\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_detector(ClientToken=None, Enable=None, FindingPublishingFrequency=None):\n \"\"\"\n Creates a single Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. 
A detector must be created in order for GuardDuty to become operational.\n See also: AWS API Documentation\n \n \n :example: response = client.create_detector(\n ClientToken='string',\n Enable=True|False,\n FindingPublishingFrequency='FIFTEEN_MINUTES'|'ONE_HOUR'|'SIX_HOURS'\n )\n \n \n :type ClientToken: string\n :param ClientToken: The idempotency token for the create request. This field is autopopulated if not provided.\n\n :type Enable: boolean\n :param Enable: [REQUIRED] A boolean value that specifies whether the detector is to be enabled.\n\n :type FindingPublishingFrequency: string\n :param FindingPublishingFrequency: An enum value that specifies how frequently updated findings are published to the customer.\n\n :rtype: dict\n :return: {\n 'DetectorId': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n DetectorId (string) -- The unique ID of the created detector.\n \n \n \n \"\"\"\n pass\n\ndef create_filter(Action=None, ClientToken=None, Description=None, DetectorId=None, FindingCriteria=None, Name=None, Rank=None):\n \"\"\"\n Creates a filter using the specified finding criteria.\n See also: AWS API Documentation\n \n \n :example: response = client.create_filter(\n Action='NOOP'|'ARCHIVE',\n ClientToken='string',\n Description='string',\n DetectorId='string',\n FindingCriteria={\n 'Criterion': {\n 'string': {\n 'Eq': [\n 'string',\n ],\n 'Gt': 123,\n 'Gte': 123,\n 'Lt': 123,\n 'Lte': 123,\n 'Neq': [\n 'string',\n ]\n }\n }\n },\n Name='string',\n Rank=123\n )\n \n \n :type Action: string\n :param Action: Specifies the action that is to be applied to the findings that match the filter.\n\n :type ClientToken: string\n :param ClientToken: The idempotency token for the create request. This field is autopopulated if not provided.\n\n :type Description: string\n :param Description: The description of the filter.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that you want to update.\n\n :type FindingCriteria: dict\n :param FindingCriteria: [REQUIRED] Represents the criteria to be used in the filter for querying findings.\n Criterion (dict) -- Represents a map of finding properties that match specified conditions and values when querying findings.\n (string) --\n (dict) -- Finding attribute (for example, accountId) for which conditions and values must be specified when querying findings.\n Eq (list) -- Represents the equal condition to be applied to a single field when querying for findings.\n (string) --\n Gt (integer) -- Represents the greater than condition to be applied to a single field when querying for findings.\n Gte (integer) -- Represents the greater than or equal condition to be applied to a single field when querying for findings.\n Lt (integer) -- Represents the less than condition to be applied to a single field when querying for findings.\n Lte (integer) -- Represents the less than or equal condition to be applied to a single field when querying for findings.\n Neq (list) -- Represents the not equal condition to be applied to a single field when querying for findings.\n (string) --\n \n \n \n\n :type Name: string\n :param Name: [REQUIRED] The name of the filter.\n\n :type Rank: integer\n :param Rank: Specifies the position of the filter in the list of current filters. 
Also specifies the order in which this filter is applied to the findings.\n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n Name (string) -- The name of the successfully created filter.\n \n \n \n \"\"\"\n pass\n\ndef create_ip_set(Activate=None, ClientToken=None, DetectorId=None, Format=None, Location=None, Name=None):\n \"\"\"\n Creates a new IPSet - a list of trusted IP addresses that have been whitelisted for secure communication with AWS infrastructure and applications.\n See also: AWS API Documentation\n \n \n :example: response = client.create_ip_set(\n Activate=True|False,\n ClientToken='string',\n DetectorId='string',\n Format='TXT'|'STIX'|'OTX_CSV'|'ALIEN_VAULT'|'PROOF_POINT'|'FIRE_EYE',\n Location='string',\n Name='string'\n )\n \n \n :type Activate: boolean\n :param Activate: [REQUIRED] A boolean value that indicates whether GuardDuty is to start using the uploaded IPSet.\n\n :type ClientToken: string\n :param ClientToken: The idempotency token for the create request. This field is autopopulated if not provided.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that you want to update.\n\n :type Format: string\n :param Format: [REQUIRED] The format of the file that contains the IPSet.\n\n :type Location: string\n :param Location: [REQUIRED] The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).\n\n :type Name: string\n :param Name: [REQUIRED] The user-friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.\n\n :rtype: dict\n :return: {\n 'IpSetId': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n IpSetId (string) -- The unique identifier for an IP Set\n \n \n \n \"\"\"\n pass\n\ndef create_members(AccountDetails=None, DetectorId=None):\n \"\"\"\n Creates member accounts of the current AWS account by specifying a list of AWS account IDs. 
The current AWS account can then invite these members to manage GuardDuty in their accounts.\n See also: AWS API Documentation\n \n \n :example: response = client.create_members(\n AccountDetails=[\n {\n 'AccountId': 'string',\n 'Email': 'string'\n },\n ],\n DetectorId='string'\n )\n \n \n :type AccountDetails: list\n :param AccountDetails: [REQUIRED] A list of account ID and email address pairs of the accounts that you want to associate with the master GuardDuty account.\n (dict) -- An object containing the member's accountId and email address.\n AccountId (string) -- [REQUIRED] Member account ID.\n Email (string) -- [REQUIRED] Member account's email address.\n \n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account with which you want to associate member accounts.\n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef create_sample_findings(DetectorId=None, FindingTypes=None):\n \"\"\"\n Generates example findings of types specified by the list of finding types. If 'NULL' is specified for findingTypes, the API generates example findings of all supported finding types.\n See also: AWS API Documentation\n \n \n :example: response = client.create_sample_findings(\n DetectorId='string',\n FindingTypes=[\n 'string',\n ]\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector to create sample findings for.\n\n :type FindingTypes: list\n :param FindingTypes: Types of sample findings that you want to generate.\n (string) -- The finding type for the finding\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef create_threat_intel_set(Activate=None, ClientToken=None, DetectorId=None, Format=None, Location=None, Name=None):\n \"\"\"\n Creates a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets.\n See also: AWS API Documentation\n \n \n :example: response = client.create_threat_intel_set(\n Activate=True|False,\n ClientToken='string',\n DetectorId='string',\n Format='TXT'|'STIX'|'OTX_CSV'|'ALIEN_VAULT'|'PROOF_POINT'|'FIRE_EYE',\n Location='string',\n Name='string'\n )\n \n \n :type Activate: boolean\n :param Activate: [REQUIRED] A boolean value that indicates whether GuardDuty is to start using the uploaded ThreatIntelSet.\n\n :type ClientToken: string\n :param ClientToken: The idempotency token for the create request. This field is autopopulated if not provided.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that you want to update.\n\n :type Format: string\n :param Format: [REQUIRED] The format of the file that contains the ThreatIntelSet.\n\n :type Location: string\n :param Location: [REQUIRED] The URI of the file that contains the ThreatIntelSet. 
For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).\n\n :type Name: string\n :param Name: [REQUIRED] A user-friendly ThreatIntelSet name that is displayed in all findings generated by activity that involves IP addresses included in this ThreatIntelSet.\n\n :rtype: dict\n :return: {\n 'ThreatIntelSetId': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n ThreatIntelSetId (string) -- The unique identifier for a threat intel set\n \n \n \n \"\"\"\n pass\n\ndef decline_invitations(AccountIds=None):\n \"\"\"\n Declines invitations sent to the current member account by the AWS accounts specified by their account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.decline_invitations(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the AWS accounts that sent invitations to the current member account that you want to decline invitations from.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef delete_detector(DetectorId=None):\n \"\"\"\n Deletes an Amazon GuardDuty detector specified by the detector ID.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_detector(\n DetectorId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID that specifies the detector that you want to delete.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_filter(DetectorId=None, FilterName=None):\n \"\"\"\n Deletes the filter specified by the filter name.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_filter(\n DetectorId='string',\n FilterName='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID that specifies the detector where you want to delete a filter.\n\n :type FilterName: string\n :param FilterName: [REQUIRED] The name of the filter.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef delete_invitations(AccountIds=None):\n \"\"\"\n Deletes invitations sent to the current member account by AWS accounts specified by their account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_invitations(\n AccountIds=[\n 'string',\n ]\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the AWS accounts that sent invitations to the current member account that you want to delete invitations from.\n (string) --\n \n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been 
processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef delete_ip_set(DetectorId=None, IpSetId=None):\n \"\"\"\n Deletes the IPSet specified by the IPSet ID.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_ip_set(\n DetectorId='string',\n IpSetId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose IPSet you want to delete.\n\n :type IpSetId: string\n :param IpSetId: [REQUIRED] The unique ID that specifies the IPSet that you want to delete.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef delete_members(AccountIds=None, DetectorId=None):\n \"\"\"\n Deletes GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_members(\n AccountIds=[\n 'string',\n ],\n DetectorId='string'\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the GuardDuty member accounts that you want to delete.\n (string) --\n \n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account whose members you want to delete.\n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef delete_threat_intel_set(DetectorId=None, ThreatIntelSetId=None):\n \"\"\"\n Deletes ThreatIntelSet specified by the ThreatIntelSet ID.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_threat_intel_set(\n DetectorId='string',\n ThreatIntelSetId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to delete.\n\n :type ThreatIntelSetId: string\n :param ThreatIntelSetId: [REQUIRED] The unique ID that specifies the ThreatIntelSet that you want to delete.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef disassociate_from_master_account(DetectorId=None):\n \"\"\"\n Disassociates the current GuardDuty member account from its master account.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_from_master_account(\n DetectorId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty member account.\n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef disassociate_members(AccountIds=None, DetectorId=None):\n \"\"\"\n Disassociates GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_members(\n AccountIds=[\n 'string',\n ],\n DetectorId='string'\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the GuardDuty member accounts that you want to disassociate from master.\n (string) --\n \n\n :type DetectorId: string\n :param DetectorId: 
[REQUIRED] The unique ID of the detector of the GuardDuty account whose members you want to disassociate from master.\n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_detector(DetectorId=None):\n \"\"\"\n Retrieves an Amazon GuardDuty detector specified by the detectorId.\n See also: AWS API Documentation\n \n \n :example: response = client.get_detector(\n DetectorId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that you want to retrieve.\n\n :rtype: dict\n :return: {\n 'CreatedAt': 'string',\n 'FindingPublishingFrequency': 'FIFTEEN_MINUTES'|'ONE_HOUR'|'SIX_HOURS',\n 'ServiceRole': 'string',\n 'Status': 'ENABLED'|'DISABLED',\n 'UpdatedAt': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef get_filter(DetectorId=None, FilterName=None):\n \"\"\"\n Returns the details of the filter specified by the filter name.\n See also: AWS API Documentation\n \n \n :example: response = client.get_filter(\n DetectorId='string',\n FilterName='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detector ID that specifies the GuardDuty service where you want to list the details of the specified filter.\n\n :type FilterName: string\n :param FilterName: [REQUIRED] The name of the filter whose details you want to get.\n\n :rtype: dict\n :return: {\n 'Action': 'NOOP'|'ARCHIVE',\n 'Description': 'string',\n 'FindingCriteria': {\n 'Criterion': {\n 'string': {\n 'Eq': [\n 'string',\n ],\n 'Gt': 123,\n 'Gte': 123,\n 'Lt': 123,\n 'Lte': 123,\n 'Neq': [\n 'string',\n ]\n }\n }\n },\n 'Name': 'string',\n 'Rank': 123\n }\n \n \n :returns: \n (dict) -- 200 response\n Action (string) -- Specifies the action that is to be applied to the findings that match the filter.\n Description (string) -- The description of the filter.\n FindingCriteria (dict) -- Represents the criteria to be used in the filter for querying findings.\n Criterion (dict) -- Represents a map of finding properties that match specified conditions and values when querying findings.\n (string) --\n (dict) -- Finding attribute (for example, accountId) for which conditions and values must be specified when querying findings.\n Eq (list) -- Represents the equal condition to be applied to a single field when querying for findings.\n (string) --\n \n \n Gt (integer) -- Represents the greater than 
condition to be applied to a single field when querying for findings.\n Gte (integer) -- Represents the greater than equal condition to be applied to a single field when querying for findings.\n Lt (integer) -- Represents the less than condition to be applied to a single field when querying for findings.\n Lte (integer) -- Represents the less than equal condition to be applied to a single field when querying for findings.\n Neq (list) -- Represents the not equal condition to be applied to a single field when querying for findings.\n (string) --\n \n \n \n \n \n \n \n \n \n \n Name (string) -- The name of the filter.\n Rank (integer) -- Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings.\n \n \n \n \"\"\"\n pass\n\ndef get_findings(DetectorId=None, FindingIds=None, SortCriteria=None):\n \"\"\"\n Describes Amazon GuardDuty findings specified by finding IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_findings(\n DetectorId='string',\n FindingIds=[\n 'string',\n ],\n SortCriteria={\n 'AttributeName': 'string',\n 'OrderBy': 'ASC'|'DESC'\n }\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve.\n\n :type FindingIds: list\n :param FindingIds: [REQUIRED] IDs of the findings that you want to retrieve.\n (string) -- The unique identifier for the Finding\n \n\n :type SortCriteria: dict\n :param SortCriteria: Represents the criteria used for sorting findings.\n AttributeName (string) -- Represents the finding attribute (for example, accountId) by which to sort findings.\n OrderBy (string) -- Order by which the sorted findings are to be displayed.\n \n\n :rtype: dict\n :return: {\n 'Findings': [\n {\n 'AccountId': 'string',\n 'Arn': 'string',\n 'Confidence': 123.0,\n 'CreatedAt': 'string',\n 'Description': 'string',\n 'Id': 'string',\n 'Partition': 'string',\n 'Region': 'string',\n 'Resource': {\n 'AccessKeyDetails': {\n 'AccessKeyId': 'string',\n 'PrincipalId': 'string',\n 'UserName': 'string',\n 'UserType': 'string'\n },\n 'InstanceDetails': {\n 'AvailabilityZone': 'string',\n 'IamInstanceProfile': {\n 'Arn': 'string',\n 'Id': 'string'\n },\n 'ImageDescription': 'string',\n 'ImageId': 'string',\n 'InstanceId': 'string',\n 'InstanceState': 'string',\n 'InstanceType': 'string',\n 'LaunchTime': 'string',\n 'NetworkInterfaces': [\n {\n 'Ipv6Addresses': [\n 'string',\n ],\n 'NetworkInterfaceId': 'string',\n 'PrivateDnsName': 'string',\n 'PrivateIpAddress': 'string',\n 'PrivateIpAddresses': [\n {\n 'PrivateDnsName': 'string',\n 'PrivateIpAddress': 'string'\n },\n ],\n 'PublicDnsName': 'string',\n 'PublicIp': 'string',\n 'SecurityGroups': [\n {\n 'GroupId': 'string',\n 'GroupName': 'string'\n },\n ],\n 'SubnetId': 'string',\n 'VpcId': 'string'\n },\n ],\n 'Platform': 'string',\n 'ProductCodes': [\n {\n 'Code': 'string',\n 'ProductType': 'string'\n },\n ],\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n 'ResourceType': 'string'\n },\n 'SchemaVersion': 'string',\n 'Service': {\n 'Action': {\n 'ActionType': 'string',\n 'AwsApiCallAction': {\n 'Api': 'string',\n 'CallerType': 'string',\n 'DomainDetails': {},\n 'RemoteIpDetails': {\n 'City': {\n 'CityName': 'string'\n },\n 'Country': {\n 'CountryCode': 'string',\n 'CountryName': 'string'\n },\n 'GeoLocation': {\n 'Lat': 123.0,\n 'Lon': 123.0\n },\n 'IpAddressV4': 'string',\n 'Organization': {\n 'Asn': 
'string',\n 'AsnOrg': 'string',\n 'Isp': 'string',\n 'Org': 'string'\n }\n },\n 'ServiceName': 'string'\n },\n 'DnsRequestAction': {\n 'Domain': 'string'\n },\n 'NetworkConnectionAction': {\n 'Blocked': True|False,\n 'ConnectionDirection': 'string',\n 'LocalPortDetails': {\n 'Port': 123,\n 'PortName': 'string'\n },\n 'Protocol': 'string',\n 'RemoteIpDetails': {\n 'City': {\n 'CityName': 'string'\n },\n 'Country': {\n 'CountryCode': 'string',\n 'CountryName': 'string'\n },\n 'GeoLocation': {\n 'Lat': 123.0,\n 'Lon': 123.0\n },\n 'IpAddressV4': 'string',\n 'Organization': {\n 'Asn': 'string',\n 'AsnOrg': 'string',\n 'Isp': 'string',\n 'Org': 'string'\n }\n },\n 'RemotePortDetails': {\n 'Port': 123,\n 'PortName': 'string'\n }\n },\n 'PortProbeAction': {\n 'Blocked': True|False,\n 'PortProbeDetails': [\n {\n 'LocalPortDetails': {\n 'Port': 123,\n 'PortName': 'string'\n },\n 'RemoteIpDetails': {\n 'City': {\n 'CityName': 'string'\n },\n 'Country': {\n 'CountryCode': 'string',\n 'CountryName': 'string'\n },\n 'GeoLocation': {\n 'Lat': 123.0,\n 'Lon': 123.0\n },\n 'IpAddressV4': 'string',\n 'Organization': {\n 'Asn': 'string',\n 'AsnOrg': 'string',\n 'Isp': 'string',\n 'Org': 'string'\n }\n }\n },\n ]\n }\n },\n 'Archived': True|False,\n 'Count': 123,\n 'DetectorId': 'string',\n 'EventFirstSeen': 'string',\n 'EventLastSeen': 'string',\n 'ResourceRole': 'string',\n 'ServiceName': 'string',\n 'UserFeedback': 'string'\n },\n 'Severity': 123.0,\n 'Title': 'string',\n 'Type': 'string',\n 'UpdatedAt': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n Findings (list) -- A list of findings.\n (dict) -- Representation of a abnormal or suspicious activity.\n AccountId (string) -- AWS account ID where the activity occurred that prompted GuardDuty to generate a finding.\n Arn (string) -- The ARN of a finding described by the action.\n Confidence (float) -- The confidence level of a finding.\n CreatedAt (string) -- The time stamp at which a finding was generated.\n Description (string) -- The description of a finding.\n Id (string) -- The identifier that corresponds to a finding described by the action.\n Partition (string) -- The AWS resource partition.\n Region (string) -- The AWS region where the activity occurred that prompted GuardDuty to generate a finding.\n Resource (dict) -- The AWS resource associated with the activity that prompted GuardDuty to generate a finding.\n AccessKeyDetails (dict) -- The IAM access key details (IAM user information) of a user that engaged in the activity that prompted GuardDuty to generate a finding.\n AccessKeyId (string) -- Access key ID of the user.\n PrincipalId (string) -- The principal ID of the user.\n UserName (string) -- The name of the user.\n UserType (string) -- The type of the user.\n \n \n InstanceDetails (dict) -- The information about the EC2 instance associated with the activity that prompted GuardDuty to generate a finding.\n AvailabilityZone (string) -- The availability zone of the EC2 instance.\n IamInstanceProfile (dict) -- The profile information of the EC2 instance.\n Arn (string) -- AWS EC2 instance profile ARN.\n Id (string) -- AWS EC2 instance profile ID.\n \n \n ImageDescription (string) -- The image description of the EC2 instance.\n ImageId (string) -- The image ID of the EC2 instance.\n InstanceId (string) -- The ID of the EC2 instance.\n InstanceState (string) -- The state of the EC2 instance.\n InstanceType (string) -- The type of the EC2 instance.\n LaunchTime (string) -- The launch time of the EC2 instance.\n 
NetworkInterfaces (list) -- The network interface information of the EC2 instance.\n (dict) -- The network interface information of the EC2 instance.\n Ipv6Addresses (list) -- A list of EC2 instance IPv6 address information.\n (string) -- IpV6 address of the EC2 instance.\n \n \n NetworkInterfaceId (string) -- The ID of the network interface\n PrivateDnsName (string) -- Private DNS name of the EC2 instance.\n PrivateIpAddress (string) -- Private IP address of the EC2 instance.\n PrivateIpAddresses (list) -- Other private IP address information of the EC2 instance.\n (dict) -- Other private IP address information of the EC2 instance.\n PrivateDnsName (string) -- Private DNS name of the EC2 instance.\n PrivateIpAddress (string) -- Private IP address of the EC2 instance.\n \n \n \n \n PublicDnsName (string) -- Public DNS name of the EC2 instance.\n PublicIp (string) -- Public IP address of the EC2 instance.\n SecurityGroups (list) -- Security groups associated with the EC2 instance.\n (dict) -- Security groups associated with the EC2 instance.\n GroupId (string) -- EC2 instance's security group ID.\n GroupName (string) -- EC2 instance's security group name.\n \n \n \n \n SubnetId (string) -- The subnet ID of the EC2 instance.\n VpcId (string) -- The VPC ID of the EC2 instance.\n \n \n \n \n Platform (string) -- The platform of the EC2 instance.\n ProductCodes (list) -- The product code of the EC2 instance.\n (dict) -- The product code of the EC2 instance.\n Code (string) -- Product code information.\n ProductType (string) -- Product code type.\n \n \n \n \n Tags (list) -- The tags of the EC2 instance.\n (dict) -- A tag of the EC2 instance.\n Key (string) -- EC2 instance tag key.\n Value (string) -- EC2 instance tag value.\n \n \n \n \n \n \n ResourceType (string) -- The type of the AWS resource.\n \n \n SchemaVersion (string) -- Findings' schema version.\n Service (dict) -- Additional information assigned to the generated finding by GuardDuty.\n Action (dict) -- Information about the activity described in a finding.\n ActionType (string) -- GuardDuty Finding activity type.\n AwsApiCallAction (dict) -- Information about the AWS_API_CALL action described in this finding.\n Api (string) -- AWS API name.\n CallerType (string) -- AWS API caller type.\n DomainDetails (dict) -- Domain information for the AWS API call.\n RemoteIpDetails (dict) -- Remote IP information of the connection.\n City (dict) -- City information of the remote IP address.\n CityName (string) -- City name of the remote IP address.\n \n \n Country (dict) -- Country code of the remote IP address.\n CountryCode (string) -- Country code of the remote IP address.\n CountryName (string) -- Country name of the remote IP address.\n \n \n GeoLocation (dict) -- Location information of the remote IP address.\n Lat (float) -- Latitude information of remote IP address.\n Lon (float) -- Longitude information of remote IP address.\n \n \n IpAddressV4 (string) -- IPV4 remote address of the connection.\n Organization (dict) -- ISP Organization information of the remote IP address.\n Asn (string) -- Autonomous system number of the internet provider of the remote IP address.\n AsnOrg (string) -- Organization that registered this ASN.\n Isp (string) -- ISP information for the internet provider.\n Org (string) -- Name of the internet provider.\n \n \n \n \n ServiceName (string) -- AWS service name whose API was invoked.\n \n \n DnsRequestAction (dict) -- Information about the DNS_REQUEST action described in this finding.\n Domain (string) -- Domain 
information for the DNS request.\n \n \n NetworkConnectionAction (dict) -- Information about the NETWORK_CONNECTION action described in this finding.\n Blocked (boolean) -- Network connection blocked information.\n ConnectionDirection (string) -- Network connection direction.\n LocalPortDetails (dict) -- Local port information of the connection.\n Port (integer) -- Port number of the local connection.\n PortName (string) -- Port name of the local connection.\n \n \n Protocol (string) -- Network connection protocol.\n RemoteIpDetails (dict) -- Remote IP information of the connection.\n City (dict) -- City information of the remote IP address.\n CityName (string) -- City name of the remote IP address.\n \n \n Country (dict) -- Country code of the remote IP address.\n CountryCode (string) -- Country code of the remote IP address.\n CountryName (string) -- Country name of the remote IP address.\n \n \n GeoLocation (dict) -- Location information of the remote IP address.\n Lat (float) -- Latitude information of remote IP address.\n Lon (float) -- Longitude information of remote IP address.\n \n \n IpAddressV4 (string) -- IPV4 remote address of the connection.\n Organization (dict) -- ISP Organization information of the remote IP address.\n Asn (string) -- Autonomous system number of the internet provider of the remote IP address.\n AsnOrg (string) -- Organization that registered this ASN.\n Isp (string) -- ISP information for the internet provider.\n Org (string) -- Name of the internet provider.\n \n \n \n \n RemotePortDetails (dict) -- Remote port information of the connection.\n Port (integer) -- Port number of the remote connection.\n PortName (string) -- Port name of the remote connection.\n \n \n \n \n PortProbeAction (dict) -- Information about the PORT_PROBE action described in this finding.\n Blocked (boolean) -- Port probe blocked information.\n PortProbeDetails (list) -- A list of port probe details objects.\n (dict) -- Details about the port probe finding.\n LocalPortDetails (dict) -- Local port information of the connection.\n Port (integer) -- Port number of the local connection.\n PortName (string) -- Port name of the local connection.\n \n \n RemoteIpDetails (dict) -- Remote IP information of the connection.\n City (dict) -- City information of the remote IP address.\n CityName (string) -- City name of the remote IP address.\n \n \n Country (dict) -- Country code of the remote IP address.\n CountryCode (string) -- Country code of the remote IP address.\n CountryName (string) -- Country name of the remote IP address.\n \n \n GeoLocation (dict) -- Location information of the remote IP address.\n Lat (float) -- Latitude information of remote IP address.\n Lon (float) -- Longitude information of remote IP address.\n \n \n IpAddressV4 (string) -- IPV4 remote address of the connection.\n Organization (dict) -- ISP Organization information of the remote IP address.\n Asn (string) -- Autonomous system number of the internet provider of the remote IP address.\n AsnOrg (string) -- Organization that registered this ASN.\n Isp (string) -- ISP information for the internet provider.\n Org (string) -- Name of the internet provider.\n \n \n \n \n \n \n \n \n \n \n \n \n Archived (boolean) -- Indicates whether this finding is archived.\n Count (integer) -- Total count of the occurrences of this finding type.\n DetectorId (string) -- Detector ID for the GuardDuty service.\n EventFirstSeen (string) -- First seen timestamp of the activity that prompted GuardDuty to generate this finding.\n 
EventLastSeen (string) -- Last seen timestamp of the activity that prompted GuardDuty to generate this finding.\n ResourceRole (string) -- Resource role information for this finding.\n ServiceName (string) -- The name of the AWS service (GuardDuty) that generated a finding.\n UserFeedback (string) -- Feedback left about the finding.\n \n \n Severity (float) -- The severity of a finding.\n Title (string) -- The title of a finding.\n Type (string) -- The type of a finding described by the action.\n UpdatedAt (string) -- The time stamp at which a finding was last updated.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef get_findings_statistics(DetectorId=None, FindingCriteria=None, FindingStatisticTypes=None):\n \"\"\"\n Lists Amazon GuardDuty findings' statistics for the specified detector ID.\n See also: AWS API Documentation\n \n \n :example: response = client.get_findings_statistics(\n DetectorId='string',\n FindingCriteria={\n 'Criterion': {\n 'string': {\n 'Eq': [\n 'string',\n ],\n 'Gt': 123,\n 'Gte': 123,\n 'Lt': 123,\n 'Lte': 123,\n 'Neq': [\n 'string',\n ]\n }\n }\n },\n FindingStatisticTypes=[\n 'COUNT_BY_SEVERITY',\n ]\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service whose findings' statistics you want to retrieve.\n\n :type FindingCriteria: dict\n :param FindingCriteria: Represents the criteria used for querying findings.\n Criterion (dict) -- Represents a map of finding properties that match specified conditions and values when querying findings.\n (string) --\n (dict) -- Finding attribute (for example, accountId) for which conditions and values must be specified when querying findings.\n Eq (list) -- Represents the equal condition to be applied to a single field when querying for findings.\n (string) --\n Gt (integer) -- Represents the greater than condition to be applied to a single field when querying for findings.\n Gte (integer) -- Represents the greater than equal condition to be applied to a single field when querying for findings.\n Lt (integer) -- Represents the less than condition to be applied to a single field when querying for findings.\n Lte (integer) -- Represents the less than equal condition to be applied to a single field when querying for findings.\n Neq (list) -- Represents the not equal condition to be applied to a single field when querying for findings.\n (string) --\n \n \n \n\n :type FindingStatisticTypes: list\n :param FindingStatisticTypes: [REQUIRED] Types of finding statistics to retrieve.\n (string) -- The types of finding statistics.\n \n\n :rtype: dict\n :return: {\n 'FindingStatistics': {\n 'CountBySeverity': {\n 'string': 123\n }\n }\n }\n \n \n :returns: \n (dict) -- 200 response\n FindingStatistics (dict) -- Finding statistics object.\n CountBySeverity (dict) -- Represents a map of severity to count statistic for a set of findings\n (string) --\n (integer) -- The count of findings for the given severity.\n \n \n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef get_invitations_count():\n \"\"\"\n Returns the count of all GuardDuty membership invitations that were sent to the current member account except the currently accepted invitation.\n See also: AWS API Documentation\n \n \n :example: response = client.get_invitations_count()\n \n \n :rtype: dict\n :return: {\n 'InvitationsCount': 123\n }\n \n \n \"\"\"\n pass\n\ndef get_ip_set(DetectorId=None, IpSetId=None):\n \"\"\"\n Retrieves the IPSet specified by the IPSet ID.\n See also: AWS API Documentation\n \n \n :example: 
response = client.get_ip_set(\n DetectorId='string',\n IpSetId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose IPSet you want to retrieve.\n\n :type IpSetId: string\n :param IpSetId: [REQUIRED] The unique ID that specifies the IPSet that you want to describe.\n\n :rtype: dict\n :return: {\n 'Format': 'TXT'|'STIX'|'OTX_CSV'|'ALIEN_VAULT'|'PROOF_POINT'|'FIRE_EYE',\n 'Location': 'string',\n 'Name': 'string',\n 'Status': 'INACTIVE'|'ACTIVATING'|'ACTIVE'|'DEACTIVATING'|'ERROR'|'DELETE_PENDING'|'DELETED'\n }\n \n \n :returns: \n (dict) -- 200 response\n Format (string) -- The format of the file that contains the IPSet.\n Location (string) -- The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key)\n Name (string) -- The user friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.\n Status (string) -- The status of ipSet file uploaded.\n \n \n \n \"\"\"\n pass\n\ndef get_master_account(DetectorId=None):\n \"\"\"\n Provides the details for the GuardDuty master account to the current GuardDuty member account.\n See also: AWS API Documentation\n \n \n :example: response = client.get_master_account(\n DetectorId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty member account.\n\n :rtype: dict\n :return: {\n 'Master': {\n 'AccountId': 'string',\n 'InvitationId': 'string',\n 'InvitedAt': 'string',\n 'RelationshipStatus': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_members(AccountIds=None, DetectorId=None):\n \"\"\"\n Retrieves GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.get_members(\n AccountIds=[\n 'string',\n ],\n DetectorId='string'\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the GuardDuty member accounts that you want to describe.\n (string) --\n \n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account whose members you want to retrieve.\n\n :rtype: dict\n :return: {\n 'Members': [\n {\n 'AccountId': 'string',\n 'DetectorId': 'string',\n 'Email': 'string',\n 'InvitedAt': 'string',\n 'MasterId': 'string',\n 'RelationshipStatus': 'string',\n 'UpdatedAt': 'string'\n },\n ],\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n Members (list) -- A list of member descriptions.\n (dict) -- Contains details about the member account.\n AccountId (string) -- AWS account ID.\n DetectorId (string) -- The unique identifier for a detector.\n Email (string) -- Member account's email address.\n InvitedAt (string) -- Timestamp at which the invitation was sent\n MasterId (string) -- The master account ID.\n RelationshipStatus (string) -- The status of the relationship between the member and the master.\n UpdatedAt (string) -- The first time a resource was created. 
The format will be ISO-8601.\n \n \n \n \n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_threat_intel_set(DetectorId=None, ThreatIntelSetId=None):\n \"\"\"\n Retrieves the ThreatIntelSet that is specified by the ThreatIntelSet ID.\n See also: AWS API Documentation\n \n \n :example: response = client.get_threat_intel_set(\n DetectorId='string',\n ThreatIntelSetId='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to describe.\n\n :type ThreatIntelSetId: string\n :param ThreatIntelSetId: [REQUIRED] The unique ID that specifies the ThreatIntelSet that you want to describe.\n\n :rtype: dict\n :return: {\n 'Format': 'TXT'|'STIX'|'OTX_CSV'|'ALIEN_VAULT'|'PROOF_POINT'|'FIRE_EYE',\n 'Location': 'string',\n 'Name': 'string',\n 'Status': 'INACTIVE'|'ACTIVATING'|'ACTIVE'|'DEACTIVATING'|'ERROR'|'DELETE_PENDING'|'DELETED'\n }\n \n \n :returns: \n (dict) -- 200 response\n Format (string) -- The format of the threatIntelSet.\n Location (string) -- The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).\n Name (string) -- A user-friendly ThreatIntelSet name that is displayed in all findings generated by activity that involves IP addresses included in this ThreatIntelSet.\n Status (string) -- The status of the uploaded threatIntelSet file.\n \n \n \n \"\"\"\n pass\n\n
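# A hedged usage sketch, added for illustration and not part of the generated\n# stubs: driving pagination through the get_paginator() helper documented\n# above. It assumes the installed botocore service model exposes a\n# 'list_findings' paginator for GuardDuty and that 'detector_id' is a real\n# detector ID.\ndef _example_paginate_finding_ids(detector_id):\n    import boto3\n    client = boto3.client('guardduty')\n    paginator = client.get_paginator('list_findings')\n    finding_ids = []\n    for page in paginator.paginate(DetectorId=detector_id):\n        # Each page has the same shape as a list_findings response.\n        finding_ids.extend(page['FindingIds'])\n    return finding_ids\n\n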
def get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef invite_members(AccountIds=None, DetectorId=None, DisableEmailNotification=None, Message=None):\n \"\"\"\n Invites other AWS accounts (created as members of the current AWS account by CreateMembers) to enable GuardDuty and allow the current AWS account to view and manage these accounts' GuardDuty findings on their behalf as the master account.\n See also: AWS API Documentation\n \n \n :example: response = client.invite_members(\n AccountIds=[\n 'string',\n ],\n DetectorId='string',\n DisableEmailNotification=True|False,\n Message='string'\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the accounts that you want to invite to GuardDuty as members.\n (string) --\n \n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account with which you want to invite members.\n\n :type DisableEmailNotification: boolean\n :param DisableEmailNotification: A boolean value that specifies whether you want to disable email notification to the accounts that you're inviting to GuardDuty as members.\n\n :type Message: string\n :param Message: The invitation message that you want to send to the accounts that you're inviting to GuardDuty as members.\n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef list_detectors(MaxResults=None, NextToken=None):\n \"\"\"\n Lists detectorIds of all the existing Amazon GuardDuty detector resources.\n See also: AWS API Documentation\n \n \n :example: response = client.list_detectors(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: You can use this parameter to indicate the maximum number of detectors that you want in the response.\n\n :type NextToken: string\n :param NextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListDetectors action. For subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :rtype: dict\n :return: {\n 'DetectorIds': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n DetectorIds (list) -- A list of detector Ids.\n (string) -- The unique identifier for a detector.\n \n \n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n \n \n \n \"\"\"\n pass\n\n
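# A hedged usage sketch (illustrative, not part of the generated stubs):\n# manual NextToken pagination over list_detectors(), as the NextToken\n# parameter above describes. Assumes boto3 is configured with credentials and\n# a default region; the MaxResults value of 50 is an assumption.\ndef _example_list_all_detectors():\n    import boto3\n    client = boto3.client('guardduty')\n    detector_ids, next_token = [], None\n    while True:\n        kwargs = {'MaxResults': 50}\n        if next_token:\n            kwargs['NextToken'] = next_token\n        response = client.list_detectors(**kwargs)\n        detector_ids.extend(response['DetectorIds'])\n        next_token = response.get('NextToken')\n        if not next_token:\n            # No token in the response means the last page was reached.\n            break\n    return detector_ids\n\n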
def list_filters(DetectorId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Returns a paginated list of the current filters.\n See also: AWS API Documentation\n \n \n :example: response = client.list_filters(\n DetectorId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service where you want to list filters.\n\n :type MaxResults: integer\n :param MaxResults: Indicates the maximum number of items that you want in the response. The maximum value is 50.\n\n :type NextToken: string\n :param NextToken: Paginates results. Set the value of this parameter to NULL on your first call to the ListFilters operation. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :rtype: dict\n :return: {\n 'FilterNames': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n FilterNames (list) -- A list of filter names\n (string) -- The unique identifier for a filter\n \n \n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n \n \n \n \"\"\"\n pass\n\ndef list_findings(DetectorId=None, FindingCriteria=None, MaxResults=None, NextToken=None, SortCriteria=None):\n \"\"\"\n Lists Amazon GuardDuty findings for the specified detector ID.\n See also: AWS API Documentation\n \n \n :example: response = client.list_findings(\n DetectorId='string',\n FindingCriteria={\n 'Criterion': {\n 'string': {\n 'Eq': [\n 'string',\n ],\n 'Gt': 123,\n 'Gte': 123,\n 'Lt': 123,\n 'Lte': 123,\n 'Neq': [\n 'string',\n ]\n }\n }\n },\n MaxResults=123,\n NextToken='string',\n SortCriteria={\n 'AttributeName': 'string',\n 'OrderBy': 'ASC'|'DESC'\n }\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service whose findings you want to list.\n\n :type FindingCriteria: dict\n :param FindingCriteria: Represents the criteria used for querying findings.\n Criterion (dict) -- Represents a map of finding properties that match specified conditions and values when querying findings.\n (string) --\n (dict) -- Finding attribute (for example, accountId) for which conditions and values must be specified when querying findings.\n Eq (list) -- Represents the equal condition to be applied to a single field when querying for findings.\n (string) --\n Gt (integer) -- Represents the greater than condition to be applied to a single field when querying for findings.\n Gte (integer) -- Represents the greater than equal condition to be applied to a single field when querying for findings.\n Lt (integer) -- Represents the less than condition to be applied to a single field when querying for findings.\n Lte (integer) -- Represents the less than equal condition to be applied to a single field when querying for findings.\n Neq (list) -- Represents the not equal condition to be applied to a single field when querying for findings.\n (string) --\n \n \n \n\n :type MaxResults: integer\n :param MaxResults: You can use this parameter 
to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.\n\n :type NextToken: string\n :param NextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListFindings action. For subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.\n\n :type SortCriteria: dict\n :param SortCriteria: Represents the criteria used for sorting findings.\n AttributeName (string) -- Represents the finding attribute (for example, accountId) by which to sort findings.\n OrderBy (string) -- Order by which the sorted findings are to be displayed.\n \n\n :rtype: dict\n :return: {\n 'FindingIds': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n FindingIds (list) -- The list of the Findings.\n (string) -- The unique identifier for the Finding\n \n \n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n \n \n \n \"\"\"\n pass\n\ndef list_invitations(MaxResults=None, NextToken=None):\n \"\"\"\n Lists all GuardDuty membership invitations that were sent to the current AWS account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_invitations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: You can use this parameter to indicate the maximum number of invitations you want in the response. The default value is 50. The maximum value is 50.\n\n :type NextToken: string\n :param NextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListInvitations action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n\n :rtype: dict\n :return: {\n 'Invitations': [\n {\n 'AccountId': 'string',\n 'InvitationId': 'string',\n 'InvitedAt': 'string',\n 'RelationshipStatus': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n Invitations (list) -- A list of invitation descriptions.\n (dict) -- Invitation from an AWS account to become the current account's master.\n AccountId (string) -- Inviter account ID\n InvitationId (string) -- This value is used to validate the inviter account to the member account.\n InvitedAt (string) -- Timestamp at which the invitation was sent\n RelationshipStatus (string) -- The status of the relationship between the inviter and invitee accounts.\n \n \n \n \n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. 
For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n \n \n \n \"\"\"\n pass\n\ndef list_ip_sets(DetectorId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the IPSets of the GuardDuty service specified by the detector ID.\n See also: AWS API Documentation\n \n \n :example: response = client.list_ip_sets(\n DetectorId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that you want to retrieve.\n\n :type MaxResults: integer\n :param MaxResults: You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 7. The maximum value is 7.\n\n :type NextToken: string\n :param NextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListIPSet action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n\n :rtype: dict\n :return: {\n 'IpSetIds': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n IpSetIds (list) -- A list of the IP set IDs\n (string) -- The unique identifier for an IP Set\n \n \n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n \n \n \n \"\"\"\n pass\n\ndef list_members(DetectorId=None, MaxResults=None, NextToken=None, OnlyAssociated=None):\n \"\"\"\n Lists details about all member accounts for the current GuardDuty master account.\n See also: AWS API Documentation\n \n \n :example: response = client.list_members(\n DetectorId='string',\n MaxResults=123,\n NextToken='string',\n OnlyAssociated='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account whose members you want to list.\n\n :type MaxResults: integer\n :param MaxResults: You can use this parameter to indicate the maximum number of items you want in the response. The default value is 1. The maximum value is 50.\n\n :type NextToken: string\n :param NextToken: You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListMembers action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n\n :type OnlyAssociated: string\n :param OnlyAssociated: Specifies what member accounts the response is to include based on their relationship status with the master account. The default value is TRUE. If onlyAssociated is set to TRUE, the response will include member accounts whose relationship status with the master is set to Enabled, Disabled. 
If onlyAssociated is set to FALSE, the response will include all existing member accounts.\n\n :rtype: dict\n :return: {\n 'Members': [\n {\n 'AccountId': 'string',\n 'DetectorId': 'string',\n 'Email': 'string',\n 'InvitedAt': 'string',\n 'MasterId': 'string',\n 'RelationshipStatus': 'string',\n 'UpdatedAt': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n Members (list) -- A list of member descriptions.\n (dict) -- Contains details about the member account.\n AccountId (string) -- AWS account ID.\n DetectorId (string) -- The unique identifier for a detector.\n Email (string) -- Member account's email address.\n InvitedAt (string) -- Timestamp at which the invitation was sent\n MasterId (string) -- The master account ID.\n RelationshipStatus (string) -- The status of the relationship between the member and the master.\n UpdatedAt (string) -- The first time a resource was created. The format will be ISO-8601.\n \n \n \n \n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n \n \n \n \"\"\"\n pass\n\ndef list_threat_intel_sets(DetectorId=None, MaxResults=None, NextToken=None):\n \"\"\"\n Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID.\n See also: AWS API Documentation\n \n \n :example: response = client.list_threat_intel_sets(\n DetectorId='string',\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose ThreatIntelSets you want to list.\n\n :type MaxResults: integer\n :param MaxResults: You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 7. The maximum value is 7.\n\n :type NextToken: string\n :param NextToken: Pagination token to start retrieving threat intel sets from.\n\n :rtype: dict\n :return: {\n 'NextToken': 'string',\n 'ThreatIntelSetIds': [\n 'string',\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n NextToken (string) -- You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.\n ThreatIntelSetIds (list) -- The list of the threat intel set IDs\n (string) -- The unique identifier for a threat intel set\n \n \n \n \n \n \"\"\"\n pass\n\n
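# A hedged usage sketch (illustrative, not part of the generated stubs):\n# enumerate a detector's ThreatIntelSets via list_threat_intel_sets() above\n# and fetch each one's details with get_threat_intel_set(). Assumes a\n# configured boto3 session and a real 'detector_id'.\ndef _example_describe_threat_intel_sets(detector_id):\n    import boto3\n    client = boto3.client('guardduty')\n    response = client.list_threat_intel_sets(DetectorId=detector_id)\n    for set_id in response['ThreatIntelSetIds']:\n        details = client.get_threat_intel_set(\n            DetectorId=detector_id, ThreatIntelSetId=set_id)\n        # Fields follow the get_threat_intel_set return shape documented above.\n        print(set_id, details['Name'], details['Status'])\n\n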
def start_monitoring_members(AccountIds=None, DetectorId=None):\n \"\"\"\n Re-enables GuardDuty to monitor findings of the member accounts specified by the account IDs. A master GuardDuty account can run this command after disabling GuardDuty from monitoring these members' findings by running StopMonitoringMembers.\n See also: AWS API Documentation\n \n \n :example: response = client.start_monitoring_members(\n AccountIds=[\n 'string',\n ],\n DetectorId='string'\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the GuardDuty member accounts whose findings you want the master account to monitor.\n (string) --\n \n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account that you want to re-enable to monitor members' findings.\n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef stop_monitoring_members(AccountIds=None, DetectorId=None):\n \"\"\"\n Disables GuardDuty from monitoring findings of the member accounts specified by the account IDs. After running this command, a master GuardDuty account can run StartMonitoringMembers to re-enable GuardDuty to monitor these members' findings.\n See also: AWS API Documentation\n \n \n :example: response = client.stop_monitoring_members(\n AccountIds=[\n 'string',\n ],\n DetectorId='string'\n )\n \n \n :type AccountIds: list\n :param AccountIds: [REQUIRED] A list of account IDs of the GuardDuty member accounts whose findings you want the master account to stop monitoring.\n (string) --\n \n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector of the GuardDuty account that you want to stop from monitoring members' findings.\n\n :rtype: dict\n :return: {\n 'UnprocessedAccounts': [\n {\n 'AccountId': 'string',\n 'Result': 'string'\n },\n ]\n }\n \n \n :returns: \n (dict) -- 200 response\n UnprocessedAccounts (list) -- A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.\n (dict) -- An object containing the unprocessed account and a result string explaining why it was unprocessed.\n AccountId (string) -- AWS Account ID.\n Result (string) -- A reason why the account hasn't been processed.\n \n \n \n \n \n \n \n \"\"\"\n pass\n\ndef unarchive_findings(DetectorId=None, FindingIds=None):\n \"\"\"\n Unarchives Amazon GuardDuty findings specified by the list of finding IDs.\n See also: AWS API Documentation\n \n \n :example: response = client.unarchive_findings(\n DetectorId='string',\n FindingIds=[\n 'string',\n ]\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service whose findings you want to unarchive.\n\n :type FindingIds: list\n :param FindingIds: [REQUIRED] IDs of the findings that you want to unarchive.\n (string) -- The unique identifier for the Finding\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\n
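# A hedged usage sketch (illustrative, not part of the generated stubs):\n# unarchiving findings in batches with unarchive_findings() above. The\n# 50-ID chunk size is an assumption to stay under typical batch limits, not\n# a limit taken from these docs.\ndef _example_unarchive_in_batches(detector_id, finding_ids):\n    import boto3\n    client = boto3.client('guardduty')\n    for start in range(0, len(finding_ids), 50):\n        client.unarchive_findings(\n            DetectorId=detector_id,\n            FindingIds=finding_ids[start:start + 50])\n\n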
def update_detector(DetectorId=None, Enable=None, FindingPublishingFrequency=None):\n \"\"\"\n Updates an Amazon GuardDuty detector specified by the detectorId.\n See also: AWS API Documentation\n \n \n :example: response = client.update_detector(\n DetectorId='string',\n Enable=True|False,\n FindingPublishingFrequency='FIFTEEN_MINUTES'|'ONE_HOUR'|'SIX_HOURS'\n )\n \n \n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that you want to update.\n\n :type Enable: boolean\n :param Enable: Updated boolean value for the detector that specifies whether the detector is enabled.\n\n :type FindingPublishingFrequency: string\n :param FindingPublishingFrequency: An enum value that specifies how frequently finding updates are published to the customer.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef update_filter(Action=None, Description=None, DetectorId=None, FilterName=None, FindingCriteria=None, Rank=None):\n \"\"\"\n Updates the filter specified by the filter name.\n See also: AWS API Documentation\n \n \n :example: response = client.update_filter(\n Action='NOOP'|'ARCHIVE',\n Description='string',\n DetectorId='string',\n FilterName='string',\n FindingCriteria={\n 'Criterion': {\n 'string': {\n 'Eq': [\n 'string',\n ],\n 'Gt': 123,\n 'Gte': 123,\n 'Lt': 123,\n 'Lte': 123,\n 'Neq': [\n 'string',\n ]\n }\n }\n },\n Rank=123\n )\n \n \n :type Action: string\n :param Action: Specifies the action that is to be applied to the findings that match the filter.\n\n :type Description: string\n :param Description: The description of the filter.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The unique ID of the detector that specifies the GuardDuty service where you want to update a filter.\n\n :type FilterName: string\n :param FilterName: [REQUIRED] The name of the filter.\n\n :type FindingCriteria: dict\n :param FindingCriteria: Represents the criteria to be used in the filter for querying findings.\n Criterion (dict) -- Represents a map of finding properties that match specified conditions and values when querying findings.\n (string) --\n (dict) -- Finding attribute (for example, accountId) for which conditions and values must be specified when querying findings.\n Eq (list) -- Represents the equal condition to be applied to a single field when querying for findings.\n (string) --\n Gt (integer) -- Represents the greater than condition to be applied to a single field when querying for findings.\n Gte (integer) -- Represents the greater than equal condition to be applied to a single field when querying for findings.\n Lt (integer) -- Represents the less than condition to be applied to a single field when querying for findings.\n Lte (integer) -- Represents the less than equal condition to be applied to a single field when querying for findings.\n Neq (list) -- Represents the not equal condition to be applied to a single field when querying for findings.\n (string) --\n \n \n \n\n :type Rank: integer\n :param Rank: Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings.\n\n :rtype: dict\n :return: {\n 'Name': 'string'\n }\n \n \n :returns: \n (dict) -- 200 response\n Name (string) -- The name of the filter.\n \n \n \n \"\"\"\n pass\n\n
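# A hedged usage sketch (illustrative, not part of the generated stubs):\n# pointing update_filter() above at low-severity findings. The filter name\n# and the 'severity' criterion field are assumptions; valid criterion fields\n# come from the GuardDuty finding schema, not from this stub file.\ndef _example_auto_archive_low_severity(detector_id):\n    import boto3\n    client = boto3.client('guardduty')\n    client.update_filter(\n        DetectorId=detector_id,\n        FilterName='low-severity-noise',\n        Action='ARCHIVE',\n        FindingCriteria={'Criterion': {'severity': {'Lt': 4}}})\n\n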
def update_findings_feedback(Comments=None, DetectorId=None, Feedback=None, FindingIds=None):\n \"\"\"\n Marks specified Amazon GuardDuty findings as useful or not useful.\n See also: AWS API Documentation\n \n \n :example: response = client.update_findings_feedback(\n Comments='string',\n DetectorId='string',\n Feedback='USEFUL'|'NOT_USEFUL',\n FindingIds=[\n 'string',\n ]\n )\n \n \n :type Comments: string\n :param Comments: Additional feedback about the GuardDuty findings.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The ID of the detector that specifies the GuardDuty service whose findings you want to mark as useful or not useful.\n\n :type Feedback: string\n :param Feedback: [REQUIRED] Valid values: USEFUL | NOT_USEFUL\n\n :type FindingIds: list\n :param FindingIds: [REQUIRED] IDs of the findings that you want to mark as useful or not useful.\n (string) -- The unique identifier for the Finding\n \n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef update_ip_set(Activate=None, DetectorId=None, IpSetId=None, Location=None, Name=None):\n \"\"\"\n Updates the IPSet specified by the IPSet ID.\n See also: AWS API Documentation\n \n \n :example: response = client.update_ip_set(\n Activate=True|False,\n DetectorId='string',\n IpSetId='string',\n Location='string',\n Name='string'\n )\n \n \n :type Activate: boolean\n :param Activate: The updated boolean value that specifies whether the IPSet is active or not.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose IPSet you want to update.\n\n :type IpSetId: string\n :param IpSetId: [REQUIRED] The unique ID that specifies the IPSet that you want to update.\n\n :type Location: string\n :param Location: The updated URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).\n\n :type Name: string\n :param Name: The updated name of the IPSet.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\ndef update_threat_intel_set(Activate=None, DetectorId=None, Location=None, Name=None, ThreatIntelSetId=None):\n \"\"\"\n Updates the ThreatIntelSet specified by the ThreatIntelSet ID.\n See also: AWS API Documentation\n \n \n :example: response = client.update_threat_intel_set(\n Activate=True|False,\n DetectorId='string',\n Location='string',\n Name='string',\n ThreatIntelSetId='string'\n )\n \n \n :type Activate: boolean\n :param Activate: The updated boolean value that specifies whether the ThreatIntelSet is active or not.\n\n :type DetectorId: string\n :param DetectorId: [REQUIRED] The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update.\n\n :type Location: string\n :param Location: The updated URI of the file that contains the ThreatIntelSet. 
For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key)\n\n :type Name: string\n :param Name: The unique ID that specifies the ThreatIntelSet that you want to update.\n\n :type ThreatIntelSetId: string\n :param ThreatIntelSetId: [REQUIRED] The unique ID that specifies the ThreatIntelSet that you want to update.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) -- 200 response\n \n \"\"\"\n pass\n\n" }, { "alpha_fraction": 0.6277990341186523, "alphanum_fraction": 0.630928635597229, "avg_line_length": 37.5280647277832, "blob_id": "7f316ed069f05b9b0644df51d67dcfd59ca15832", "content_id": "0e43df4fb7b5a5845517a46099d8b0fc4df6334b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18533, "license_type": "permissive", "max_line_length": 519, "num_lines": 481, "path": "/pyboto3/eks.py", "repo_name": "Semc/pyboto3", "src_encoding": "UTF-8", "text": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_cluster(name=None, version=None, roleArn=None, resourcesVpcConfig=None, clientRequestToken=None):\n \"\"\"\n Creates an Amazon EKS control plane.\n The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, like etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint.\n Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.\n The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. 
Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support kubectl exec , logs , and proxy data flows).\n After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS Worker Nodes in the Amazon EKS User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.create_cluster(\n name='string',\n version='string',\n roleArn='string',\n resourcesVpcConfig={\n 'subnetIds': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ]\n },\n clientRequestToken='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The unique name to give to your cluster.\n \n\n :type version: string\n :param version: The desired Kubernetes version for your cluster. If you do not specify a value here, the latest version available in Amazon EKS is used.\n\n :type roleArn: string\n :param roleArn: [REQUIRED]\n The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the * Amazon EKS User Guide * .\n \n\n :type resourcesVpcConfig: dict\n :param resourcesVpcConfig: [REQUIRED]\n The VPC subnets and security groups used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide . You must specify at least two subnets. You may specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.\n subnetIds (list) -- [REQUIRED]Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.\n (string) --\n securityGroupIds (list) --Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. 
If you do not specify a security group, the default security group for your VPC is used.\n (string) --\n \n\n :type clientRequestToken: string\n :param clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'cluster': {\n 'name': 'string',\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'version': 'string',\n 'endpoint': 'string',\n 'roleArn': 'string',\n 'resourcesVpcConfig': {\n 'subnetIds': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ],\n 'vpcId': 'string'\n },\n 'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',\n 'certificateAuthority': {\n 'data': 'string'\n },\n 'clientRequestToken': 'string',\n 'platformVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef delete_cluster(name=None):\n \"\"\"\n Deletes the Amazon EKS cluster control plane.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_cluster(\n name='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the cluster to delete.\n \n\n :rtype: dict\n :return: {\n 'cluster': {\n 'name': 'string',\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'version': 'string',\n 'endpoint': 'string',\n 'roleArn': 'string',\n 'resourcesVpcConfig': {\n 'subnetIds': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ],\n 'vpcId': 'string'\n },\n 'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',\n 'certificateAuthority': {\n 'data': 'string'\n },\n 'clientRequestToken': 'string',\n 'platformVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_cluster(name=None):\n \"\"\"\n Returns descriptive information about an Amazon EKS cluster.\n The API server endpoint and certificate authority data returned by this operation are required for kubelet and kubectl to communicate with your Kubernetes API server. For more information, see Create a kubeconfig for Amazon EKS .\n See also: AWS API Documentation\n \n \n :example: response = client.describe_cluster(\n name='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the cluster to describe.\n \n\n :rtype: dict\n :return: {\n 'cluster': {\n 'name': 'string',\n 'arn': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'version': 'string',\n 'endpoint': 'string',\n 'roleArn': 'string',\n 'resourcesVpcConfig': {\n 'subnetIds': [\n 'string',\n ],\n 'securityGroupIds': [\n 'string',\n ],\n 'vpcId': 'string'\n },\n 'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',\n 'certificateAuthority': {\n 'data': 'string'\n },\n 'clientRequestToken': 'string',\n 'platformVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_update(name=None, updateId=None):\n \"\"\"\n Returns descriptive information about an update against your Amazon EKS cluster.\n When the status of the update is Succeeded , the update is complete. 
If an update fails, the status is Failed , and an error detail explains the reason for the failure.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_update(\n name='string',\n updateId='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the Amazon EKS cluster to update.\n \n\n :type updateId: string\n :param updateId: [REQUIRED]\n The ID of the update to describe.\n \n\n :rtype: dict\n :return: {\n 'update': {\n 'id': 'string',\n 'status': 'InProgress'|'Failed'|'Cancelled'|'Successful',\n 'type': 'VersionUpdate',\n 'params': [\n {\n 'type': 'Version'|'PlatformVersion',\n 'value': 'string'\n },\n ],\n 'createdAt': datetime(2015, 1, 1),\n 'errors': [\n {\n 'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown',\n 'errorMessage': 'string',\n 'resourceIds': [\n 'string',\n ]\n },\n ]\n }\n }\n \n \n :returns: \n SubnetNotFound : One of the subnets associated with the cluster could not be found.\n SecurityGroupNotFound : One of the security groups associated with the cluster could not be found.\n EniLimitReached : You have reached the elastic network interface limit for your account.\n IpNotAvailable : A subnet associated with the cluster does not have any free IP addresses.\n AccessDenied : You do not have permissions to perform the specified operation.\n OperationNotPermitted : The service role associated with the cluster does not have the required access permissions for Amazon EKS.\n VpcIdNotFound : The VPC associated with the cluster could not be found.\n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\ndef list_clusters(maxResults=None, nextToken=None):\n \"\"\"\n Lists the Amazon EKS clusters in your AWS account in the specified Region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_clusters(\n maxResults=123,\n nextToken='string'\n )\n \n \n :type maxResults: integer\n :param maxResults: The maximum number of cluster results returned by ListClusters in paginated output. When this parameter is used, ListClusters only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListClusters returns up to 100 results and a nextToken value if applicable.\n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n Note\n This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.\n \n\n :rtype: dict\n :return: {\n 'clusters': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_updates(name=None, nextToken=None, maxResults=None):\n \"\"\"\n Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.\n See also: AWS API Documentation\n \n \n :example: response = client.list_updates(\n name='string',\n nextToken='string',\n maxResults=123\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the Amazon EKS cluster for which to list updates.\n \n\n :type nextToken: string\n :param nextToken: The nextToken value returned from a previous paginated ListUpdates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.\n\n :type maxResults: integer\n :param maxResults: The maximum number of update results returned by ListUpdates in paginated output. When this parameter is used, ListUpdates only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListUpdates request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListUpdates returns up to 100 results and a nextToken value if applicable.\n\n :rtype: dict\n :return: {\n 'updateIds': [\n 'string',\n ],\n 'nextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_cluster_version(name=None, version=None, clientRequestToken=None):\n \"\"\"\n Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.\n Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). 
When the update is complete (either Failed or Successful ), the cluster status moves to Active .\n See also: AWS API Documentation\n \n \n :example: response = client.update_cluster_version(\n name='string',\n version='string',\n clientRequestToken='string'\n )\n \n \n :type name: string\n :param name: [REQUIRED]\n The name of the Amazon EKS cluster to update.\n \n\n :type version: string\n :param version: [REQUIRED]\n The desired Kubernetes version following a successful update.\n \n\n :type clientRequestToken: string\n :param clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n This field is autopopulated if not provided.\n \n\n :rtype: dict\n :return: {\n 'update': {\n 'id': 'string',\n 'status': 'InProgress'|'Failed'|'Cancelled'|'Successful',\n 'type': 'VersionUpdate',\n 'params': [\n {\n 'type': 'Version'|'PlatformVersion',\n 'value': 'string'\n },\n ],\n 'createdAt': datetime(2015, 1, 1),\n 'errors': [\n {\n 'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown',\n 'errorMessage': 'string',\n 'resourceIds': [\n 'string',\n ]\n },\n ]\n }\n }\n \n \n :returns: \n SubnetNotFound : One of the subnets associated with the cluster could not be found.\n SecurityGroupNotFound : One of the security groups associated with the cluster could not be found.\n EniLimitReached : You have reached the elastic network interface limit for your account.\n IpNotAvailable : A subnet associated with the cluster does not have any free IP addresses.\n AccessDenied : You do not have permissions to perform the specified operation.\n OperationNotPermitted : The service role associated with the cluster does not have the required access permissions for Amazon EKS.\n VpcIdNotFound : The VPC associated with the cluster could not be found.\n \n \"\"\"\n pass\n\n" } ]
84
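The GuardDuty and EKS stubs above are documentation shims: every function body is `pass`, and the real work happens in the live boto3 clients they mirror. A minimal sketch of the update flow those docstrings describe — the detector ID, threat-intel-set ID, cluster name, and Kubernetes version below are placeholder values, not anything taken from the stubs:

import time
import boto3

guardduty = boto3.client('guardduty')
eks = boto3.client('eks')

# Activate an existing threat intel set (both IDs are placeholders).
guardduty.update_threat_intel_set(
    DetectorId='12abc34d567e8fa901bc2d34e56789f0',
    ThreatIntelSetId='1ab23456c789d012e345f67890123456',
    Activate=True,
)

# Start a Kubernetes version update, then poll describe_update until the
# status leaves 'InProgress', as the update_cluster_version docs suggest.
update_id = eks.update_cluster_version(name='my-cluster', version='1.11')['update']['id']
while eks.describe_update(name='my-cluster', updateId=update_id)['update']['status'] == 'InProgress':
    time.sleep(30)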
patison5/Rest-Api-Python
https://github.com/patison5/Rest-Api-Python
08cdea427eb4ff9fc452c82ada899e8a38a552e5
307ef954041e828a9bd83c41c5e769e10bc22dba
d7de1cc4decb442f2921d76b9bb6b97e9e3fe58b
refs/heads/master
2022-11-09T08:23:28.668947
2020-06-20T13:55:37
2020-06-20T13:55:37
273,684,057
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45178335905075073, "alphanum_fraction": 0.4544253647327423, "avg_line_length": 21.939393997192383, "blob_id": "0056cae12c05fa4e104565185f3327a59b5fb80a", "content_id": "25901851b10d4afb6637f5a5e52bbbcccb77e6b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 904, "license_type": "no_license", "max_line_length": 89, "num_lines": 33, "path": "/README.md", "repo_name": "patison5/Rest-Api-Python", "src_encoding": "UTF-8", "text": "# Rest-Api-Python\nRest Api (python)\n\n### Example:\n\nsource file: restApi.json\n\n```sh\n{\n \"users\": [\n {\n \"id\": 0,\n \"name\": \"Misha\",\n \"surname\": \"Pidor\"\n },\n {\n \"id\": 1,\n \"name\": \"Fedor\",\n \"surname\": \"Krasauchik\"\n }\n ]\n}\n```\n\n### Routes:\n\n| URL | Параметры | Описание |\n| :--- | :--- | :--- |\n| / | нет | возвращает всех пользователей в виде json |\n| /user/\\<int:user_id\\> | id | возвращает конкретного пользователя по id |\n| /user/update | id, name, surname | изменяет пользователя |\n| /user/delete | id | удаляет пользователя |\n| /user/add | name, surname | добавляет пользователя |\n" }, { "alpha_fraction": 0.5659589171409607, "alphanum_fraction": 0.5676992535591125, "avg_line_length": 25.850467681884766, "blob_id": "014ee300d9c17f7716fbb05b7b3238f2bd4f920e", "content_id": "bdfc54bf3315e705f2e01cd0f42ec26d3df413af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3004, "license_type": "no_license", "max_line_length": 92, "num_lines": 107, "path": "/app.py", "repo_name": "patison5/Rest-Api-Python", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import request\nimport json\n\napp = Flask(__name__)\n\n\ndef save_data(data, filename=\"restApi.json\"): # Функция сохранения базы в txt файле\n with open(filename, \"w\") as file: # Открываем файл на запись\n # json.dump(data, file)\n data = json.dumps(data, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n file.write(data)\n\n\ndef read_data():\n with open('restApi.json') as json_file:\n return json.load(json_file)\n\n\nclass Users:\n def __init__(self, id, name, surname):\n self.id = id\n self.name = name\n self.surname = surname\n\n # советуют также искользовать jsonpickle - более удобный инструмент сериализации в JSON\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n\n\ndef findUserById(id):\n users = read_data()['users']\n for user in users:\n if user['id'] == int(id):\n return user\n\n\nusersList = {\"users\": []}\nusersList[\"users\"].append(Users(0, \"Misha\", \"Pidor\"))\nusersList[\"users\"].append(Users(1, \"Fedor\", \"Krasauchik\"))\nsave_data(usersList)\n\n\[email protected]('/')\ndef hello_world():\n return read_data()\n\n\[email protected]('/user/<int:user_id>')\ndef show_post(user_id):\n # вывести сообщение с данным\n for user in read_data()[\"users\"]:\n if user['id'] == int(user_id):\n return user\n return f\"No user with {user_id} found\"\n\n\[email protected]('/user', methods=['GET', 'PUT', 'DELETE', 'POST'])\ndef user():\n if request.method == 'GET':\n return read_data()\n\n if request.method == 'PUT':\n id = request.args.get('id')\n name = request.args.get('name')\n surname = request.args.get('surname')\n\n if not id:\n return \"error, no id sended\"\n\n filedata = read_data()\n users = filedata['users']\n for user in users:\n if user['id'] == int(id):\n if name:\n user['name'] = name\n if surname:\n user['surname'] = surname\n\n 
save_data(filedata)\n return f\"User #{id} was updated\"\n\n elif request.method == \"DELETE\":\n user_id = request.args.get('id')\n filedata = read_data()\n\n for user in filedata[\"users\"]:\n if user['id'] == int(user_id):\n filedata['users'].remove(user)\n save_data(filedata)\n return f'User {user_id} was deleted.'\n\n return f\"No user with {user_id} found\"\n\n elif request.method == 'POST':\n name = request.args.get('name')\n surname = request.args.get('surname')\n\n filedata = read_data()\n filedata[\"users\"].append(Users(len(filedata[\"users\"]), name, surname))\n save_data(filedata)\n\n return f\"User {name} {surname} [{len(filedata['users']) - 1}] was added\"\n\n\nif __name__ == '__main__':\n app.run()\n" } ]
2
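Because every handler in app.py reads its inputs from request.args, the whole API can be exercised with query-string parameters alone. A quick smoke test with the requests library, assuming the app is running on the default Flask development server:

import requests

BASE = 'http://127.0.0.1:5000'  # default host/port of app.run()

print(requests.get(f'{BASE}/').json())                                                 # list all users
print(requests.post(f'{BASE}/user', params={'name': 'Anna', 'surname': 'Smith'}).text) # add
print(requests.put(f'{BASE}/user', params={'id': 0, 'name': 'Mikhail'}).text)          # update
print(requests.delete(f'{BASE}/user', params={'id': 1}).text)                          # delete
print(requests.get(f'{BASE}/user/0').json())                                           # fetch one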
bhaveshmunot1/GeeksForGeeks-InterviewExperiences-Scrapper
https://github.com/bhaveshmunot1/GeeksForGeeks-InterviewExperiences-Scrapper
1c12d1cce1e129faa8d0442194d9887d821afbcf
be6dc4c85b9d75182f5ee45eb13ad52e1c7b1c1d
c53fe08b8f379973baf33ab51305f40032f7c879
refs/heads/master
2021-09-01T17:47:04.448663
2017-12-28T04:42:22
2017-12-28T04:42:22
115,585,573
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.5344641804695129, "alphanum_fraction": 0.5363489389419556, "avg_line_length": 29.702478408813477, "blob_id": "6e528e30bd5c6674c8cc6c614b416336e64ee87a", "content_id": "3fb53c5d1ce90ad6fb65ec30cc5c60d9ab394032", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3714, "license_type": "no_license", "max_line_length": 87, "num_lines": 121, "path": "/main.py", "repo_name": "bhaveshmunot1/GeeksForGeeks-InterviewExperiences-Scrapper", "src_encoding": "UTF-8", "text": "import urllib2\nfrom lxml import html\nfrom bs4 import BeautifulSoup\nimport json\nimport os\n\n# globals\ncompaniesList = [\n {\n 'name' : 'Amazon',\n 'link' : 'http://www.geeksforgeeks.org/tag/amazon/page/',\n },\n {\n 'name' : 'GoldmanSachs',\n 'link' : 'http://www.geeksforgeeks.org/tag/goldman-sachs/page/',\n }\n]\n\ndef writeToFile(filename, data):\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n pass\n\n with open(filename, \"w\") as f:\n f.write(str(data))\n\ndef GetPageSource(company, page_num):\n pathWithFolders = company['name'] + '//' + 'metaData' + str(page_num)\n #print (pathWithFolders)\n if os.path.exists(pathWithFolders):\n print \"offline\"\n with open(pathWithFolders, 'r') as f:\n soup = f.read()\n f.close()\n return BeautifulSoup(soup, \"lxml\")\n else:\n print \"online\"\n link = company['link'] + str(page_num) + \"/\"\n print(link)\n try :\n page = urllib2.urlopen(link)\n #print page.getcode()\n #print page\n soup = BeautifulSoup(page, \"lxml\")\n with open(pathWithFolders, 'w') as f:\n f.write(str(soup))\n f.close()\n return soup\n except urllib2.HTTPError, e:\n print e.getcode()\n\ndef ProcessArticle(soup, filename):\n # if not os.path.exists(filename):\n # with open(filename, 'w') as f:\n # content = soup.find(\"div\", class_=\"entry-content\")\n # paragraphs = content.find_all(\"p\")\n # for p in paragraphs:\n # f.write(str(p.get_text()))\n # f.close()\n pass\n\ndef CreateArticle(company, url, filename):\n print url\n pathWithFolders = company['name'] + '//RawArticles//' + filename\n #print pathWithFolders\n if os.path.exists(pathWithFolders):\n print \"offline article\"\n else :\n print \"online article\"\n try :\n page = urllib2.urlopen(url)\n soup = BeautifulSoup(page, \"lxml\")\n with open(pathWithFolders, 'w') as f:\n f.write(str(soup))\n f.close()\n except :\n raise\n pathWithFolders = company['name'] + '//Articles//' + filename\n ProcessArticle(soup, pathWithFolders)\n\ndef GetListOfArticles(soup):\n lists = soup.find_all(\"article\")\n return lists\n\ndef GetArticleLink(soup):\n link = soup.find(\"a\")\n return link\n\ntry :\n for company in companiesList :\n pageNum = 1\n firstArticle = True\n while True:\n soup = GetPageSource(company, pageNum)\n try :\n listOfArticles = GetListOfArticles(soup)\n #print (len(listOfArticles))\n for article in listOfArticles:\n articleLink = GetArticleLink(article)\n #print articleLink[\"href\"]\n print articleLink.get_text()\n if firstArticle == True:\n firstArticle = False\n pathWithFolders = company['name'] + '//' + 'metaData'\n #print (pathWithFolders)\n with open(pathWithFolders, 'w') as f:\n f.write(articleLink.get_text())\n f.close()\n CreateArticle(company, articleLink[\"href\"], articleLink.get_text())\n pageNum = pageNum + 1\n except:\n print \"Article not found\"\n break\nexcept :\n print \"Page not found\"\n\nfor company in companiesList :\n\n pass" } ]
1
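The scraper above is Python 2 (urllib2 and print statements), but its core trick — cache each listing page on disk and reparse the cached copy on later runs — ports directly. A sketch of the same fetch-and-cache helper in Python 3, keeping the scraper's own directory layout; only the standard library and BeautifulSoup are assumed:

import os
from urllib.request import urlopen
from bs4 import BeautifulSoup

def get_page_source(company, page_num):
    # Mirror of GetPageSource: reuse the cached page if it exists,
    # otherwise download it and cache it before parsing.
    cache_path = os.path.join(company['name'], 'metaData' + str(page_num))
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return BeautifulSoup(f.read(), 'lxml')
    html = urlopen(company['link'] + str(page_num) + '/').read()
    os.makedirs(company['name'], exist_ok=True)
    with open(cache_path, 'wb') as f:
        f.write(html)
    return BeautifulSoup(html, 'lxml')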
kevin-chen/WalkerBuddy-TechFestival2019
https://github.com/kevin-chen/WalkerBuddy-TechFestival2019
17fcdaf616b2a980f16a4d57cb557b7320ab589f
35ef4e15c5cd29ac52e94b2c8a89e4e601da6acd
04a50b9295ffc89ddf4a11839811202e2c0ab921
refs/heads/master
2020-05-05T09:34:27.193166
2019-05-29T20:16:40
2019-05-29T20:16:40
179,909,011
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5510817170143127, "alphanum_fraction": 0.5548878312110901, "avg_line_length": 33.90909194946289, "blob_id": "545d2a256e676f0f7f16e1c880a8d3dc3bfd455a", "content_id": "e4589ce76ac9172e02fa7be5f98653f94b909b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 4993, "license_type": "no_license", "max_line_length": 188, "num_lines": 143, "path": "/WalkerBuddy/WalkerBuddy/ViewController.swift", "repo_name": "kevin-chen/WalkerBuddy-TechFestival2019", "src_encoding": "UTF-8", "text": "//\n// ViewController.swift\n// ConnectRaspi\n//\n// Created by Kevin Chen on 4/6/2019.\n// Copyright © 2019 New York University. All rights reserved.\n//\n\nimport UIKit\nimport Firebase\nimport WebKit\nimport Speech\nimport CoreLocation\nimport MapKit\n \nclass ViewController: UIViewController, SFSpeechRecognizerDelegate, CLLocationManagerDelegate {\n \n @IBOutlet weak var btn: UILabel!\n var timer = Timer()\n @IBOutlet weak var feed: WKWebView!\n @IBOutlet weak var textView: UILabel!\n var locManager = CLLocationManager()\n var currentLocation: CLLocation!\n @IBOutlet weak var locationTextView: UILabel!\n \n private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: \"en-US\")) //1\n private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?\n private var recognitionTask: SFSpeechRecognitionTask?\n private let audioEngine = AVAudioEngine()\n\n override func viewDidLoad() {\n super.viewDidLoad()\n say(item: \"Press Trigger to Activate\")\n }\n \n func getting_address() {\n \n tier = 1\n // NEW LOCATION Detection\n let database = Database.database().reference(fromURL: \"*****\").child(\"restart\")\n database.observe(.childChanged, with: { (snapshot) -> Void in\n print(\"RECOGNIZED RESTART\")\n //self.feed.reload()\n var trigger = false\n var speech = \"Nothing to Say\"\n database.observeSingleEvent(of: .value, with: { (snapshot) in\n let userDict = snapshot.value as! [String: Any]\n trigger = userDict[\"triggeredPressed\"] as! Bool\n\n let ref = Database.database().reference(fromURL: \"******\").child(\"restart/speech\")\n ref.observeSingleEvent(of: .value, with: { (snapshot) in\n let userDict = snapshot.value as! [String: Any]\n if trigger == true {\n speech = userDict[\"start\"] as! String\n }\n else {\n speech = userDict[\"end\"] as! 
String\n self.stopRecording()\n }\n if trigger == true{\n SpeechService.shared.speak(text: speech, voiceType: .waveNetFemale) { self.startRecording() }\n }\n else {\n do {\n try AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playAndRecord, mode: .default, options: AVAudioSession.CategoryOptions.defaultToSpeaker)\n }\n catch {\n print(\"can't default to speaker \")\n }\n }\n })\n })\n })\n }\n \n func stopRecording() {\n audioEngine.stop()\n recognitionRequest?.endAudio()\n btn.text = \"Press Trigger to Activate\"\n }\n \n func startRecording() {\n \n btn.text = \"Listening ...\"\n textView.text = \"Where do you want to go, I'm listening!\"\n \n if recognitionTask != nil {\n recognitionTask?.cancel()\n recognitionTask = nil\n }\n\n let audioSession = AVAudioSession.sharedInstance()\n do {\n try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: .default)\n } catch {\n print(\"audioSession properties weren't set because of an error.\")\n }\n \n recognitionRequest = SFSpeechAudioBufferRecognitionRequest()\n \n let inputNode = audioEngine.inputNode\n \n guard let recognitionRequest = recognitionRequest else {\n fatalError(\"Unable to create an SFSpeechAudioBufferRecognitionRequest object\")\n }\n \n recognitionRequest.shouldReportPartialResults = true\n \n recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in\n \n var isFinal = false\n \n if result != nil {\n \n self.textView.text = result?.bestTranscription.formattedString\n isFinal = (result?.isFinal)!\n }\n \n if error != nil || isFinal {\n self.audioEngine.stop()\n inputNode.removeTap(onBus: 0)\n \n self.recognitionRequest = nil\n self.recognitionTask = nil\n self.btn.isEnabled = true\n }\n })\n \n let recordingFormat = inputNode.outputFormat(forBus: 0)\n inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in\n self.recognitionRequest?.append(buffer)\n }\n \n audioEngine.prepare()\n \n do {\n try audioEngine.start()\n } catch {\n print(\"audioEngine couldn't start because of an error.\")\n }\n \n }\n}\n" }, { "alpha_fraction": 0.6486486196517944, "alphanum_fraction": 0.6518918871879578, "avg_line_length": 28.838708877563477, "blob_id": "a910a0a7f6dec7d7f6c463bcb9769a4e69105c1e", "content_id": "206c33724b7f3bb7837f4b6bfcbca421d1661457", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1850, "license_type": "no_license", "max_line_length": 130, "num_lines": 62, "path": "/street.py", "repo_name": "kevin-chen/WalkerBuddy-TechFestival2019", "src_encoding": "UTF-8", "text": "from google.cloud import storage\nfrom google.cloud import automl_v1beta1 as automl\nimport os\nimport picamera\nimport datetime\nimport time as t\nimport RPi.GPIO as GPIO\nfrom firebase import firebase\n \n# AutoML Vision \nproject_id = '***********'\ncompute_region = '***********'\nmodel_id = '***********'\nfile_path = '***********'\nscore_threshold = '0.5'\nresponse_display_name = \"\"\n\n# Firebase\ntouch = 11\nfirebase = firebase.FirebaseApplication('***********', None)\ntouch_original = firebase.get('restart', 'triggeredPressed')\nfirebase.put('restart', 'triggeredPressed', (not touch_original))\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(touch, GPIO.IN)\n\ndef touch_sensor():\n global touch_original\n touch_pressed = GPIO.input(touch)\n if touch_pressed == touch_original:\n touch_original = (not touch_original)\n firebase.put('restart', 'triggeredPressed', (not touch_original))\n 
\ndef analyze():\n    global response_display_name\n    os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"***********\"\n    project = '***********'\n    storage_client = storage.Client(project=project)\n    bucket = storage_client.get_bucket('***********')\n\n    automl_client = automl.AutoMlClient()\n    prediction_client = automl.PredictionServiceClient()  # serves the predict() call below\n    model_full_id = automl_client.model_path(project_id, compute_region, model_id)  # Get the full path of the model.\n\n    with open(file_path, \"rb\") as image_file:\n        content = image_file.read()\n    payload = {\"image\": {\"image_bytes\": content}}\n\n    params = { }\n\n    if score_threshold:\n        params = {\"score_threshold\": score_threshold}\n\n    response = prediction_client.predict(model_full_id, payload, params)\n    for result in response.payload:\n        print(\"Date: {} Prediction: {} {}\".format(str(datetime.datetime.now()), result.display_name, result.classification.score))\n\ndef main():\n    while True:\n        analyze()\n        touch_sensor()\n\nmain()\n" }, { "alpha_fraction": 0.7421383857727051, "alphanum_fraction": 0.7758719325065613, "avg_line_length": 45.02631759643555, "blob_id": "0ee4e71dc55f8052fde13b405f8574266cf2395c", "content_id": "9255efe5509d14cc32cae16009559453187d5943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1751, "license_type": "no_license", "max_line_length": 469, "num_lines": 38, "path": "/README.md", "repo_name": "kevin-chen/WalkerBuddy-TechFestival2019", "src_encoding": "UTF-8", "text": "# WalkerBuddy-TechFestival2019\n\n**In Summary** \"Google Maps for the blind\"\n\nWalkerBuddy is a cost-effective application that improves the accessibility of travel for the visually impaired to navigate in the urban environment. Those who are visually impaired will find it difficult to complete what may be perceived as simple tasks by those who are not disabled such as crossing streets, finding directions, and safely reaching one’s destination. This project directly addresses the demand for more disability-friendly applications for the blind.\n\n## Features\n- [X] User is able to enter destination verbally using a touch switch trigger.\n- [X] Device uses Raspberry Pi to calculate direction and output audio to user\n \n## Resources\nXCode\n- [X] Front End - (App) Coded through Xcode & Swift\n\nRaspberry Pi\n\n- [X] Back End - Raspberry Pi Python\n\nGoogle Cloud Platform\n\n- [X] Back End - Google Firebase Database\n- [X] Back End - Google AutoVision ML\n- [X] Back End - Google Storage\n- [X] Back End - Google Maps Directions API\n- [X] Back End - Google Speech Service\n- [ ] Back End - Google Translate\n\nApple API\n\n- [X] Back End - Apple Speech Recognition\n\nProject Portfolio: https://devpost.com/software/walkerbuddy\n\n<img src=\"https://github.com/kc3585/WalkerBuddy-TechFestival2019/blob/master/designDescription.png\" width=500><br>\n<img src=\"https://github.com/kc3585/WalkerBuddy-TechFestival2019/blob/master/homeScreen.png\" width=500><br>\n<img src=\"https://github.com/kc3585/WalkerBuddy-TechFestival2019/blob/master/screen.png\" width=500><br>\n<img src=\"https://github.com/kc3585/WalkerBuddy-TechFestival2019/blob/master/Firebase.png\" width=500><br>\n<img src=\"https://github.com/kc3585/WalkerBuddy-TechFestival2019/blob/master/Storage.png\" width=500><br>\n" } ]
3
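street.py wires the camera, the touch sensor, and Firebase together, but its detection step is a single AutoML Vision v1beta1 predict call. Reduced to just that call — the project, region, model ID, and image path here are placeholders standing in for the redacted '***********' constants above:

from google.cloud import automl_v1beta1 as automl

automl_client = automl.AutoMlClient()
prediction_client = automl.PredictionServiceClient()
model_full_id = automl_client.model_path('my-project', 'us-central1', 'ICN0000000000')

with open('frame.jpg', 'rb') as image_file:
    payload = {'image': {'image_bytes': image_file.read()}}

# score_threshold filters out low-confidence labels, as in street.py.
response = prediction_client.predict(model_full_id, payload, {'score_threshold': '0.5'})
for result in response.payload:
    print(result.display_name, result.classification.score)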
plenoi/optimize
https://github.com/plenoi/optimize
43511eb82812e3ef85c77e2d9c7b6d597736a793
9c1a52847178ed83304ce6283ea99cbba3716366
6d47d001a361f67d73473c044384eef96f4b6336
refs/heads/master
2022-04-24T20:07:41.966589
2020-04-21T01:27:51
2020-04-21T01:27:51
256,422,718
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.48711127042770386, "alphanum_fraction": 0.5057375431060791, "avg_line_length": 26.96744155883789, "blob_id": "80d0b7603a8ac6fe5a70ccf8d123deed42e6e3cc", "content_id": "6de97f95171e171a9298a822fd5c29af8fd2880a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6013, "license_type": "no_license", "max_line_length": 103, "num_lines": 215, "path": "/BAT.py", "repo_name": "plenoi/optimize", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nAj. Plenoi as CAMT CMU\nModified from\nhttps://github.com/7ossam81/EvoloPy\nhttps://www.scitepress.org/Papers/2016/60482/60482.pdf\n\"\"\"\nimport numpy as np\nimport random\nimport time\n\n\nclass solution:\n def __init__(self):\n self.best = 0\n self.bestIndividual=[]\n self.population=[]\n self.convergence = []\n self.optimizer=\"\"\n self.objfname=\"\"\n self.startTime=0\n self.endTime=0\n self.executionTime=0\n self.lb=0\n self.ub=0\n self.dim=0\n self.popnum=0\n self.maxiers=0\n\ndef cv(clf, X, y, nr_fold):\n ix = np.zeros(len(y))\n for i in range(0, len(y)):\n ix[i] = i\n ix = np.array(ix)\n \n allACC = np.zeros(nr_fold)\n allSENS = np.zeros(nr_fold)\n allSPEC = np.zeros(nr_fold)\n allMCC = np.zeros(nr_fold)\n #allAUC = np.zeros(nr_fold)\n for j in range(0, nr_fold):\n train_ix = ((ix % nr_fold) != j)\n test_ix = ((ix % nr_fold) == j)\n train_X, test_X = X[train_ix], X[test_ix]\n train_y, test_y = y[train_ix], y[test_ix]\n clf.fit(train_X, train_y) \n #pr = clf.predict_proba(test_X)[:,1] \n #p = np.round(pr)\n p = clf.predict(test_X)\n TP=0 \n FP=0\n TN=0\n FN=0\n for i in range(0,len(test_y)):\n if test_y[i]==0 and p[i]==0:\n TP+= 1\n elif test_y[i]==0 and p[i]==1:\n FN+= 1\n elif test_y[i]==1 and p[i]==0:\n FP+= 1\n elif test_y[i]==1 and p[i]==1:\n TN+= 1\n ACC = (TP+TN)/(TP+FP+TN+FN)\n SENS = TP/(TP+FN)\n SPEC = TN/(TN+FP)\n det = np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\n if (det == 0): \n MCC = 0 \n else:\n MCC = ((TP*TN)-(FP*FN))/det\n #AUC = roc_auc_score(test_y,pr)\n allACC[j] = ACC\n allSENS[j] = SENS\n allSPEC[j] = SPEC\n allMCC[j] = MCC\n #allAUC[j] = AUC\n #np.mean(allACC),np.mean(allSENS),np.mean(allSPEC),np.mean(allMCC),np.mean(allAUC)\n return np.mean(allACC)\n\nfrom sklearn.svm import SVC\ndef fitness(gene):\n gene = np.array(np.round(gene),dtype=int)\n f = np.where(gene[0:numFeat]==1)[0]\n ci = int(''.join([\"%g\"%item for item in gene[numFeat:numFeat+3]]),2)\n gi = int(''.join([\"%g\"%item for item in gene[numFeat+3:numFeat+numPar]]),2)\n c =parC[ci]\n g =parG[gi]\n clf = SVC(C=c,gamma=g) \n X_train_norm = X[:,f]\n return 0.95*(1-cv(clf,X_train_norm,y ,numFold)) + 0.05*(len(f)/numFeat) \n\ndef BAT(objf,lb,ub,SearchAgents_no,Max_iteration):\n \n n=SearchAgents_no; # Population size\n #lb=-50\n #ub=50\n dim = len(lb)\n N_gen=Max_iteration # Number of generations\n \n A=0.5; # Loudness (constant or decreasing)\n r=0.5; # Pulse rate (constant or decreasing)\n \n Qmin=0 # Frequency minimum\n Qmax=2 # Frequency maximum\n \n \n d=dim # Number of dimensions \n \n # Initializing arrays\n Q=np.zeros(n) # Frequency\n v=np.zeros((n,d)) # Velocities\n Convergence_curve=[];\n \n # Initialize the population/solutions\n Sol = np.zeros((n,d))\n for i in range(dim):\n Sol[:, i] = np.random.randint(0,2,n) * (ub[i] - lb[i]) + lb[i]\n\n S=np.zeros((n,d))\n S=np.copy(Sol)\n Fitness=np.zeros(n)\n \n \n # initialize solution for the final results \n s=solution()\n print(\"BAT is optimizing \\\"\"+objf.__name__+\"\\\"\") \n \n # Initialize timer for the 
experiment\n timerStart=time.time() \n s.startTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n \n #Evaluate initial random solutions\n for i in range(0,n):\n Fitness[i]=objf(Sol[i,:])\n \n \n # Find the initial best solution\n i=np.argmin(Fitness)\n best=np.copy(Sol[i,:]) \n fmin = Fitness[i]\n \n # Main loop\n for t in range (0,N_gen): \n \n # Loop over all bats(solutions)\n for i in range (0,n):\n Q[i]=Qmin+(Qmin-Qmax)*random.random()\n v[i,:]=v[i,:]+(Sol[i,:]-best)*Q[i]\n S[i,:]=Sol[i,:]+v[i,:]\n \n # Check boundaries\n for j in range(d):\n Sol[i,j] = np.clip(Sol[i,j], lb[j], ub[j])\n \n\n \n # Pulse rate\n if random.random()>r:\n S[i,:]=best+0.001*np.random.randn(d)\n \n # Evaluate new solutions\n # Binary BAT by sigmoid\n for j in range(d):\n Xn=S[i,j]\n TF=1/(1+np.exp(-10*(Xn-0.5)));\n if TF >= np.random.uniform(0,1):\n X_binary=1 \n else:\n X_binary=0\n S[i,j]=X_binary\n \n Fnew=objf(S[i,:])\n \n # Update if the solution improves\n if ((Fnew<=Fitness[i]) and (random.random()<A) ):\n Sol[i,:]=np.copy(S[i,:])\n Fitness[i]=Fnew;\n \n \n # Update the current best solution\n if Fnew<=fmin:\n best=np.copy(S[i,:])\n fmin=Fnew\n \n #update convergence curve\n Convergence_curve.append(fmin) \n\n if (t%1==0):\n print(['At iteration '+ str(t)+ ' the best '+str(numFold)+'CV accuracy is '+ str(1-fmin)]);\n \n \n timerEnd=time.time() \n s.endTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n s.executionTime=timerEnd-timerStart\n s.convergence=Convergence_curve\n s.optimizer=\"BAT\" \n s.bestIndividual = best\n s.population = Sol\n s.objfname=objf.__name__\n \n return s\n\nfrom sklearn.datasets import load_svmlight_file\ndata = load_svmlight_file(\"trainnorm.scl\", zero_based=False)\nX = data[0].toarray()\ny = data[1]\nparC = np.array([2 ** i for i in np.arange(0,8, dtype=float)])\nparG = np.array([2 ** i for i in np.arange(-8,8, dtype=float)])\n\nnumPar = (3+4)\nnumFold = 10\nnumFeat = np.size(X,1)\nlb = np.zeros(numFeat+numPar)\nub = np.ones(numFeat+numPar)\ns = BAT(fitness,lb,ub,40,20)\n" }, { "alpha_fraction": 0.47122904658317566, "alphanum_fraction": 0.4980446994304657, "avg_line_length": 31.389141082763672, "blob_id": "d6803af3418f1051ead7cf7e2a33b65cb85d1bc0", "content_id": "e1228f9d596e6b50c3064f0e6dbda1a1f6824a35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7160, "license_type": "no_license", "max_line_length": 113, "num_lines": 221, "path": "/GWO.py", "repo_name": "plenoi/optimize", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAj. 
Plenoi as CAMT CMU\nModified from\nhttps://github.com/7ossam81/EvoloPy\nhttps://www.scitepress.org/Papers/2016/60482/60482.pdf\n\"\"\"\n############# Minimize #################\nimport random\nimport time\nimport numpy as np\n\nclass solution:\n def __init__(self):\n self.best = 0\n self.bestIndividual=[]\n self.convergence = []\n self.optimizer=\"\"\n self.objfname=\"\"\n self.startTime=0\n self.endTime=0\n self.executionTime=0\n self.lb=0\n self.ub=0\n self.dim=0\n self.popnum=0\n self.maxiers=0\n self.population=[]\n \ndef cv(clf, X, y, nr_fold):\n ix = np.zeros(len(y))\n for i in range(0, len(y)):\n ix[i] = i\n ix = np.array(ix)\n \n allACC = np.zeros(nr_fold)\n allSENS = np.zeros(nr_fold)\n allSPEC = np.zeros(nr_fold)\n allMCC = np.zeros(nr_fold)\n #allAUC = np.zeros(nr_fold)\n for j in range(0, nr_fold):\n train_ix = ((ix % nr_fold) != j)\n test_ix = ((ix % nr_fold) == j)\n train_X, test_X = X[train_ix], X[test_ix]\n train_y, test_y = y[train_ix], y[test_ix]\n clf.fit(train_X, train_y) \n #pr = clf.predict_proba(test_X)[:,1] \n #p = np.round(pr)\n p = clf.predict(test_X)\n TP=0 \n FP=0\n TN=0\n FN=0\n for i in range(0,len(test_y)):\n if test_y[i]==0 and p[i]==0:\n TP+= 1\n elif test_y[i]==0 and p[i]==1:\n FN+= 1\n elif test_y[i]==1 and p[i]==0:\n FP+= 1\n elif test_y[i]==1 and p[i]==1:\n TN+= 1\n ACC = (TP+TN)/(TP+FP+TN+FN)\n SENS = TP/(TP+FN)\n SPEC = TN/(TN+FP)\n det = np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\n if (det == 0): \n MCC = 0 \n else:\n MCC = ((TP*TN)-(FP*FN))/det\n #AUC = roc_auc_score(test_y,pr)\n allACC[j] = ACC\n allSENS[j] = SENS\n allSPEC[j] = SPEC\n allMCC[j] = MCC\n #allAUC[j] = AUC\n #np.mean(allACC),np.mean(allSENS),np.mean(allSPEC),np.mean(allMCC),np.mean(allAUC)\n return np.mean(allACC)\n\nfrom sklearn.svm import SVC\ndef fitness(gene):\n gene = np.array(np.round(gene),dtype=int)\n f = np.where(gene[0:numFeat]==1)[0]\n ci = int(''.join([\"%g\"%item for item in gene[numFeat:numFeat+3]]),2)\n gi = int(''.join([\"%g\"%item for item in gene[numFeat+3:numFeat+numPar]]),2)\n c =parC[ci]\n g =parG[gi]\n clf = SVC(C=c,gamma=g) \n X_train_norm = X[:,f]\n return 0.95*(1-cv(clf,X_train_norm,y ,numFold)) + 0.05*(len(f)/numFeat) \n\ndef GWO(objf,lb,ub, SearchAgents_no,Max_iter):\n #Max_iter=1000\n #lb=-100\n #ub=100\n #dim=30 \n #SearchAgents_no=5\n dim = len(lb)\n # initialize alpha, beta, and delta_pos\n Alpha_pos=np.zeros(dim)\n Alpha_score=float(\"inf\")\n \n Beta_pos=np.zeros(dim)\n Beta_score=float(\"inf\")\n \n Delta_pos=np.zeros(dim)\n Delta_score=float(\"inf\")\n \n #Initialize the positions of search agents\n Positions = np.zeros((SearchAgents_no, dim))\n for i in range(dim):\n Positions[:, i] = np.random.randint(0,2, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]\n \n Convergence_curve=np.zeros(Max_iter)\n s=solution()\n\n # Loop counter\n print(\"GWO is optimizing \\\"\"+objf.__name__+\"\\\"\") \n \n timerStart=time.time() \n s.startTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n # Main loop\n for l in range(0,Max_iter):\n for i in range(0,SearchAgents_no):\n \n # Return back the search agents that go beyond the boundaries of the search space\n for j in range(dim):\n Positions[i,j]=np.clip(Positions[i,j], lb[j], ub[j])\n\n # Calculate objective function for each search agent\n fitness=objf(Positions[i,:])\n \n # Update Alpha, Beta, and Delta\n if fitness<Alpha_score :\n Alpha_score=fitness; # Update alpha\n Alpha_pos=Positions[i,:].copy()\n \n \n if (fitness>Alpha_score and fitness<Beta_score ):\n Beta_score=fitness # Update beta\n Beta_pos=Positions[i,:].copy()\n \n 
\n if (fitness>Alpha_score and fitness>Beta_score and fitness<Delta_score): \n Delta_score=fitness # Update delta\n Delta_pos=Positions[i,:].copy()\n \n a=2-l*((2)/Max_iter); # a decreases linearly fron 2 to 0\n \n # Update the Position of search agents including omegas\n for i in range(0,SearchAgents_no):\n for j in range (0,dim): \n \n r1=random.random() # r1 is a random number in [0,1]\n r2=random.random() # r2 is a random number in [0,1]\n \n A1=2*a*r1-a; # Equation (3.3)\n C1=2*r2; # Equation (3.4)\n \n D_alpha=abs(C1*Alpha_pos[j]-Positions[i,j]); # Equation (3.5)-part 1\n X1=Alpha_pos[j]-A1*D_alpha; # Equation (3.6)-part 1\n \n r1=random.random()\n r2=random.random()\n \n A2=2*a*r1-a; # Equation (3.3)\n C2=2*r2; # Equation (3.4)\n \n D_beta=abs(C2*Beta_pos[j]-Positions[i,j]); # Equation (3.5)-part 2\n X2=Beta_pos[j]-A2*D_beta; # Equation (3.6)-part 2 \n \n r1=random.random()\n r2=random.random() \n \n A3=2*a*r1-a; # Equation (3.3)\n C3=2*r2; # Equation (3.4)\n \n D_delta=abs(C3*Delta_pos[j]-Positions[i,j]); # Equation (3.5)-part 3\n X3=Delta_pos[j]-A3*D_delta; # Equation (3.5)-part 3 \n \n Xn=(X1+X2+X3)/3 \n # Binary GWO by sigmoid\n TF=1/(1+np.exp(-10*(Xn-0.5)));\n if TF >= np.random.uniform(0,1):\n X_binary=1 \n else:\n X_binary=0\n \n Positions[i,j]=X_binary # Equation (3.7)\n \n Convergence_curve[l]=Alpha_score;\n\n if (l%1==0):\n print(['At iteration '+ str(l)+ ' the best '+str(numFold)+'CV accuracy is '+ str(1-Alpha_score)]);\n \n timerEnd=time.time() \n s.endTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n s.executionTime=timerEnd-timerStart\n s.convergence=Convergence_curve\n s.optimizer=\"GWO\"\n s.objfname=objf.__name__\n s.best = Alpha_score\n s.bestIndividual = Alpha_pos\n s.population = Positions\n\n return s\n\nfrom sklearn.datasets import load_svmlight_file\ndata = load_svmlight_file(\"trainnorm.scl\", zero_based=False)\nX = data[0].toarray()\ny = data[1]\nparC = np.array([2 ** i for i in np.arange(0,8, dtype=float)])\nparG = np.array([2 ** i for i in np.arange(-8,8, dtype=float)])\n\nnumPar = (3+4)\nnumFold = 10\nnumFeat = np.size(X,1)\nlb = np.zeros(numFeat+numPar)\nub = np.ones(numFeat+numPar)\ns = GWO(fitness,lb,ub,40,20)\n\n\n" } ]
2
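Both optimizers above turn a continuous metaheuristic into a binary feature selector the same way: squash each coordinate through the sigmoid transfer function TF = 1/(1 + exp(-10*(x - 0.5))) and draw the bit against a uniform random number (the "Binary BAT/GWO by sigmoid" blocks). That shared step, isolated as a vectorized helper:

import numpy as np

def binarize(position):
    # Sigmoid transfer function used by both BAT.py and GWO.py:
    # coordinates near 1 map to bits that are almost always 1,
    # coordinates near 0 to bits that are almost always 0.
    tf = 1.0 / (1.0 + np.exp(-10.0 * (position - 0.5)))
    return (tf >= np.random.uniform(0.0, 1.0, size=np.shape(position))).astype(int)

print(binarize(np.array([0.05, 0.5, 0.95])))  # e.g. [0 1 1]; the middle bit is a coin flip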
fullstackenviormentss/scrapy-monkeylearn
https://github.com/fullstackenviormentss/scrapy-monkeylearn
1ee8050b99c9ab8b8373acf15788c7b039bd6efc
366340daded7bc76127806a986703da3cb56b5ec
2a54036f9795cc3913e1c3f9e84f1242c1d4bef4
refs/heads/master
2021-04-26T22:55:52.975186
2017-04-28T12:51:02
2017-04-28T12:51:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6230740547180176, "alphanum_fraction": 0.6242014169692993, "avg_line_length": 40.90550994873047, "blob_id": "35b2c14c6257ccf8af6887b0f722d5c14c3a3b06", "content_id": "52117befc24f212066e44c3e9ca1be869e9f2ee3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5322, "license_type": "permissive", "max_line_length": 90, "num_lines": 127, "path": "/scrapy_monkeylearn/pipelines.py", "repo_name": "fullstackenviormentss/scrapy-monkeylearn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport logging\nfrom threading import Thread, Lock, Event\n\nimport six\nfrom monkeylearn import MonkeyLearn\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy import signals\nfrom twisted.internet import defer\n\nlogger = logging.getLogger(__name__)\n\n\nclass MonkeyLearnPipeline(object):\n\n MAX_DELAY_BETWEEN_REQUESTS = 10\n\n def __init__(self, token, module_id, fields_to_classify,\n field_classification_output, batch_size, use_sandbox, crawler):\n self.deferreds = []\n self.token = token\n self.module_id = module_id\n self.ml = MonkeyLearn(token)\n if isinstance(fields_to_classify, six.string_types):\n fields_to_classify = [x.strip() for x in fields_to_classify.split(',')]\n elif not isinstance(fields_to_classify, (list, tuple)):\n fields_to_classify = []\n self.fields_to_classify = fields_to_classify\n self.field_classification_output = field_classification_output\n self.batch_size = batch_size\n self.use_sandbox = use_sandbox\n self.crawler = crawler\n self._lock = Lock()\n self._requester_thread = Thread(target=self._run_requester_thread)\n self._event = Event()\n self._stopped = False\n\n @classmethod\n def from_crawler(cls, crawler):\n # first check if the extension should be enabled and raise NotConfigured otherwise\n required_settings = [\n 'MONKEYLEARN_TOKEN',\n 'MONKEYLEARN_MODULE',\n 'MONKEYLEARN_FIELD_OUTPUT',\n 'MONKEYLEARN_FIELD_TO_PROCESS'\n ]\n if not all(crawler.settings.get(s) for s in required_settings):\n raise NotConfigured\n\n token = crawler.settings.get('MONKEYLEARN_TOKEN')\n module_id = crawler.settings.get('MONKEYLEARN_MODULE')\n fields_to_classify = crawler.settings.getlist('MONKEYLEARN_FIELD_TO_PROCESS')\n field_classification_output = crawler.settings.get('MONKEYLEARN_FIELD_OUTPUT')\n batch_size = crawler.settings.get('MONKEYLEARN_BATCH_SIZE', 200)\n use_sandbox = crawler.settings.get('MONKEYLEARN_USE_SANDBOX', False)\n\n pipeline = cls(token, module_id,\n fields_to_classify, field_classification_output,\n batch_size, use_sandbox, crawler)\n crawler.signals.connect(pipeline.spider_opened, signal=signals.spider_opened)\n crawler.signals.connect(pipeline.spider_closed, signal=signals.spider_closed)\n\n return pipeline\n\n def process_item(self, item, spider):\n if (not self.fields_to_classify or\n not all([f in item for f in self.fields_to_classify])):\n return item\n dfd = defer.Deferred()\n with self._lock:\n self.deferreds.append((dfd, item))\n if len(self.deferreds) >= self.batch_size:\n self._event.set()\n return dfd\n\n def _run_requester_thread(self):\n while True:\n self._event.wait(self.MAX_DELAY_BETWEEN_REQUESTS)\n self._event.clear()\n if self._stopped:\n break\n # Requests to MonkeyLearn API should be issued from a separate thread.\n # This happens because pipeline returns deferreds that are activated\n # by the following method. 
If spider finishes before required amount of\n # items is collected in the batch this pipeline should call the API one\n # final time to cleanup. This cleanup cannot be done in spider_closed or\n # spider_idle handler because it causes job to deadlock. Requests associated\n # with the remaining items are not removed from active set in the engine slot\n # because respective deferreds were not activated yet and this blocks\n # spider_idle and spider_closed signals.\n logger.info('Sending request to MonkeyLearn API')\n try:\n self._analyze_items_batch_with_monkeylearn()\n except:\n logger.exception('Error requesting MonkeyLearn API')\n\n def _analyze_items_batch_with_monkeylearn(self):\n with self._lock:\n deferreds, self.deferreds = self.deferreds, []\n if not deferreds:\n return\n text_list = []\n for _, item in deferreds:\n text = ' '.join([\n six.text_type(item[f]).strip() for f in self.fields_to_classify\n ]).strip()\n text_list.append(text)\n self.crawler.stats.inc_value('monkeylearn_api/requests_count')\n if self.module_id.startswith('cl_'):\n result = self.ml.classifiers.classify(\n self.module_id, text_list, sandbox=self.use_sandbox).result\n elif self.module_id.startswith('ex_'):\n result = self.ml.extractors.extract(self.module_id, text_list).result\n else:\n result = self.ml.pipelines.run(self.module_id, text_list).result\n for i, (dfd, item) in enumerate(deferreds):\n item[self.field_classification_output] = result[i]\n # activate deferred\n dfd.callback(item)\n\n def spider_opened(self, spider):\n self._requester_thread.start()\n\n def spider_closed(self, spider):\n self._stopped = True\n self._event.set()\n self._requester_thread.join()\n" }, { "alpha_fraction": 0.6442647576332092, "alphanum_fraction": 0.6533603072166443, "avg_line_length": 16.359649658203125, "blob_id": "75d868e33ef992060862933f1c077ca7c3fc44eb", "content_id": "961b8111e7ece874a2671ef2572be501e9c4e53f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1979, "license_type": "permissive", "max_line_length": 122, "num_lines": 114, "path": "/README.rst", "repo_name": "fullstackenviormentss/scrapy-monkeylearn", "src_encoding": "UTF-8", "text": "scrapy-monkeylearn\n==================\n\nA `Scrapy`_ pipeline to categorize items using `MonkeyLearn`_.\n\nSettings\n--------\n\nMONKEYLEARN_BATCH_SIZE\n~~~~~~~~~~~~~~~~~~~~~~\n\nThe size of the item batches sent to MonkeyLearn.\n\nDefault: ``200``\n\nExample:\n\n.. code-block:: python\n\n MONKEYLEARN_BATCH_SIZE = 200\n\nMONKEYLEARN_MODULE\n~~~~~~~~~~~~~~~~~~\n\nThe ID of the monkeylearn module.\n\nExample:\n\n.. code-block:: python\n\n MONKEYLEARN_MODULE = 'cl_oFKL5wft'\n\nMONKEYLEARN_USE_SANDBOX\n~~~~~~~~~~~~~~~~~~~~~~~\n\nIn case of using a classifier, if the sandbox version should be used.\n\nDefault: ``False``\n\nExample:\n\n.. code-block:: python\n\n MONKEYLEARN_USE_SANDBOX = True\n\nMONKEYLEARN_TOKEN\n~~~~~~~~~~~~~~~~~\n\nThe auth token.\n\nExample:\n\n.. code-block:: python\n\n MONKEYLEARN_TOKEN = 'TWFuIGlzIGRp...'\n\nMONKEYLEARN_FIELD_TO_PROCESS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nA field or list of Item text fields to use for classification.\nAlso comma-separated string with field names is supported.\n\nExample:\n\n.. code-block:: python\n\n MONKEYLEARN_FIELD_TO_PROCESS = 'title'\n\n.. code-block:: python\n\n MONKEYLEARN_FIELD_TO_PROCESS = ['title', 'description']\n\n.. 
code-block:: python\n\n MONKEYLEARN_FIELD_TO_PROCESS = 'title,description'\n\nMONKEYLEARN_FIELD_OUTPUT\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe field where the MonkeyLearn output will be stored.\n\nExample:\n\n.. code-block:: python\n\n MONKEYLEARN_FIELD_OUTPUT = 'categories'\n\n\nAn example value of the `MONKEYLEARN_FIELD_OUTPUT` field after classification is:\n\n.. code-block:: python\n\n [{'label': 'English', 'probability': 0.321}]\n\nUsage\n-----\n\nIn your *settings.py* file, add the previously described settings and add ``MonkeyLearnPipeline`` to your pipelines, e.g.:\n\n.. code-block:: python\n\n ITEM_PIPELINES = {\n 'scrapy_monkeylearn.pipelines.MonkeyLearnPipeline': 100,\n }\n\nLicense\n-------\n\nCopyright (c) 2015 `MonkeyLearn`_.\n\nReleased under the MIT license.\n\n.. _Scrapy: http://scrapy.org/\n.. _MonkeyLearn: http://www.monkeylearn.com/\n" }, { "alpha_fraction": 0.5508021116256714, "alphanum_fraction": 0.574331521987915, "avg_line_length": 29.161291122436523, "blob_id": "0461dd615eba8d80d889dd61d32885c510995e26", "content_id": "ed1d8e6277b5d07e8515b32291999dfbc1a2f0e6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "permissive", "max_line_length": 50, "num_lines": 31, "path": "/setup.py", "repo_name": "fullstackenviormentss/scrapy-monkeylearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom setuptools import setup\n\nsetup(\n name='scrapy-monkeylearn',\n version='0.3.0',\n description='MonkeyLearn pipeline for Scrapy',\n author='Fernando Borretti',\n author_email='[email protected]',\n maintainer='Scrapinghub',\n maintainer_email='[email protected]',\n packages=['scrapy_monkeylearn'],\n install_requires=[\n 'Scrapy>=1.0',\n 'monkeylearn>=0.2.4',\n 'six>=1.5.2'\n ],\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.6590909361839294, "avg_line_length": 13.666666984558105, "blob_id": "986ee18118172636bc8e76f9aa0a356304c5379f", "content_id": "e0361c727c0277c80bf42c466bc31fc844cb64ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "permissive", "max_line_length": 18, "num_lines": 3, "path": "/requirements.txt", "repo_name": "fullstackenviormentss/scrapy-monkeylearn", "src_encoding": "UTF-8", "text": "Scrapy==1.0.5\nmonkeylearn==0.2.4\nsix>=1.5.2\n" } ]
4
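The subtle part of the pipeline above is flow control: process_item hands Scrapy an unfired Deferred, so the engine holds each item until the background thread classifies a full batch and fires the callbacks. A toy reduction of that pattern (synchronous flush, no MonkeyLearn call) to show the mechanics:

from threading import Lock
from twisted.internet import defer

class BatchingPipeline:
    def __init__(self, batch_size=3):
        self.batch_size = batch_size
        self.pending = []
        self._lock = Lock()

    def process_item(self, item, spider=None):
        dfd = defer.Deferred()  # Scrapy parks the item on this deferred
        with self._lock:
            self.pending.append((dfd, item))
            flush = len(self.pending) >= self.batch_size
        if flush:
            self._flush()
        return dfd

    def _flush(self):
        with self._lock:
            batch, self.pending = self.pending, []
        for dfd, item in batch:
            item['batched'] = True  # stand-in for the MonkeyLearn result
            dfd.callback(item)      # releases the item back to the engine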
KRHS-GameProgramming-2019/Mining-Man
https://github.com/KRHS-GameProgramming-2019/Mining-Man
39f258f2b187efe76d74404a7a89fe12c4f6807f
394b9f00e9cdc289e8ea617a39acf990fa7b5f8d
073baa73618b10c17fcf130f5c62cf87d96e14cc
refs/heads/master
2020-09-26T23:04:09.262421
2020-06-02T13:31:58
2020-06-02T13:31:58
226,362,228
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7532467246055603, "alphanum_fraction": 0.7532467246055603, "avg_line_length": 49.66666793823242, "blob_id": "1296bbc148f8e4f3402b812d51888549c898d109", "content_id": "51173d0bcfe2be03095ba700d3de87ec52f2a289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 80, "num_lines": 3, "path": "/titlescreen.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "#Screen that pops up when you open the game, has Play, Options, and Quit buttons\n\nimport Pickaxe, Ore, Game, Settings, Player, Screens, Getters, pygame\n\n\n" }, { "alpha_fraction": 0.5714722275733948, "alphanum_fraction": 0.591325581073761, "avg_line_length": 25.609756469726562, "blob_id": "2c68ab0ce717ae4d681ba5e0ca243182df924c4c", "content_id": "3b841177fcc2fe17c33c58e8590eeea8f0629302", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3274, "license_type": "no_license", "max_line_length": 134, "num_lines": 123, "path": "/Ore.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "#Ore blocks file, add different ones (color, texture)\n\nimport math, pygame, random\n\n\nores = []\n\nclass Ore():\n\tdef __init__(self, kind=None, pos=[0]):\n\t\tself.sound = pygame.mixer.Sound('Sound/pickaxe/test.ogg')\n\t\toreTypes = [\"coal\", \"iron\", \"ruby\", \"diamond\", \"amethyst\", \"emerald\", \"rainbow\"]\n\t\tif kind == None:\n\t\t\tnum = random.randint(0,99)\n\t\t\tif num < 11:\n\t\t\t\tkind = \"rainbow\"\n\t\t\telif num < 11 + 11:\n\t\t\t\tkind = \"diamond\"\n\t\t\telif num < 22 + 11:\n\t\t\t\tkind = \"emerald\"\n\t\t\telif num < 33 + 11:\n\t\t\t\tkind = \"amethyst\"\n\t\t\telif num < 44 + 11:\n\t\t\t\tkind = \"ruby\"\n\t\t\telif num < 55 + 11:\n\t\t\t\tkind = \"iron\"\n\t\t\telif num < 66 + 11:\n\t\t\t\tkind = \"coal\"\n\t\t\telse:\n\t\t\t\tkind = \"dirt\"\n\t\t\t\n\t\tif kind == \"coal\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/coalDirt.png\")\n\t\telif kind == \"iron\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/IRONDirt.png\")\n\t\telif kind == \"ruby\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/RubieDirt.png\")\n\t\telif kind == \"diamond\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/diamondDirt.png\")\n\t\telif kind == \"amethyst\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/AmethestDirt.png\")\n\t\telif kind == \"emerald\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/EmeraldDirt.png\")\n\t\telif kind == \"rainbow\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/RainbowDirt.png\")\n\t\telif kind == \"dirt\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/BaseDirt.png\")\n\t\telif kind == \"dead\":\n\t\t\tself.image = pygame.image.load(\"images/Ores/Dead.png\")\n\t\t\t\t\t \n\t\t\t\t\t \n\t\tself.rect = self.image.get_rect(topleft = pos)\n\t\t\n\t\tself.living = True\n\t\tself.kind = kind\n\t\t\n\t\tself.vane = None\n\t \n\tdef __str__(self):\n\t\treturn self.kind + \" at \" + str(self.rect.left)+ \", \" + str(self.rect.top) + \" vane with \" + str(len(self.vane)-1) + \" other blockS\"\n\t\t\n\tdef getDist(self, other):\n\t\tx1 = self.rect.centerx\n\t\tx2 = other.rect.centerx\n\t\ty1 = self.rect.centery\n\t\ty2 = other.rect.centery\n\t\treturn math.sqrt((x2-x1)**2 + (y2-y1)**2)\n\t \n\tdef moveOver(self): \n\t\tself.rect = self.rect.move([80,0])\n\t\n\tdef moveBack(self): \n\t\tself.rect = 
self.rect.move([-80,0])\n\t\t\n\tdef moveDown(self): \n\t\tself.rect = self.rect.move([0,80])\n\t\n\tdef moveUp(self): \n\t\tself.rect = self.rect.move([0,-80])\n\t\t\n\tdef kill(self):\n\t\tself.living = False\n\t\n\tdef oreCollide(self, other):\n\t\tif self != other:\n\t\t\tif self.rect.right > other.rect.left:\n\t\t\t\tif self.rect.left < other.rect.right:\n\t\t\t\t\tif self.rect.bottom > other.rect.top:\n\t\t\t\t\t\tif self.rect.top < other.rect.bottom:\n\t\t\t\t\t\t\treturn True\n\t\treturn False\n\t\t\n\t# ~ def rareness(kind = None):\n\t\t# ~ if kind == \"coal\":\n\t\t\t# ~ rare = 2\n\t\t# ~ if kind == \"iron\":\n\t\t\t# ~ rare = 3\n\t\t# ~ if kind == \"ruby\":\n\t\t\t# ~ rare = 4\n\t\t# ~ if kind == \"diamond\":\n\t\t\t# ~ rare = 7\n\t\t# ~ if kind == \"amethyst\":\n\t\t\t# ~ rare = 5\n\t\t# ~ if kind == \"emerald\":\n\t\t\t# ~ rare = 6\n\t\t# ~ if kind == \"rainbow\":\n\t\t\t# ~ rare = 8\n\t\t# ~ if kind == \"dirt\":\n\t\t\t# ~ rare = 1\n\t\n\tdef pickCollide(self, other):\n\t\tif self != other:\n\t\t\tif self.rect.right > other.rect.centerx:\n\t\t\t\tif self.rect.left < other.rect.centerx:\n\t\t\t\t\tif self.rect.bottom > other.rect.centery:\n\t\t\t\t\t\tif self.rect.top < other.rect.centery:\n\t\t\t\t\t\t\tself.sound.play()  # play only on an actual hit, not on every collision test\n\t\t\t\t\t\t\tself.living = False\n\t\t\t\t\t\t\treturn True\n\t\treturn False\n\t\t\n\t# ~ def endgame(self):\n\t\t# ~ if self.rect > [800, 0]:\n\t\t\t# ~ screens == \"gameover\" #ignore for now\n\n" }, { "alpha_fraction": 0.5051482915878296, "alphanum_fraction": 0.5189456343650818, "avg_line_length": 36.068702697753906, "blob_id": "befb5e02d1f6bc3b4ee5c64716b4404e77fa8667", "content_id": "a700739b3d898e1b4f5640794f07b28ac2928c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4856, "license_type": "no_license", "max_line_length": 93, "num_lines": 131, "path": "/Pickaxe.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "import pygame\nfrom math import *\n\n#Pickaxe(s), one breaks clusters and the other one breaks one block, only on the first row\n\nclass Pickaxe():\n    def __init__(self, pos=[860,595]):\n        self.images = [pygame.image.load(\"images/Pickaxe/pickaxe1.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe2.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe3.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe4.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe5.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe6.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe7.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe8.png\")\n                ]\n        \n        \n        self.images2 = [pygame.image.load(\"images/Pickaxe/pickaxe1.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe8.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe7.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe6.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe5.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe4.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe3.png\"),\n                pygame.image.load(\"images/Pickaxe/pickaxe2.png\")\n                ]\n        \n        self.sound = pygame.mixer.Sound('Sound/pickaxe/test.ogg')\n        \n        self.frame = 0\n        self.frameMax = len(self.images)-1 \n        \n        self.frameMax2 = len(self.images2)-1 \n        self.image2 = self.images2[self.frame]\n        \n        self.image = self.images[self.frame]\n        self.rect = self.image.get_rect(bottomright = pos)\n        self.maxSpeed = -15\n        self.startPos = pos\n        \n        self.speed = self.speedx, self.speedy = 0,0\n        self.launched = False\n        self.rx = self.rect.centerx\n        self.ry = self.rect.centery\n        
self.target = [None, None]\n self.direct = \"\"\n \n self.animationTimer = 0\n self.animationTimerMax = 60/10\n \n self.canHit = False\n \n def go(self, pos):\n xdist = float(self.rect.centerx - pos[0])\n ydist = float(self.rect.centery - pos[1])\n if xdist > 0 and ydist > 0:\n angle = degrees(atan(ydist/xdist))\n \n \n self.speedx = self.maxSpeed * cos(radians(angle))\n self.speedy = self.maxSpeed * sin(radians(angle))\n self.target = pos\n self.direct = \"send\"\n self.launched = True\n\n \n def back(self):\n xdist = float(self.rect.centerx - self.startPos[0])\n ydist = float(self.rect.centery - self.startPos[1])\n if xdist < 0 and ydist < 0:\n angle = degrees(atan(ydist/xdist))\n \n self.speedx = -(self.maxSpeed * cos(radians(angle)))\n self.speedy = -(self.maxSpeed * sin(radians(angle)))\n self.target = self.startPos\n self.direct = \"back\"\n self.launched = True\n self.canHit = True\n \n def move(self):\n self.rx += self.speedx\n self.ry += self.speedy\n x = int(self.rx)\n y = int(self.ry)\n self.rect.center = [x,y]\n \n def update(self):\n self.move()\n \n if self.direct == \"send\":\n self.animate2()\n if self.rect.centerx < self.target[0] and self.rect.centery < self.target[1]:\n self.back()\n elif self.direct == \"back\":\n self.canHit = False\n self.animate()\n if self.rx > self.target[0] and self.ry > self.target[1]:\n self.rect = self.image.get_rect(bottomright = self.startPos)\n self.speed = self.speedx, self.speedy = 0,0\n self.launched = False\n self.rx = self.rect.centerx\n self.ry = self.rect.centery\n self.target = [None, None]\n self.direct = \"\"\n self.frame = 0\n self.animationTimer = 0\n self.image = self.images[self.frame]\n\n\n def animate(self):\n self.animationTimer+= 3\n if self.animationTimer > self.animationTimerMax:\n self.animationTimer = 0\n \n if self.frame >= self.frameMax:\n self.frame = 0\n else:\n self.frame += 1\n self.image = self.images[self.frame]\n \n def animate2(self):\n self.animationTimer+= 3\n if self.animationTimer > self.animationTimerMax:\n self.animationTimer = 0\n \n if self.frame >= self.frameMax2:\n self.frame = 0\n else:\n self.frame += 1\n self.image = self.images2[self.frame]\n" }, { "alpha_fraction": 0.7213375568389893, "alphanum_fraction": 0.7213375568389893, "avg_line_length": 37.5625, "blob_id": "fdf141b515eadffa07f131286f7b89791fc8ea43", "content_id": "7ce310d0db2509b07e90d881e133818a3cbe63b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 126, "num_lines": 16, "path": "/Screens.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "def Titlescreen(debug=False):\n if debug: print(\"TitleScreen Function\")\n \n backgroundImage = (r\"C:\\Users\\Student\\Documents\\Game Programming\\Mining Man\\Images\\TitleScreen\\titlescreenbackground.png\")\n \n \ndef options(debug=False):\n if debug: print(\"TitleScreen Function\")\n \n backgroundImage = (r\"C:\\Users\\Student\\Documents\\Game Programming\\Mining Man\\Images\\TitleScreen\\titlescreenbackground.png\")\n \n \ndef unicorn(debug=False):\n if debug: print(\"TitleScreen Function\")\n \n backgroundImage = (r\"C:\\Users\\Student\\Documents\\Game Programming\\Mining Man\\Images\\TitleScreen\\download.jpg\")\n \n\n \n" }, { "alpha_fraction": 0.5911664962768555, "alphanum_fraction": 0.5979614853858948, "avg_line_length": 20.825000762939453, "blob_id": "18027bea0df4f13f8acf48d1708c9defa5590b4e", "content_id": 
"14b9a9e1bbfb3596db66a9c3a276c871858892a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 883, "license_type": "no_license", "max_line_length": 66, "num_lines": 40, "path": "/practice.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "import pygame, sys, math, random\nfrom Pickaxe import *\nfrom Player import *\nfrom Game import *\nfrom Ore import *\npygame.init()\n\nsize = [900, 640]\nscreen = pygame.display.set_mode(size)\n\n\nimage = pygame.image.load(\"images/TitleScreen/tempbackground.png\")\nimgRect = image.get_rect()\n\n\n\n\npick = Pickaxe()\nGuy = Guy()\n\n\nwhile True:\n for event in pygame.event.get():\n imgRect = image.get_rect()\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if not pick.launched:\n pick.go(event.pos)\n print(event.pos)\n #elif pick.launched: #return before hits spot\n # pick.back()\n \n\n pick.update() \n \n screen.blit(image, imgRect)\n screen.blit(Guy.image, Guy.rect)\n screen.blit(pick.image, pick.rect)\n pygame.display.flip()\n \n \n\n" }, { "alpha_fraction": 0.4754241108894348, "alphanum_fraction": 0.48534145951271057, "avg_line_length": 33.60542297363281, "blob_id": "d065c8ce47823c9b5d1c47b07281e4fada69e4fb", "content_id": "81926d2a54bb1de41dcc2de24215ef6422badc5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11495, "license_type": "no_license", "max_line_length": 86, "num_lines": 332, "path": "/Game.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "#Main game file\nimport pygame, sys, math, random\nfrom Player import * \nfrom Screens import *\nfrom Getters import *\nfrom Settings import *\nfrom options import *\nfrom practice import *\nfrom Button import *\nfrom Ore import *\nfrom Cluster import *\nfrom Text import *\npygame.init()\n\nsize = [900, 640]\nscreen = pygame.display.set_mode(size)\nscreens = \"menu\"\n\ncounter = 1;\nscore = Hud(\"Score: \", [780,30])\nkills = 0\n\nclock = pygame.time.Clock()\n\n#Music code by caden\npygame.mixer.init()\nsongs = [\"Sound/Music/spacecave.ogg\"\n]\nsongNum = 0\nmaxSongNum = len(songs)-1\npygame.mixer.music.load(songs[songNum])\npygame.mixer.music.set_volume(0.4)\n\npickaxe_sound = pygame.mixer.Sound('Sound/pickaxe/test.ogg')\n\n\n\n#----------------------------Game Code----------------------------------\n\n\n\n\nwhile True:\n #---------------------------Menu------------------------------------\n image = pygame.image.load(\"images/background/caveentrancea.png\")\n imgRect = image.get_rect()\n pygame.mixer.init()\n pygame.mixer.music.load(\"Sound/Music/spacecave.ogg\")\n pygame.mixer.music.play(loops=-1, start=0.0)\n playButton=Button(\"play\", [350,100])\n optionsButton=Button(\"options\", [350, 300])\n exitButton=Button(\"exit\", [350, 500])\n # ~ nightButton=Button(\"test\", [100,200])\n \n while screens == \"menu\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.MOUSEMOTION:\n \n playButton.update(event.pos, event.buttons)\n optionsButton.update(event.pos, event.buttons)\n exitButton.update(event.pos, event.buttons)\n # ~ nightButton.update(event.pos, event.buttons)\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n \n playButton.click(event.pos)\n optionsButton.click(event.pos)\n exitButton.click(event.pos)\n # ~ nightButton.click(event.pos)\n \n elif event.type == pygame.MOUSEBUTTONUP:\n if 
playButton.click(event.pos):\n screens = \"game\"\n if optionsButton.click(event.pos):\n screens = \"options\"\n # ~ if nightButton.click(event.pos):\n # ~ screens = \"night\"\n if exitButton.click(event.pos):\n sys.exit()\n \n screen.blit(image, imgRect)\n screen.blit(playButton.image, playButton.rect)\n screen.blit(optionsButton.image, optionsButton.rect)\n screen.blit(exitButton.image, exitButton.rect)\n # ~ screen.blit(nightButton.image, nightButton.rect)\n pygame.display.flip()\n\n#-------------------------- Options------------------------------------- \n image = pygame.image.load(\"images/TitleScreen/titlescreenbackground-options1.png\")\n imgRect = image.get_rect()\n backButton=Button(\"test\", [675,50]) \n \n while screens == \"options\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.MOUSEMOTION:\n backButton.update(event.pos, event.buttons)\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n backButton.click(event.pos)\n \n elif event.type == pygame.MOUSEBUTTONUP:\n if backButton.click(event.pos):\n screens = \"menu\"\n \n screen.blit(image, imgRect)\n screen.blit(backButton.image, backButton.rect)\n pygame.display.flip()\n \n #--------------------------Game Selection-----------------------------\n image = pygame.image.load(\"images/TitleScreen/tempselection.png\")\n imgRect = image.get_rect()\n resumeButton=Button(\"test\", [350,100])\n optionsButton=Button(\"test\", [350, 300])\n menuButton=Button(\"test\", [350, 500])\n \n while screens == \"select\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.MOUSEMOTION:\n \n resumeButton.update(event.pos, event.buttons)\n optionsButton.update(event.pos, event.buttons)\n menuButton.update(event.pos, event.buttons)\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n \n resumeButton.click(event.pos)\n optionsButton.click(event.pos)\n menuButton.click(event.pos)\n \n elif event.type == pygame.MOUSEBUTTONUP:\n if resumeButton.click(event.pos):\n screens = \"game\"\n if optionsButton.click(event.pos):\n screens = \"gameoptions\"\n if menuButton.click(event.pos):\n screens = \"menu\"\n \n screen.blit(image, imgRect)\n screen.blit(resumeButton.image, resumeButton.rect)\n screen.blit(optionsButton.image, optionsButton.rect)\n screen.blit(menuButton.image, menuButton.rect)\n pygame.display.flip()\n \n #--------------------------Game Options-----------------------------\n image = pygame.image.load(\"images/TitleScreen/titlescreenbackground-options1.png\")\n imgRect = image.get_rect()\n backButton=Button(\"test\", [675,50]) \n \n while screens == \"gameoptions\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.MOUSEMOTION:\n backButton.update(event.pos, event.buttons)\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n backButton.click(event.pos)\n \n elif event.type == pygame.MOUSEBUTTONUP:\n if backButton.click(event.pos):\n screens = \"select\"\n \n \n screen.blit(image, imgRect)\n screen.blit(backButton.image, backButton.rect)\n pygame.display.flip()\n #--------------------------Night Mode-------------------------------\n image = pygame.image.load(\"images/background/black entrance.png\")\n imgRect = image.get_rect()\n playButton=Button(\"test\", [350,100])\n optionsButton=Button(\"test\", [350, 300])\n exitButton=Button(\"test\", [350, 500])\n \n while screens == \"night\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif 
event.type == pygame.MOUSEMOTION:\n \n playButton.update(event.pos, event.buttons)\n optionsButton.update(event.pos, event.buttons)\n exitButton.update(event.pos, event.buttons)\n \n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n \n playButton.click(event.pos)\n optionsButton.click(event.pos)\n exitButton.click(event.pos)\n \n \n elif event.type == pygame.MOUSEBUTTONUP:\n if playButton.click(event.pos):\n screens = \"game\"\n if optionsButton.click(event.pos):\n screens = \"options\"\n if exitButton.click(event.pos):\n sys.exit()\n \n screen.blit(image, imgRect)\n screen.blit(playButton.image, playButton.rect)\n screen.blit(optionsButton.image, optionsButton.rect)\n screen.blit(exitButton.image, exitButton.rect)\n pygame.display.flip()\n \n #---------------------------end game----------------------------------#\n image = pygame.image.load(\"images/background/gameover.png\")\n imgRect = image.get_rect()\n \n\t \n while screens == \"gameover\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n screens = \"game\"\n elif event.key == pygame.K_ESCAPE:\n screens = \"menu\"\n screen.blit(image, imgRect)\n pygame.display.flip()\n \n \n \n #---------------------------Game------------------------------------\n image = pygame.image.load(\"images/TitleScreen/tempbackground.png\")\n imgRect = image.get_rect()\n pick = Pickaxe()\n guy = Guy()\n cluster = Cluster()\n oreTimer = 0 \n oreTimerMax = 3*60 # 3 seconds\n while screens == \"game\":\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit();\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n screens = \"select\"\n \n # ~ if event.key ==pygame.K_SPACE:\n # ~ for oreC in ores:\n # ~ for ore in oreC:\n # ~ ore.moveOver()\n \n # ~ #------Manual Ores----------#\n # ~ if event.key ==pygame.K_SPACE:\n # ~ cluster.addCol()\n # ~ #-------End Manual Ores----------------#\n\n \n elif event.key == pygame.K_i:\n print(str(cluster))\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if not pick.launched:\n pick.go(event.pos)\n \n \n #elif pick.launched: #return before hits spot\n # pick.back()\n \n #----------Auto Ores----------------#\n if oreTimer < oreTimerMax:\n oreTimer += 1\n else:\n oreTimer = 0\n cluster.addCol()\n score.update(kills)\n #-------End Auto Ores----------------#\n \n pick.update()\n \n def rareness(kind = 0):\n kind = ore.kind\n if kind == \"coal\":\n rare = 2\n elif kind == \"iron\":\n rare = 3\n elif kind == \"ruby\":\n rare = 4\n elif kind == \"diamond\":\n rare = 7\n elif kind == \"amethyst\":\n rare = 5\n elif kind == \"emerald\":\n rare = 6\n elif kind == \"rainbow\":\n rare = 8\n elif kind == \"dirt\":\n rare = 1\n elif kind == \"dead\":\n rare = 0\n elif kind == None :\n rare = 0\n print(\"rareness: \")\n print(rare)\n \n if pick.canHit:\n cluster.pickCollide(pick)\n rareness()\n kills +=((len(cluster.vanes)))\n \n # ~ for oreCollumn in cluster.ores:\n # ~ if len(oreCollumn) > 10:\n # ~ screens == \"gameover\"\n # ~ print(\"end of game\")\n \n \n \n score.update(kills)\n \n \n cluster.update()\n # ~ cluster.endgame()\n \n \n \n screen.blit(image, imgRect)\n for oreC in cluster.ores:\n for ore in oreC:\n screen.blit(ore.image, ore.rect)\n screen.blit(guy.image, guy.rect)\n screen.blit(pick.image, pick.rect)\n screen.blit(score.image, score.rect)\n pygame.display.flip()\n clock.tick(60)\n \n\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, 
"avg_line_length": 44, "blob_id": "960e3865266d355622a0dd2fa1612fffaaee1413", "content_id": "16d5c00fbbf353b3b93f09359538d5667db1a16f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/Settings.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "#Settings tab in main menu, add sound option\n" }, { "alpha_fraction": 0.43995633721351624, "alphanum_fraction": 0.45633187890052795, "avg_line_length": 29.53333282470703, "blob_id": "0ffeb44e164ed256b55bad33e718735f9a864186", "content_id": "8700cc29af725a62a8b0394c1dbcf53214add6c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1832, "license_type": "no_license", "max_line_length": 95, "num_lines": 60, "path": "/orerender.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "#Code by Jack Chambers, modified by Kyle Goodwin\n\nfrom math import *\nfrom pygame import*\nfrom practice import *\n\n\ntms = []\n\nclass OreMap:\n def __init__(self, size, level):\n self.size = size\n self.tiles = []\n self.level = level\n self.loadingMap = False\n self.buildMap()\n tms.append(self)\n\n def buildMap(self):\n mapData = LevelHandler.loadMapFile(self.level)\n mapList = LevelHandler.parseMap(mapData)\n tileData = LevelHandler.parseData(mapData)\n try:\n stockTypes = LevelHandler.getStockBoxes(tileData)\n except:\n stockTypes = []\n boxCount = 0\n y = -1\n for line in mapList:\n x=-1\n y+=1\n for char in line:\n x+=1\n if char == \"v\":\n self.tiles.append(Tile((x*80,y*80), \"Images/Tiles/stove.png\", True, False))\n if char == \"s\":\n try:\n food = stockTypes[boxCount]\n except:\n food = None\n self.tiles.append(StockBox((x*80,y*80), food, True))\n boxCount += 1\n if char == \"#\":\n self.tiles.append(Counter((x*80,y*80), None))\n if char == \"t\":\n self.tiles.append(Trash((x*80,y*80)))\n if char == \"d\":\n self.tiles.append(DeliveryTable((x*80,y*80), None))\n if char == \"c\":\n self.tiles.append(ChoppingBoard((x*80,y*80), None))\n \n def render(self,screen):\n for ore in self.ores:\n screen.blit(ore.image, ore.rect)\n try:\n item = ore.holding\n except:\n pass\n if item:\n item.update(ore)\n" }, { "alpha_fraction": 0.4735276699066162, "alphanum_fraction": 0.49732303619384766, "avg_line_length": 26.47541046142578, "blob_id": "efc33116acfda04f2f6379021a6b8cb47c3fb079", "content_id": "7c2c56a1d1c445452a6836a63a093bd0b29522b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1681, "license_type": "no_license", "max_line_length": 68, "num_lines": 61, "path": "/Text.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "# made by sam Loyd adapted by Annie Chambers\nimport pygame, sys, math\n\nclass Hud():\n def __init__(self, baseText, startPos=[0,1]):\n if self == \"score\":\n self.baseText = \"Score: \"\n self.file = \"saves/scorecounter.txt\"\n pygame.font.get_fonts()\n pygame.font.match_font\n self.font = pygame.font.Font(None, 30)\n \n WHITE = (255,255,255)\n \n self.baseText = baseText\n \n self.image = self.font.render(\"Score: 0\", 1, WHITE)\n self.rect = self.image.get_rect(topleft = startPos)\n \n \n def update(self, score):\n WHITE = (255,255,255)\n text = self.baseText + str(score)\n self.image = self.font.render(text , 1, WHITE)\n self.rect = self.image.get_rect(topleft = 
self.rect.topleft)\n \n \n def die(self):\n in_file = open(self.file, \"r\")\n count = int(in_file.read())\n in_file.close()\n count = count + 1\n print(count)\n out_file = open(self.file, \"w\")\n out_file.close()\n \n \nif __name__ == \"__main__\":\n # Death Counter\n\n # Variables\n deathcount = float(0)\n\n pygame.init()\n BLACK = (0, 0, 0)\n WIDTH = 320\n HEIGHT = 260\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)\n \n windowSurface.fill(BLACK)\n \n \n while True:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n hud.die()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n \n" }, { "alpha_fraction": 0.49727627635002136, "alphanum_fraction": 0.5073930025100708, "avg_line_length": 33.69444274902344, "blob_id": "d3800976432e817da30af8dbb0ef0814fba103b0", "content_id": "2d571e26775c7a964b7584aac346bce63767ed54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1285, "license_type": "no_license", "max_line_length": 82, "num_lines": 36, "path": "/Button.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "from sys import *\nfrom math import *\nimport pygame\n\n\nclass Button():\n def __init__(self, name, pos=[0,0]):\n self.name=name\n self.baseImage=pygame.image.load(\"Images/Buttons/\"+name+\".png\")\n self.hoverImage=pygame.image.load(\"Images/Buttons/\"+name+\"_hover.png\")\n self.clickedImage=pygame.image.load(\"Images/Buttons/\"+name+\"_clicked.png\")\n self.image=self.baseImage\n self.rect=self.image.get_rect(topleft = pos)\n \n def update(self, pos, clicked):\n if (pos[0] > self.rect.left and \n pos[0] < self.rect.right and \n pos[1] > self.rect.top and \n pos[1] < self.rect.bottom):\n if clicked == (0,0,0):\n self.image = self.hoverImage\n else:\n self.image = self.clickedImage\n else:\n self.image = self.baseImage\n \n def click(self, pos):\n if (pos[0] > self.rect.left and \n pos[0] < self.rect.right and \n pos[1] > self.rect.top and \n pos[1] < self.rect.bottom):\n self.image = self.clickedImage\n return True\n else:\n self.image = self.baseImage\n return False\n \n \n \n \n" }, { "alpha_fraction": 0.3736470639705658, "alphanum_fraction": 0.37976470589637756, "avg_line_length": 37.98136520385742, "blob_id": "4b59dc08077201a8485a75a307a8a668b9ba0952", "content_id": "5a96c3f9ca0a3ae7935268360f0106e1e2c0e872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6375, "license_type": "no_license", "max_line_length": 99, "num_lines": 161, "path": "/Cluster.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "#Ore blocks file, add different ones (color, texture)\n\nimport math, pygame, random\nfrom Ore import *\n\n\nclass Cluster():\n def __init__(self):\n self.ores = []\n self.oreTimer = 0\n self.oreTimerMax = 60*3\n self.vanes = []\n \n def addCol(self):\n for oreC in self.ores:\n for ore in oreC:\n ore.moveOver()\n oreCollumn = []\n for i in range(7):\n oreCollumn += [Ore(None, [0, (6*80)-(i*80)])]\n self.ores += [oreCollumn]\n self.findVanes()\n \n def findVanes(self):\n self.vanes=[]\n \n #if len(self.ores) >= 1: #first row\n for colNum, oreC in enumerate(self.ores):\n vane = []\n # ~ if colNum > 12 :\n # ~ screens == \"gameover\"\n currKind = None\n for oreNum, ore in enumerate(oreC):\n if not currKind: #start of column\n # ~ print(\"Col Num: \", colNum)\n if colNum > 0 and oreNum < 
len(self.ores[colNum-1]):\n if self.ores[colNum-1][oreNum].kind == ore.kind:\n self.ores[colNum-1][oreNum].vane += [ore]\n ore.vane = self.ores[colNum-1][oreNum].vane\n vane = ore.vane\n currKind = ore.kind\n # ~ print(\"adding \" + currKind + \" to old vane\")\n else:\n currKind = ore.kind\n vane += [ore]\n ore.vane = vane\n # ~ print(\"starting \" + currKind)\n else:\n currKind = ore.kind\n vane += [ore]\n ore.vane = vane\n # ~ print(\"starting \" + currKind)\n elif ore.kind == currKind: #same block above last block\n if colNum > 0 and oreNum < len(self.ores[colNum-1]):\n if self.ores[colNum-1][oreNum].kind == ore.kind:\n vane += [ore]\n self.ores[colNum-1][oreNum].vane += vane\n ore.vane = self.ores[colNum-1][oreNum].vane\n vane = ore.vane\n else: \n vane += [ore]\n ore.vane = vane\n else:\n vane += [ore]\n ore.vane = vane\n # ~ print(\"Adding \" + currKind)\n else: #different block below last block\n if vane not in self.vanes: \n self.vanes += [vane] \n vane = [] \n if colNum > 0 and oreNum < len(self.ores[colNum-1]):\n if self.ores[colNum-1][oreNum].kind == ore.kind:\n self.ores[colNum-1][oreNum].vane += [ore]\n ore.vane = self.ores[colNum-1][oreNum].vane\n vane = ore.vane\n currKind = ore.kind\n # ~ print(\"adding \" + currKind + \" to old vane\")\n else:\n currKind = ore.kind\n vane += [ore]\n ore.vane = vane\n # ~ print(\"starting new \" + currKind)\n else:\n currKind = ore.kind\n vane += [ore]\n ore.vane = vane\n # ~ print(\"starting new \" + currKind)\n if vane not in self.vanes: \n self.vanes += [vane]\n \n \n def __str__(self):\n out = \"--------------------\\n\"\n for i in self.ores:\n for j in i:\n out += str(j) + \"\\n\"\n out += \"\\n\"\n out += \">>>>>>\\n\"\n for vane in self.vanes:\n if len(vane) > 0:\n out += vane[0].kind + \" ore, size: \" + str(len(vane)) + \"\\n\"\n out += \"--------------------\\n\\n\"\n return out\n \n \n def pickCollide(self, other):\n for oreC in self.ores: #look through ore columns\n for ore in oreC: #look through ores in column\n if ore.pickCollide(other): #if pick hit one\n for vane in self.vanes: #look through vanes to see what vane that ore is in\n if ore in vane: #in that vane\n for vaneOre in vane:#kill all ores in that vane\n vaneOre.kill()\n \n def update(self):\n didKill = self.killOres()\n for colNum, oreC in enumerate(self.ores):\n if colNum > 0 and len(oreC) == 0:\n colCount = colNum-1\n print (colNum, colCount)\n while colCount >= 0:\n for ore in self.ores[colCount]:\n ore.moveBack()\n colCount -=1\n self.ores.remove(oreC)\n print(\"Moving Back\")\n elif len(oreC) == 0:\n self.ores.remove(oreC)\n print(\"Removing Last \")\n if didKill:\n self.findVanes()\n \n \n \n # ~ y = 0\n # ~ for rowNum, oreC in enumerate(self.ores):\n # ~ for oreNum, ore in enumerate(oreC):\n \n \n \n \n \n \n \n def killOres(self):\n didKill = False\n for rowNum, oreC in enumerate(self.ores):\n for oreNum, ore in enumerate(oreC):\n if not ore.living: #found dead ore\n print(\"removing: \" + str(ore))\n oreC.remove(ore)\n didKill = True\n for i in range(oreNum, len(oreC)):\n oreC[i].moveDown()\n return didKill\n\t\n\t\n # ~ def endgame (self): \n # ~ for colNum, oreC in enumerate(self.ores):\n # ~ if colNum > 2:\n # ~ screens == endgame\n\t\t\n \n \n \n \n \n \n\n\n" }, { "alpha_fraction": 0.44481053948402405, "alphanum_fraction": 0.44481053948402405, "avg_line_length": 27.904762268066406, "blob_id": "6a12bffa735cad48b47ce9b40d22c07d4f659d14", "content_id": "83bfc859f36fcc26f92fa0c07c837fd00d41e133", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/Getters.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": "def getMenuOption(debug=False): \n if debug: print(\"getMenuOption Function\")\n \n goodInput = False\n while not goodInput:\n option = input(\"Please Select An Option: \")\n option = option.lower()\n # ~ print(option)\n if (option == \"q\" or \n option == \"quit\" or \n option == \"x\" or\n option == \"exit\"):\n option = \"q\"\n goodInput = True\n \n if (option == \"play\" or \n option == \"Play\"):\n option = \"Play\"\n goodInput = True\n \n return option\n" }, { "alpha_fraction": 0.6164772510528564, "alphanum_fraction": 0.6392045617103577, "avg_line_length": 27.5, "blob_id": "9f625fd1c0b0a2bb44a9a94441673395ecf78010", "content_id": "fca28b634119043b3d753e457addf306ebb8f731", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/Player.py", "repo_name": "KRHS-GameProgramming-2019/Mining-Man", "src_encoding": "UTF-8", "text": " #The \"guy\" that throws the pickaxe, must have animations\n\nfrom sys import *\nfrom math import *\nimport pygame\n\n\nclass Guy():\n def __init__(self, pos=[900,640]):\n self.image = pygame.image.load(\"images/Player/NewGuy.png\")\n self.rect = self.image.get_rect(bottomright = pos)\n self.speed = self.speedx, self.speedy = 0,0\n \n" } ]
13
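A note on the Pickaxe code in the Mining-Man record above: Pickaxe.go() and Pickaxe.back() derive the throw velocity from atan(ydist/xdist), which only behaves in one quadrant (the guards xdist > 0 and ydist > 0 silently skip clicks in other directions). A minimal, quadrant-safe sketch of the same computation, assuming the repo's 15 px/frame speed; launch_velocity and the sample coordinates are illustrative, not part of the repo:

import math

def launch_velocity(start, target, speed=15.0):
    """Return (vx, vy) moving from start toward target at the given speed."""
    dx = target[0] - start[0]
    dy = target[1] - start[1]
    dist = math.hypot(dx, dy)   # works in every quadrant, unlike atan(dy/dx)
    if dist == 0:
        return (0.0, 0.0)       # already on target; avoid dividing by zero
    return (speed * dx / dist, speed * dy / dist)

# pickaxe resting at (860, 595), player clicks at (200, 100)
vx, vy = launch_velocity((860, 595), (200, 100))
print(vx, vy)                   # (-12.0, -9.0): up and to the left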
santhosh8465/projects
https://github.com/santhosh8465/projects
411b24f227ccc6268dbbac0ba05f33a155fd9be8
f986bb82c48142b8d2d658046d464bc9937363cb
e58ff6cf4cc26be70984f2a05f13244178b0e227
refs/heads/master
2020-04-13T10:27:51.423034
2018-12-26T05:52:06
2018-12-26T05:52:06
163,140,750
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5083333253860474, "alphanum_fraction": 0.5219298005104065, "avg_line_length": 34.913387298583984, "blob_id": "7dd1325e0a8d0be4cd8b2fa267ca837c3139bbe2", "content_id": "39ac08a401cdaf4b669e4d985b0d3605638bad7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4560, "license_type": "no_license", "max_line_length": 452, "num_lines": 127, "path": "/boggle_checker.py", "repo_name": "santhosh8465/projects", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 6 23:51:48 2018\n\n@author: santhoshreddyventeru\n\"\"\"\n\nfrom boggle_dices import Boggle_Dice\nfrom inpu_t import Input\n\nwords = list()\ndef Word_checker():\n with open('words.txt', 'r') as file_:\n for x in file_:\n words = file_.read().splitlines()\n words = [wor_d.upper() for wor_d in words]\n \n\ndef word_check(strin_g):\n if strin_g in words:\n return True\n return False\n \ndef trace_word_grid(word_trace, grid_trace, i, j, strin_g):\n grid_trace[i][j] = True\n strin_g = strin_g + word_trace[i][j]\n print(str(strin_g))\n if word_check(strin_g.upper()):\n print(strin_g.upper())\n \n for row in range(i-1, i+2):\n if row >= len(word_trace):\n break\n for col in range(j-1, j+2):\n if col >= len(word_trace[0]):\n break\n if row>=0 and col>=0:\n if grid_trace[row][col] == False:\n trace_word_grid(word_trace, grid_trace, row, col, strin_g)\n strin_g = strin_g[:-1]\n grid_trace[i][j] = False\n return\n\ndef trace_word(input_word, input_grid):\n for row in range(0, len(input_grid)):\n for col in range(0, len(input_grid)):\n if trace_wordgrid(input_word, row, col, input_grid):\n return True\n return False\n\ndef trace_wordgrid(word_trace, row, col, grid_trace):\n \"\"\"\n This function is the important function that checks the adjacency of the letters recursively.\n \n \"\"\"\n if word_trace == '':\n return True\n elif row<0 or row>=4 or col<0 or col>=4 or word_trace[:1] != grid_trace[row][col]:\n return False\n else:\n exist = grid_trace[row][col]\n grid_trace[row][col] = ''\n remaining = word_trace[1:len(word_trace)]\n result = trace_wordgrid(remaining, row-1, col-1, grid_trace) or trace_wordgrid(remaining, row-1, col, grid_trace) or trace_wordgrid(remaining, row-1, col+1, grid_trace) or trace_wordgrid(remaining, row, col-1, grid_trace) or trace_wordgrid(remaining, row, col+1, grid_trace) or trace_wordgrid(remaining, row+1, col-1, grid_trace) or trace_wordgrid(remaining, row+1, col, grid_trace) or trace_wordgrid(remaining, row+1, col+1, grid_trace) \n \n grid_trace[row][col] = exist\n return result\n \n \nclass Boggle_checker:\n\n def display(self):\n self.sixteendices = Boggle_Dice()\n self.sixteendices.display_dices()\n print('Start typing your words!(press enter after each word and enter \"X\" when done)' ) \n\n def _input(self):\n self.player_input = Input()\n while True:\n input_word = input()\n if str(input_word).upper() == 'X':\n break\n else:\n self.player_input.add_to_input(input_word)\n\n def scor_e(self):\n points = 0\n matri_x = self.dices.letterboard()\n player_in = self.player_input.getlist()\n #length_word = len(player_in)\n for j in range(0, len(player_in)):\n if trace_word(player_in[j], matri_x):\n if len(player_in[j])<3:\n print('The word ', player_in[j], 'is short.')\n pass\n if len(player_in[j]) == 3 or len(player_in[j]) == 4:\n print('The word ', player_in[j], 'is worth 1 point')\n points += 1\n if len(player_in[j]) == 5:\n print('The word ', player_in[j], 'is worth 2 point')\n 
points += 2\n if len(player_in[j]) == 6:\n print('The word ', player_in[j], 'is worth 3 point')\n points += 3\n if len(player_in[j]) == 7:\n print('The word ', player_in[j], 'is worth 5 point')\n points += 5\n if len(player_in[j]) >= 8:\n print('The word ', player_in[j], 'is worth 11 point')\n points += 11\n \n else:\n print('The word ', player_in[j], \n 'is not present in the grid.')\n self.score = points\n print('Your total score is ', points, 'points')\n \n \n def setSixteenDices(self, _dices):\n self.dices = _dices\n \n def setInputProcessor(self, input_processor):\n self.player_input = input_processor\n \n def getScore(self):\n return self.score" }, { "alpha_fraction": 0.5879043340682983, "alphanum_fraction": 0.5953385829925537, "avg_line_length": 30.075000762939453, "blob_id": "e41205f15017d45e8992ed750dc2c8cf0f637c75", "content_id": "25140c7ae534c14c0157532beaad108fedf93c20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4977, "license_type": "no_license", "max_line_length": 179, "num_lines": 160, "path": "/venteru_final.py", "repo_name": "santhosh8465/projects", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 6 22:58:28 2018\n\n@author: santhoshreddyventeru\n\"\"\"\n\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom boggle_dices import Boggle_Dice\nfrom inpu_t import Input\nfrom boggle_checker import Boggle_checker, Word_checker\n\nclass Boggle_Game(QMainWindow):\n EXIT_CODE_REBOOT = -123\n def __init__(self):\n super().__init__()\n self.board = Boggle_Dice()\n self._list = self.board.letterboard()\n #print(self._list)\n self.boggle_check = Boggle_checker()\n #self.bo_in = Input()\n self.words = list()\n self.inpu_t = Input()\n #print(self.input)\n self.initUI()\n \n def initUI(self):\n self.centerWidget = QWidget()\n self.BTWidget = QWidget(self.centerWidget)\n self.word_widget = QWidget(self.centerWidget)\n \n self.list_widget = QWidget(self.centerWidget)\n \n \n \n \n self.centralWidgetLayout = QVBoxLayout()\n \n self.textGridLayout = QGridLayout()\n \n self.wordEnterLayout = QHBoxLayout()\n self.wordListLayout = QHBoxLayout()\n self.setCentralWidget(self.centerWidget)\n \n \n \n \n self.BTWidget.setLayout(self.textGridLayout)\n for i in range(0, 4):\n for j in range(0, 4):\n button = QPushButton(self._list[i][j])\n self.textGridLayout.addWidget(button, i, j)\n \n \n \n \n \n self.word_widget.setLayout(self.wordEnterLayout)\n self.lineEdit = QLineEdit()\n \n self.addWordBtn = QPushButton('ADD')\n \n \n \n self.wordEnterLayout.addWidget(self.lineEdit) \n self.wordEnterLayout.addWidget(self.addWordBtn)\n input\n \n \n \n \n self.list_widget.setLayout(self.wordListLayout)\n self.wordList = QTextEdit()\n \n self.scoreBtn = QPushButton('Get Score')\n \n self.wordListLayout.addWidget(self.wordList)\n \n self.wordListLayout.addWidget(self.scoreBtn)\n \n \n \n \n self.addWordBtn.clicked.connect(self.addWordBtn_clicked)\n self.scoreBtn.clicked.connect(self.scoreBtn_clicked)\n \n \n menubar = self.menuBar()\n gameMenu = menubar.addMenu('GAME')\n \n newGame = QAction('NEW GAME', self)\n \n gameMenu.addAction(newGame)\n \n newGame.triggered.connect(self.new_game)\n\n \n \n self.centerWidget.setLayout(self.centralWidgetLayout)\n #self.centerWidget.setLayout(self.CentralWidgetLayout)\n self.centralWidgetLayout.addWidget(self.BTWidget)\n #self.centralWidgetLayout.addWidget(self.word_listWidget)\n 
self.centralWidgetLayout.addWidget(self.word_widget)\n #self.centralWidgetLayout.addWidget(self.inplistWidget)\n #self.centralWidgetLayout.addWidget(self.inputlistWidget)\n #self.centralWidgetLayout.addWidget(self.checkWidget)\n #self.centralWidgetLayout.addWidget(self.wolistWidget)\n \n self.centralWidgetLayout.addWidget(self.list_widget)\n \n # Display the GUI\n self.setGeometry(800, 800, 800, 800)\n self.setWindowTitle('BOGGLE')\n #self.setWindowIcon(QIcon('./Documents/boggle-gui/boggle.png'))\n self.setWindowIcon(QIcon('/Users/santhoshreddyventeru/Documents/boggle-gui/boggle.png'))\n self.show()\n \n # Start a new Game\n def new_game(self):\n self.board = None\n self._list = None\n self.inpu_t = None\n self.boggle_check = None\n qApp.exit(Boggle_Game.EXIT_CODE_REBOOT)\n return\n \n def addWordBtn_clicked(self):\n word_entered = self.lineEdit.text()\n if word_entered:\n self.words.append(word_entered.strip().upper())\n self.lineEdit.clear()\n self.words = list(set(self.words))\n word_list_display = '\\n'.join(self.words)\n self.wordList.setText(word_list_display)\n return\n \n def scoreBtn_clicked(self):\n self.inpu_t.setlist(self.words)\n self.boggle_check.setInputProcessor(self.inpu_t)\n self.boggle_check.setSixteenDices(self.board)\n Word_checker()\n self.boggle_check.scor_e()\n reply = QMessageBox.question(self, 'Congrats','Your Score: {0} \\n Play Again ?'.format(str(self.boggle_check.getScore())),QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n self.new_game()\n else:\n sys.exit(0)\n return\n \nif __name__ == \"__main__\":\n currentExitCode = Boggle_Game.EXIT_CODE_REBOOT\n while currentExitCode == Boggle_Game.EXIT_CODE_REBOOT:\n app = QApplication(sys.argv)\n gui = Boggle_Game()\n currentExitCode = app.exec_()\n app = None\n \n" }, { "alpha_fraction": 0.2831050157546997, "alphanum_fraction": 0.3111546039581299, "avg_line_length": 40.77777862548828, "blob_id": "183b4372bbeb9dbc8d7b2ea5279e3a80d7bb18e5", "content_id": "c2c7b5b2f9b5fd4a446e0b29ff40a00613292cd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 99, "num_lines": 36, "path": "/boggle_dices.py", "repo_name": "santhosh8465/projects", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 6 23:32:22 2018\n\n@author: santhoshreddyventeru\n\"\"\"\nimport random\nclass Boggle_Dice:\n \n def __init__(self):\n self.dic_es = { 1 : ['A', 'E', 'A', 'N', 'E', 'G'], 2 : ['A', 'H', 'S', 'P', 'C', 'O'],\n 3 : ['A', 'S', 'P', 'F', 'F', 'K'], 4 : ['O', 'B', 'J', 'O', 'A', 'B'],\n 5 : ['I', 'O', 'T', 'M', 'U', 'C'], 6 : ['R', 'Y', 'V', 'D', 'E', 'L'],\n 7 : ['L', 'R', 'E', 'I', 'X', 'D'], 8 : ['E', 'I', 'U', 'N', 'E', 'S'],\n 9 : ['W', 'N', 'G', 'E', 'E', 'H'], 10 : ['L', 'N', 'H', 'N', 'R', 'Z'],\n 11 : ['T', 'S', 'T', 'I', 'Y', 'D'], 12 : ['O', 'W', 'T', 'O', 'A', 'T'],\n 13 : ['E', 'R', 'T', 'T', 'Y', 'L'], 14 : ['T', 'O', 'E', 'S', 'S', 'I'],\n 15 : ['T', 'E', 'R', 'W', 'H', 'V'], 16 : ['N', 'U', 'I', 'H', 'M', 'Qu'],}\n self.board = [[]]\n self.dice_s()\n \n def dice_s(self):\n important = []\n for val in self.dic_es.keys():\n value = self.dic_es[val][random.randint(0, 5)]\n print('[', value, ']', sep = \" \", \n end = \"\" if (val%4) != 0 else '\\n')\n important.append(value)\n if val%4 == 0:\n self.board.append(important)\n important = []\n self.board = self.board[1:]\n \n def letterboard(self):\n return self.board\n \n \n \n \n \n " 
}, { "alpha_fraction": 0.48687663674354553, "alphanum_fraction": 0.5052493214607239, "avg_line_length": 21.382352828979492, "blob_id": "b4735dec3e8330b9f73e15b28df647ddcef710db", "content_id": "76441bdb8bfda3816b95783085061400bad18ce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 48, "num_lines": 34, "path": "/inpu_t.py", "repo_name": "santhosh8465/projects", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 6 22:59:42 2018\n\n@author: santhoshreddyventeru\n\"\"\"\n\nimport sys\nclass Input:\n def __init__(self, ):\n self.lis = list()\n #self.input = string()\n #self.input = dictionary()\n \n def add_to_input(self, in_val):\n try:\n self.lis.append(str(in_val).upper())\n except TypeError:\n print(\"error\")\n sys.exit(1)\n \n \n def getlist(self): \n print(self.lis)\n #print(self.Input)\n return self.lis\n #print(x) \n def setlist(self, input_list):\n self.lis = input_list\n \n \n def set_input_list(self, input_list):\n self.__input_list = input_list\n\n" } ]
4
lucas-fs/mineracao
https://github.com/lucas-fs/mineracao
eb600ede003a08e8e56ace98660938ebeecc774d
1054e5d95bb411e9651fcd9baff078e3e9d8671b
6058599f27faf867ef4f572bba6f1def19006e83
refs/heads/master
2021-06-08T11:53:33.823345
2016-11-03T12:35:49
2016-11-03T12:35:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5562158226966858, "alphanum_fraction": 0.5856904983520508, "avg_line_length": 29.70802879333496, "blob_id": "ed6bd16331b825c55e6f17b749f91cd7add5f2e3", "content_id": "74894166d769e1c08238e78803c551467766a4a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4218, "license_type": "no_license", "max_line_length": 265, "num_lines": 137, "path": "/csv_handler.py", "repo_name": "lucas-fs/mineracao", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n#########################################################################\n#\n# Escrito por: Iago Corrêa em [email protected] e\n# e Lucas Ferreira da Silva em [email protected]\n#\n#\n########################################################################\n\n\n\nimport csv\n\ntable = []; column = 2; summ = 0; count = 0; Country = []; rCountry = []; country_region = []\n\ndef read_csv(table, name_table):\n\twith open(name_table, 'r') as csvfile:\n\t\tfor line in csvfile:\n\t\t\tif name_table == \"tabela.csv\":\n\t\t\t\ttable.append(line[:-1].split('\",\"'))\n\t\t\telse:\n\t\t\t\ttable.append(line[:-1].split(','))\n\ndef write_csv(table):\n\twith open('new_csv.csv', 'w', newline='') as csvfile:\n\t spamwriter = csv.writer(csvfile, delimiter=\",\", escapechar=\";\")\n\t for i in range(1, len(table)):\n\t \tif i == 1:\n\t \t\tspamwriter.writerow([\"Country\"] + [\"continent\"] + [\"regionUN\"] + [\"subregionUN\"] + [\"Year\"] + [\"Both Sexes\"] + [\"Female\"] + [\"Male\"])\n\t \telif len(table[i]) == 17 and (int(table[i][1]))>1999:\n\t \t\tspamwriter.writerow([table[i][0]] + [table[i][14]] + [table[i][15]] + [table[i][16]] + [table[i][1]] + [table[i][5]] + [table[i][6]] + [table[i][7]])\n\ndef format_table(table):\n\tfor i in range(2,len(table)):\n\t\ttable[i][0] = table[i][0].replace('\"', \"\")\n\t\ttable[i][1] = table[i][1].replace(\" \", \"\")\n\t\ttable[i][13] = table[i][13].replace('\"', \"\")\n\ndef insert_number(n, table, nullPoints):\n\tfor i in range(len(nullPoints)):\n\t\ttable[nullPoints[i][0]][nullPoints[i][1]] = round(n, 1)\n\ndef search_id(string):\n\tfor i in range(len(Country)):\n\t\tif Country[i][0] == string:\n\t\t\treturn i\n\ndef trade_specChar(table):\n\tfor i in range(2, 14):\n\t\ttable[0][i] = table[0][i].replace(\"&lt;\", \"< \")\n\nread_csv(table, \"tabela.csv\")\nread_csv(country_region, \"paises.csv\")\nformat_table(table)\n\n# Laço responsável por definir os intervalos de cada país\ni = 2\nwhile i < len(table):\n\tatual_string = table[i][0];\n\taux = 1\n\tid_ini = i\n\twhile True:\n\t\tif i + aux > 853:\n\t\t\tbreak\n\t\telif atual_string == table[i + aux][0]:\n\t\t\taux = aux + 1\n\t\telse:\n\t\t\tbreak\n\tCountry.append([atual_string, id_ini, aux - 1])\n\ti = i + aux\n\n# Laço resposável por criar lista de países e suas respectivas médias por coluna\nfor i in range(0, len(Country)):\n\tid_base = Country[i][1]; id_lmit = Country[i][2]\n\tdel Country[i][1]; del Country[i][1]\n\tfor column in range(2, 14):\n\t\tnullPoints = []\n\t\tsumm = 0; count = 0; j = 0\n\t\twhile j <= id_lmit:\n\t\t\tlNum = table[id_base + j][column].split(\" \")\n\t\t\tif len(lNum) > 1:\n\t\t\t\taux = 0\n\t\t\t\tfor z in range(len(lNum)):\n\t\t\t\t\taux = aux + float(lNum[z])\n\t\t\t\tavg = aux / len(lNum)\n\t\t\t\ttable[id_base + j][column] = float(round(avg, 1))\n\t\t\t\tsumm = summ + avg\n\t\t\t\tcount = count + len(lNum)\n\t\t\tif lNum[0] != '':\n\t\t\t\tsumm = summ + float(lNum[0])\n\t\t\t\tcount = count + 1\n\t\t\telif lNum[0] == 
'':\n\t\t\t\tnullPoints.append([id_base + j, column])\n\t\t\tj = j + 1\n\t\tif count == 0:\n\t\t\tCountry[i].append(0.0)\n\t\t\tinsert_number(float(0.0), table, nullPoints)\n\t\t\t#continue\n\t\telse:\n\t\t\tavg = summ / count\n\t\t\tCountry[i].append(round(avg, 1))\n\t\t\tinsert_number(avg, table, nullPoints)\n\n\n# Cria lista responsável pelos intervalos intra-país\nfor i in range(2, 854):\n\tlPais = table[i][1].split(\"-\")\n\tif len(lPais) > 1:\n\t\tid_country = search_id(table[i][0])\n\t\trCountry.append([table[i][0], 1 + (int(lPais[1]) - int(lPais[0])), lPais[0], i, id_country])\n\n# Remove da lista principal as tuplas com anos intervalados.\ncount = 0\nfor i in range(len(rCountry)):\n\tdel table[rCountry[i][3]]\n\tcount = count + 1\n\tif i != len(rCountry) - 1:\n\t\trCountry[i + 1][3] = rCountry[i + 1][3] - count\n\nfor i in range(len(rCountry)):\n\tfor j in range(rCountry[i][1]):\n\t\tidC = rCountry[i][4]\n\t\ttable.append([Country[idC][0], int(rCountry[i][2]) + j, Country[idC][1], Country[idC][2], Country[idC][3], Country[idC][4], Country[idC][6], Country[idC][6], Country[idC][7], Country[idC][8], Country[idC][9], Country[idC][10], Country[idC][11], Country[idC][12]])\n\n# Busca região do pais e anexa os dados à tabela Country\n\nfor i in range(2, len(table)):\n\tfor j in range(len(country_region)):\n\t\tif table[i][0] == country_region[j][0]:\n\t\t\ttable[i].append(country_region[j][1])\n\t\t\ttable[i].append(country_region[j][2])\n\t\t\ttable[i].append(country_region[j][3])\n\ntable.sort(key=lambda x : x[0])\ntrade_specChar(table)\nwrite_csv(table)\n" } ]
1
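csv_handler.py in the record above fills blank cells with the per-country average of the known values in that column, falling back to 0.0 when the whole column is blank. A stand-alone sketch of that imputation step; impute_column_mean and the sample rows are invented for illustration:

def impute_column_mean(rows, col):
    """Fill blank cells in column `col` with the rounded mean of the known values."""
    known = [float(r[col]) for r in rows if r[col] != '']
    if not known:
        fill = 0.0  # csv_handler falls back to 0.0 when a column is all blank
    else:
        fill = round(sum(known) / len(known), 1)
    for r in rows:
        if r[col] == '':
            r[col] = fill
    return rows

rows = [['Albania', '12.3'], ['Albania', ''], ['Albania', '14.1']]
impute_column_mean(rows, 1)   # the blank cell becomes 13.2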
dela3499/distributed
https://github.com/dela3499/distributed
f0ee3d374095c832bcbd40452e2009491a99e000
384c4ab6d5c1a26a208e1cd3cd1be94c35fac9a3
805d0e416c49e1fe7a2c9e5243988ccb7bf19da4
refs/heads/master
2021-01-15T08:43:27.564813
2016-03-18T02:48:08
2016-03-23T03:43:09
54,529,602
0
0
null
2016-03-23T03:58:09
2016-03-22T15:30:31
2016-03-23T03:43:21
null
[ { "alpha_fraction": 0.6163547039031982, "alphanum_fraction": 0.622463047504425, "avg_line_length": 34.739437103271484, "blob_id": "6cc237b5e510204021e71ff1818d5d2172579f03", "content_id": "5b58b487748b4b25d989cdfb63b4f2c8e0eae8ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5075, "license_type": "no_license", "max_line_length": 82, "num_lines": 142, "path": "/distributed/diagnostics/status_monitor.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "from __future__ import print_function, division, absolute_import\n\nfrom collections import defaultdict\nimport json\nimport time\nfrom tornado import gen\n\nfrom ..core import rpc\nfrom ..utils import ignoring, is_kernel, log_errors\nfrom ..executor import default_executor\nfrom ..scheduler import Scheduler\n\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.ioloop import IOLoop, PeriodicCallback\n\nwith ignoring(ImportError):\n from bokeh.palettes import Spectral11\n from bokeh.models import ColumnDataSource\n from bokeh.models.widgets import DataTable, TableColumn\n from bokeh.plotting import vplot, output_notebook, show, figure\n from bokeh.io import curstate, push_notebook\n\nclass Status_Monitor(object):\n \"\"\" Display the tasks running and waiting on each worker\n\n Parameters\n ----------\n addr: tuple, optional\n (ip, port) of scheduler. Defaults to scheduler of recent Executor\n interval: Number, optional\n Interval between updates. Defaults to 1s\n \"\"\"\n def __init__(self, addr=None, interval=1000.00, loop=None):\n if addr is None:\n scheduler = default_executor().scheduler\n if isinstance(scheduler, rpc):\n addr = (scheduler.ip, 9786)\n elif isinstance(scheduler, Scheduler):\n addr = ('127.0.0.1', scheduler.services['http'].port)\n self.addr = addr\n self.interval = interval\n\n self.display_notebook = False\n\n if is_kernel() and not curstate().notebook:\n output_notebook()\n assert curstate().notebook\n\n self.task_source, self.task_table = task_table_plot()\n self.worker_source, self.worker_table = worker_table_plot()\n\n self.output = vplot(self.worker_table, self.task_table)\n\n self.client = AsyncHTTPClient()\n\n self.loop = loop or IOLoop.current()\n self.loop.add_callback(self.update)\n self._pc = PeriodicCallback(self.update, self.interval, io_loop=self.loop)\n self._pc.start()\n\n def _ipython_display_(self, **kwargs):\n show(self.output)\n self.display_notebook = True\n\n @gen.coroutine\n def update(self):\n \"\"\" Query the Scheduler, update the figure\n\n This opens a connection to the scheduler, sends it a function to run\n periodically, streams the results back and uses those results to update\n the bokeh figure\n \"\"\"\n with log_errors():\n tasks, workers = yield [\n self.client.fetch('http://%s:%d/tasks.json' % self.addr),\n self.client.fetch('http://%s:%d/workers.json' % self.addr)]\n\n tasks = json.loads(tasks.body.decode())\n workers = json.loads(workers.body.decode())\n\n task_table_update(self.task_source, tasks)\n worker_table_update(self.worker_source, workers)\n\n if self.display_notebook:\n push_notebook()\n\n\ndef task_table_plot(row_headers=False, width=600, height=100):\n names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']\n source = ColumnDataSource({k: [] for k in names})\n\n columns = [TableColumn(field=name, title=name) for name in names]\n table = DataTable(source=source, columns=columns,\n row_headers=row_headers, width=width, height=height)\n return source, 
table\n\n\ndef task_table_update(source, d):\n d = {k: [v] for k, v in d.items()}\n source.data = d\n\n\ndef worker_table_plot(width=600, height=100, **kwargs):\n \"\"\" Column data source and plot for host table \"\"\"\n names = ['workers', 'cpu', 'memory-percent', 'memory', 'cores', 'processes',\n 'processing', 'latency', 'last-seen', 'disk-read', 'disk-write',\n 'network-send', 'network-recv']\n source = ColumnDataSource({k: [] for k in names})\n\n columns = {name: TableColumn(field=name, title=name) for name in names}\n\n slow_names = ['workers', 'cores', 'processes', 'memory',\n 'latency', 'last-seen']\n slow = DataTable(source=source, columns=[columns[n] for n in slow_names],\n width=width, height=height, **kwargs)\n\n fast_names = ['workers', 'cpu', 'memory-percent', 'processing',\n 'disk-read', 'disk-write', 'network-send', 'network-recv']\n fast = DataTable(source=source, columns=[columns[n] for n in fast_names],\n width=width, height=height, **kwargs)\n\n table = vplot(slow, fast)\n return source, table\n\n\ndef worker_table_update(source, d):\n \"\"\" Update host table source \"\"\"\n workers = sorted(d, reverse=True)\n\n data = {}\n data['workers'] = workers\n for name in ['cores', 'cpu', 'memory-percent', 'latency', 'last-seen',\n 'memory', 'disk-read', 'disk-write', 'network-send',\n 'network-recv']:\n try:\n data[name] = [d[w][name] for w in workers]\n except KeyError:\n pass\n\n data['processing'] = [sorted(d[w]['processing']) for w in workers]\n data['processes'] = [len(d[w]['ports']) for w in workers]\n source.data.update(data)\n" }, { "alpha_fraction": 0.6610837578773499, "alphanum_fraction": 0.6758620738983154, "avg_line_length": 27.19444465637207, "blob_id": "6f369e073c00d5ea392d830109a2ee973d39fe5b", "content_id": "0315a4eb7d95418e02ca75dd16195f336fca3584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 70, "num_lines": 36, "path": "/distributed/diagnostics/tests/test_status_monitor.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "from distributed.diagnostics.status_monitor import (worker_table_plot,\n worker_table_update, task_table_plot, task_table_update)\nfrom distributed.diagnostics.scheduler import workers, tasks\n\nfrom distributed.utils_test import gen_cluster, inc\nfrom distributed.executor import _wait\n\nfrom tornado import gen\n\n\n@gen_cluster()\ndef test_worker_table(s, a, b):\n while any('last-seen' not in v for v in s.host_info.values()):\n yield gen.sleep(0.01)\n data = workers(s)\n source, plot = worker_table_plot()\n worker_table_update(source, data)\n\n assert source.data['workers'] == ['127.0.0.1']\n\n\n@gen_cluster(executor=True)\ndef test_task_table(e, s, a, b):\n source, plot = task_table_plot()\n\n data = tasks(s)\n task_table_update(source, data)\n assert source.data['processing'] == [0]\n\n futures = e.map(inc, range(10))\n yield _wait(futures)\n\n data = tasks(s)\n task_table_update(source, data)\n assert source.data['processing'] == [0]\n assert source.data['total'] == [10]\n" }, { "alpha_fraction": 0.618791937828064, "alphanum_fraction": 0.6416107416152954, "avg_line_length": 32.8636360168457, "blob_id": "46c72b5e6d5ed91194ea6ed08a82ec76d1500e46", "content_id": "ab95ef9295f52749c93d56f49b73933b49e1f764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1490, "license_type": "no_license", "max_line_length": 78, "num_lines": 44, "path": 
"/distributed/diagnostics/bokeh/status/server_lifecycle.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom collections import deque\nfrom datetime import datetime\nimport json\n\nfrom tornado import gen\nfrom tornado.locks import Condition\nfrom tornado.httpclient import AsyncHTTPClient\n\nimport distributed.diagnostics\nfrom distributed.utils import log_errors\n\nclient = AsyncHTTPClient()\n\nmessages = {} # Globally visible store of messages\ndistributed.diagnostics.messages = messages # monkey-patching\n\n\[email protected]\ndef http_get(route):\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n response = yield client.fetch('http://localhost:9786/%s.json' % route)\n msg = json.loads(response.body.decode())\n messages[route]['deque'].append(msg)\n messages[route]['condition'].notify_all()\n messages[route]['times'].append(datetime.now())\n\n\ndef on_server_loaded(server_context):\n messages['workers'] = {'interval': 1000,\n 'deque': deque(maxlen=1000),\n 'times': deque(maxlen=1000),\n 'condition': Condition()}\n server_context.add_periodic_callback(lambda: http_get('workers'), 1000)\n\n messages['tasks'] = {'interval': 100,\n 'deque': deque(maxlen=1000),\n 'times': deque(maxlen=1000),\n 'condition': Condition()}\n server_context.add_periodic_callback(lambda: http_get('tasks'), 100)\n" }, { "alpha_fraction": 0.6140572428703308, "alphanum_fraction": 0.622474730014801, "avg_line_length": 27.975608825683594, "blob_id": "2c8b4c91060d2334ce295b8c0464f21f216b6419", "content_id": "971590fd9788282ffc44ed43af66f309984f3ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2376, "license_type": "no_license", "max_line_length": 79, "num_lines": 82, "path": "/distributed/cli/dscheduler.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport socket\nimport subprocess\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\n\ndef handle_signal(sig, frame):\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\ndef main(center, host, port, http_port, bokeh_port):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n try:\n import bokeh\n except ImportError:\n pass\n else:\n import distributed.diagnostics.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, 
socket.gethostname()]]\n        dirname = os.path.dirname(distributed.__file__)\n        path = os.path.join(dirname, 'diagnostics', 'bokeh', 'status')\n        proc = subprocess.Popen(['bokeh', 'serve', path,\n                                 '--log-level', 'warning',\n                                 '--port', str(bokeh_port)] +\n                                sum([['--host', host] for host in hosts], []))\n\n        distributed.diagnostics.bokeh.server_process = proc  # monkey patch\n\n        logger.info(\" Start Bokeh UI at:  http://%s:%d/status/\"\n                    % (ip, bokeh_port))\n\n    loop.start()\n    loop.close()\n    scheduler.stop()\n\n    logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n    check_python_3()\n    main()\n\n\nif __name__ == '__main__':\n    go()\n" }, { "alpha_fraction": 0.7682926654815674, "alphanum_fraction": 0.8048780560493469, "avg_line_length": 7.199999809265137, "blob_id": "c4411fc424bd8026b87e3039b3902a9c285d9dd7", "content_id": "ba82e9638aa7f655bb1999b7e7e068ed5e8af4ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 82, "license_type": "no_license", "max_line_length": 14, "num_lines": 10, "path": "/requirements.txt", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "tornado >= 4.2\ntoolz\nmsgpack-python\ncloudpickle\ndask\nclick\nboto3\nlocket\nsix\ntblib\n" }, { "alpha_fraction": 0.4957633316516876, "alphanum_fraction": 0.5055515170097351, "avg_line_length": 31.287734985351562, "blob_id": "77c10d151918574e2ab62e696bf46292a9875f35", "content_id": "a1740331f72b17f0ba04b905e76064aa58baae5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13690, "license_type": "no_license", "max_line_length": 87, "num_lines": 424, "path": "/distributed/s3fs.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport io\nimport logging\nimport re\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom botocore.client import Config\n\nfrom dask.base import tokenize\nfrom .utils import read_block\n\nlogger = logging.getLogger(__name__)\n\nlogging.getLogger('boto3').setLevel(logging.WARNING)\nlogging.getLogger('botocore').setLevel(logging.WARNING)\n\ndef split_path(path):\n    \"\"\"\n    Normalise S3 path string into bucket and key.\n\n    Parameters\n    ----------\n    path : string\n        Input path, like `s3://mybucket/path/to/file`\n\n    Examples\n    --------\n    >>> split_path(\"s3://mybucket/path/to/file\")\n    (\"mybucket\", \"path/to/file\")\n    \"\"\"\n    if path.startswith('s3://'):\n        path = path[5:]\n    if '/' not in path:\n        return path, \"\"\n    else:\n        return path.split('/', 1)\n\n\nclass S3FileSystem(object):\n    \"\"\"\n    Access S3 data as if it were a file system.\n    \"\"\"\n    _conn = {}\n    connect_timeout=5\n    read_timeout=15\n\n    def __init__(self, anon=True, key=None, secret=None, **kwargs):\n        \"\"\"\n        Create connection object to S3\n\n        Will use configured key/secret (typically in ~/.aws, see the\n        boto3 documentation) unless specified\n\n        Parameters\n        ----------\n        anon : bool (True)\n            whether to use anonymous connection (public buckets only)\n        key : string (None)\n            if not anonymous, use this key, if specified\n        secret : string (None)\n            if not anonymous, use this secret key, if specified\n        kwargs : other parameters for boto3 session\n        \"\"\"\n        self.anon = anon\n        self.key = key\n        self.secret = secret\n        self.kwargs = kwargs\n        self.connect(anon, key, secret, kwargs)\n        self.dirs = {}\n        self.s3 = self.connect(anon, key, secret, kwargs)\n\n    def connect(self, anon, key, secret, kwargs):\n        tok = tokenize(anon, key, secret, kwargs)\n        if tok not in 
self._conn:\n            logger.debug(\"Open S3 connection.  Anonymous: %s\",\n                         self.anon)\n            if self.anon:\n                from botocore import UNSIGNED\n                conf = Config(connect_timeout=self.connect_timeout,\n                              read_timeout=self.read_timeout,\n                              signature_version=UNSIGNED)\n                s3 = boto3.Session().client('s3', config=conf)\n            else:\n                conf = Config(connect_timeout=self.connect_timeout,\n                              read_timeout=self.read_timeout)\n                s3 = boto3.Session(self.key, self.secret,\n                                   **self.kwargs).client('s3', config=conf)\n            self._conn[tok] = s3\n        return self._conn[tok]\n\n    def __getstate__(self):\n        d = self.__dict__.copy()\n        del d['s3']\n        logger.debug(\"Serialize with state: %s\", d)\n        return d\n\n    def __setstate__(self, state):\n        self.__dict__.update(state)\n        self.s3 = self.connect(self.anon, self.key, self.secret, self.kwargs)\n\n    def open(self, path, mode='rb', block_size=4*1024**2):\n        \"\"\" Open a file for reading or writing\n\n        Parameters\n        ----------\n        path: string\n            Path of file on S3\n        mode: string\n            One of 'rb' or 'wb'\n        block_size: int\n            Size of data-node blocks if reading\n        \"\"\"\n        if 'b' not in mode:\n            raise NotImplementedError(\"Text mode not supported, use mode='%s'\"\n                                      \" and manage bytes\" % (mode[0] + 'b'))\n        return S3File(self, path, mode, block_size=block_size)\n\n    def _ls(self, path, refresh=False):\n        \"\"\" List files below path\n\n        Parameters\n        ----------\n        path : string/bytes\n            location at which to list files\n        refresh : bool (=False)\n            if False, look in local cache for file details first\n        \"\"\"\n        # Strip the scheme as a prefix; str.lstrip would strip a character set\n        if path.startswith('s3://'):\n            path = path[len('s3://'):]\n        path = path.lstrip('/')\n        bucket, key = split_path(path)\n        if bucket not in self.dirs or refresh:\n            if bucket == '':\n                # list of buckets\n                if self.anon:\n                    # cannot list buckets if not logged in\n                    return []\n                files = self.s3.list_buckets()['Buckets']\n                for f in files:\n                    f['Key'] = f['Name']\n                    f['Size'] = 0\n                    del f['Name']\n            else:\n                files = self.s3.list_objects(Bucket=bucket).get('Contents', [])\n                for f in files:\n                    f['Key'] = \"/\".join([bucket, f['Key']])\n            self.dirs[bucket] = list(sorted(files, key=lambda x: x['Key']))\n        files = self.dirs[bucket]\n        return files\n\n    def ls(self, path, detail=False):\n        if path.startswith('s3://'):\n            path = path[len('s3://'):]\n        path = path.rstrip('/')\n        try:\n            files = self._ls(path)\n        except ClientError:\n            files = []\n        if path:\n            pattern = re.compile(path + '/[^/]*.$')\n            files = [f for f in files if pattern.match(f['Key']) is not None]\n            if not files:\n                try:\n                    files = [self.info(path)]\n                except (OSError, IOError):\n                    files = []\n        if detail:\n            return files\n        else:\n            return [f['Key'] for f in files]\n\n    def info(self, path):\n        if path.startswith('s3://'):\n            path = path[len('s3://'):]\n        path = path.rstrip('/')\n        files = self._ls(path)\n        files = [f for f in files if f['Key'].rstrip('/') == path]\n        if len(files) == 1:\n            return files[0]\n        else:\n            raise IOError(\"File not found: %s\" %path)\n\n    def walk(self, path):\n        return [f['Key'] for f in self._ls(path) if f['Key'].rstrip('/'\n               ).startswith(path.rstrip('/') + '/')]\n\n    def glob(self, path):\n        \"\"\"\n        Find files by glob-matching.\n\n        Note that the bucket part of the path must not contain a \"*\"\n        \"\"\"\n        path0 = path\n        if path.startswith('s3://'):\n            path = path[len('s3://'):]\n        path = path.lstrip('/')\n        bucket, key = split_path(path)\n        if \"*\" in bucket:\n            raise ValueError('Bucket cannot contain a \"*\"')\n        if '*' not in path:\n            path = path.rstrip('/') + '/*'\n        if '/' in path[:path.index('*')]:\n            ind = path[:path.index('*')].rindex('/')\n            root = path[:ind+1]\n        else:\n            root = '/'\n        allfiles = 
self.walk(root)\n        pattern = re.compile(\"^\" + path.replace('//', '/')\n                             .rstrip('/')\n                             .replace('*', '[^/]*')\n                             .replace('?', '.') + \"$\")\n        out = [f for f in allfiles if re.match(pattern,\n               f.replace('//', '/').rstrip('/'))]\n        if not out:\n            out = self.ls(path0)\n        return out\n\n    def du(self, path, total=False, deep=False):\n        if deep:\n            files = self.walk(path)\n            files = [self.info(f) for f in files]\n        else:\n            files = self.ls(path, detail=True)\n        if total:\n            return sum(f.get('Size', 0) for f in files)\n        else:\n            return {p['Key']: p['Size'] for p in files}\n\n    def exists(self, path):\n        return bool(self.ls(path))\n\n    def cat(self, path):\n        with self.open(path, 'rb') as f:\n            return f.read()\n\n    def tail(self, path, size=1024):\n        \"\"\" Return last bytes of file \"\"\"\n        length = self.info(path)['Size']\n        if size > length:\n            return self.cat(path)\n        with self.open(path, 'rb') as f:\n            f.seek(length - size)\n            return f.read(size)\n\n    def head(self, path, size=1024):\n        \"\"\" Return first bytes of file \"\"\"\n        with self.open(path, 'rb', block_size=size) as f:\n            return f.read(size)\n\n    def read_block(self, fn, offset, length, delimiter=None):\n        \"\"\" Read a block of bytes from an S3 file\n\n        Starting at ``offset`` of the file, read ``length`` bytes.  If\n        ``delimiter`` is set then we ensure that the read starts and stops at\n        delimiter boundaries that follow the locations ``offset`` and ``offset\n        + length``.  If ``offset`` is zero then we start at zero.  The\n        bytestring returned WILL include the end delimiter string.\n\n        If offset+length is beyond the eof, reads to eof.\n\n        Parameters\n        ----------\n        fn: string\n            Path to filename on S3\n        offset: int\n            Byte offset to start read\n        length: int\n            Number of bytes to read\n        delimiter: bytes (optional)\n            Ensure reading starts and stops at delimiter bytestring\n\n        Examples\n        --------\n        >>> s3.read_block('data/file.csv', 0, 13)  # doctest: +SKIP\n        b'Alice, 100\\\\nBo'\n        >>> s3.read_block('data/file.csv', 0, 13, delimiter=b'\\\\n')  # doctest: +SKIP\n        b'Alice, 100\\\\nBob, 200\\\\n'\n\n        Use ``length=None`` to read to the end of the file.\n        >>> s3.read_block('data/file.csv', 0, None, delimiter=b'\\\\n')  # doctest: +SKIP\n        b'Alice, 100\\\\nBob, 200\\\\nCharlie, 300'\n\n        See Also\n        --------\n        distributed.utils.read_block\n        \"\"\"\n        with self.open(fn, 'rb') as f:\n            size = f.info()['Size']\n            if length is None:\n                length = size\n            if offset + length > size:\n                length = size - offset\n            bytes = read_block(f, offset, length, delimiter)\n        return bytes\n\n\nclass S3File(object):\n    \"\"\"\n    Cached read-only interface to a key in S3, behaving like a seekable file.\n\n    Optimized for a single contiguous block.\n    \"\"\"\n\n    def __init__(self, s3, path, mode='rb', block_size=4*2**20):\n        \"\"\"\n        Open S3 as a file. 
Data is only loaded and cached on demand.\n\n        Parameters\n        ----------\n        s3 : S3FileSystem\n            Connected filesystem instance\n        path : string\n            S3 location to access, like `bucket/key`\n        mode : string\n            Only 'rb' is supported\n        block_size : int\n            read-ahead size for finding delimiters\n        \"\"\"\n        self.mode = mode\n        if mode != 'rb':\n            raise NotImplementedError(\"File mode must be 'rb', not %s\" % mode)\n        self.path = path\n        bucket, key = split_path(path)\n        self.s3 = s3\n        self.size = self.info()['Size']\n        self.bucket = bucket\n        self.key = key\n        self.blocksize = block_size\n        self.cache = b\"\"\n        self.loc = 0\n        self.start = None\n        self.end = None\n        self.closed = False\n\n    def info(self):\n        return self.s3.info(self.path)\n\n    def tell(self):\n        return self.loc\n\n    def seek(self, loc, whence=0):\n        if whence == 0:\n            self.loc = loc\n        elif whence == 1:\n            self.loc += loc\n        elif whence == 2:\n            self.loc = self.size + loc\n        else:\n            raise ValueError(\"invalid whence (%s, should be 0, 1 or 2)\" % whence)\n        if self.loc < 0:\n            self.loc = 0\n        return self.loc\n\n    def _fetch(self, start, end):\n        if self.start is None and self.end is None:\n            # First read\n            self.start = start\n            self.end = end + self.blocksize\n            self.cache = _fetch_range(self.s3.s3, self.bucket, self.key,\n                                      start, self.end)\n        if start < self.start:\n            new = _fetch_range(self.s3.s3, self.bucket, self.key,\n                               start, self.start)\n            self.start = start\n            self.cache = new + self.cache\n        if end > self.end:\n            if end > self.size:\n                return\n            new = _fetch_range(self.s3.s3, self.bucket, self.key,\n                               self.end, end + self.blocksize)\n            self.end = end + self.blocksize\n            self.cache = self.cache + new\n\n    def read(self, length=-1):\n        \"\"\"\n        Return data from cache, or fetch pieces as necessary\n        \"\"\"\n        if self.mode != 'rb':\n            raise ValueError('File not in read mode')\n        if length < 0:\n            length = self.size\n        if self.closed:\n            raise ValueError('I/O operation on closed file.')\n        self._fetch(self.loc, self.loc + length)\n        out = self.cache[self.loc - self.start:\n                         self.loc - self.start + length]\n        self.loc += len(out)\n        return out\n\n    def flush(self):\n        pass\n\n    def close(self):\n        self.flush()\n        self.cache = None\n        self.closed = True\n\n    def __str__(self):\n        return \"<S3File %s/%s>\" % (self.bucket, self.key)\n\n    __repr__ = __str__\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, *args):\n        self.close()\n\n\n\ndef _fetch_range(client, bucket, key, start, end, max_attempts=10):\n    logger.debug(\"Fetch: %s/%s, %s-%s\", bucket, key, start, end)\n    for i in range(max_attempts):\n        try:\n            resp = client.get_object(Bucket=bucket, Key=key,\n                                     Range='bytes=%i-%i' % (start, end - 1))\n            return resp['Body'].read()\n        except boto3.s3.transfer.S3_RETRYABLE_ERRORS as e:\n            logger.debug('Exception %s on S3 download, retrying', e,\n                         exc_info=True)\n            continue\n        except ClientError as e:\n            if e.response['Error'].get('Code', 'Unknown') in ['416', 'InvalidRange']:\n                return b''\n            else:\n                raise\n    raise RuntimeError(\"Max number of S3 retries exceeded\")\n" }, { "alpha_fraction": 0.6408717632293701, "alphanum_fraction": 0.6481364369392395, "avg_line_length": 27.53043556213379, "blob_id": "af2b3c28def20b32d79865660f83dfecce3d1070", "content_id": "4ac5bc4eff81dcc0dfe398a11dd44f1c0b271ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3166, "license_type": "no_license", "max_line_length": 78, "num_lines": 115, "path": "/distributed/protocol.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "\"\"\"\nThe distributed message protocol consists of the following parts:\n\n1. 
The length of the header, stored as a uint32\n2. The header, stored as msgpack.\n If there are no fields in the header then we skip it entirely.\n3. The payload, stored as possibly compressed msgpack\n4. A sentinel value\n\n**Header**\n\nThe Header contains the following fields:\n\n* **compression**: string, optional\n One of the following: ``'snappy', 'lz4', 'zlib'`` or missing for None\n\n**Payload**\n\nThe payload is any msgpack serializable value. It may be compressed based\non the header.\n\n**Sentinel**\n\nWe often terminate each message with a sentinel value. This happens\noutside of this module though and is not baked in.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nfrom io import BytesIO\nimport struct\n\ntry:\n import pandas.msgpack as msgpack\nexcept ImportError:\n import msgpack\n\nfrom toolz import first, keymap\n\nfrom .utils import ignoring\nfrom .compatibility import unicode\n\n\ncompressions = {}\n\ndefault_compression = None\n\nwith ignoring(ImportError):\n import zlib\n compressions['zlib'] = {'compress': zlib.compress,\n 'decompress': zlib.decompress}\n\nwith ignoring(ImportError):\n import snappy\n compressions['snappy'] = {'compress': snappy.compress,\n 'decompress': snappy.decompress}\n default_compression = 'snappy'\n\nwith ignoring(ImportError):\n import lz4\n compressions['lz4'] = {'compress': lz4.LZ4_compress,\n 'decompress': lz4.LZ4_uncompress}\n default_compression = 'lz4'\n\n\ndef dumps(msg):\n \"\"\" Transform Python value to bytestream suitable for communication \"\"\"\n header = {}\n\n payload = msgpack.dumps(msg, use_bin_type=True)\n\n if len(payload) > 1e3 and default_compression:\n payload = compressions[default_compression]['compress'](payload)\n header['compression'] = default_compression\n\n if header:\n header_bytes = msgpack.dumps(header, use_bin_type=True)\n else:\n header_bytes = b''\n out = BytesIO()\n out.write(struct.pack('I', len(header_bytes)))\n out.write(header_bytes)\n out.write(payload)\n\n out.seek(0)\n return out.read()\n\n\ndef loads(b):\n \"\"\" Transform bytestream back into Python value \"\"\"\n header_length, = struct.unpack('I', b[:4])\n if header_length:\n header = msgpack.loads(b[4: header_length + 4], encoding='utf8')\n else:\n header = {}\n payload = b[header_length + 4:]\n\n if header.get('compression'):\n try:\n decompress = compressions[header['compression']]['decompress']\n payload = decompress(payload)\n except KeyError:\n raise ValueError(\"Data is compressed as %s but we don't have this\"\n \" installed\" % header['compression'].decode())\n\n msg = msgpack.loads(payload, encoding='utf8')\n\n if header.get('decode'):\n if isinstance(msg, dict) and msg:\n msg = keymap(bytes.decode, msg)\n elif isinstance(msg, bytes):\n msg = msg.decode()\n else:\n raise TypeError(\"Asked to decode a %s\" % type(msg).__name__)\n\n return msg\n" }, { "alpha_fraction": 0.5202516913414001, "alphanum_fraction": 0.5623279809951782, "avg_line_length": 33.83561706542969, "blob_id": "c76188753c069460ba152b7110e1b577a83ffdfd", "content_id": "ecebb778c2036d0ecebaa62e391e27d2127ddbfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5086, "license_type": "no_license", "max_line_length": 93, "num_lines": 146, "path": "/distributed/tests/test_s3fs.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pytest\nfrom distributed.s3fs import S3FileSystem\nfrom distributed.s3 import seek_delimiter\nfrom 
distributed.utils_test import slow\nfrom distributed.utils import ignoring\n\nfrom botocore.exceptions import NoCredentialsError\n\n# These get mirrored on s3://distributed-test/\ntest_bucket_name = 'distributed-test'\nfiles = {'test/accounts.1.json': (b'{\"amount\": 100, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 200, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 400, \"name\": \"Dennis\"}\\n'),\n 'test/accounts.2.json': (b'{\"amount\": 500, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 600, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 800, \"name\": \"Dennis\"}\\n')}\n\ncsv_files = {'2014-01-01.csv': (b'name,amount,id\\n'\n b'Alice,100,1\\n'\n b'Bob,200,2\\n'\n b'Charlie,300,3\\n'),\n '2014-01-02.csv': (b'name,amount,id\\n'),\n '2014-01-03.csv': (b'name,amount,id\\n'\n b'Dennis,400,4\\n'\n b'Edith,500,5\\n'\n b'Frank,600,6\\n')}\n\[email protected]_fixture\ndef s3():\n # could do with a bucket with write privileges.\n yield S3FileSystem(anon=True)\n\n\ndef test_non_anonymous_access():\n with ignoring(NoCredentialsError):\n fs = S3FileSystem(anon=False)\n fs.ls('distributed-test')\n\n\ndef test_s3_file_access(s3):\n fn = 'distributed-test/nested/file1'\n data = b'hello\\n'\n assert s3.cat(fn) == data\n assert s3.head(fn, 3) == data[:3]\n assert s3.tail(fn, 3) == data[-3:]\n assert s3.tail(fn, 10000) == data\n\n\ndef test_s3_file_info(s3):\n fn = 'distributed-test/nested/file1'\n data = b'hello\\n'\n assert fn in s3.walk('distributed-test')\n assert s3.exists(fn)\n assert not s3.exists(fn+'another')\n assert s3.info(fn)['Size'] == len(data)\n with pytest.raises((OSError, IOError)):\n s3.info(fn+'another')\n\n\ndef test_du(s3):\n d = s3.du(test_bucket_name, deep=True)\n assert all(isinstance(v, int) and v >= 0 for v in d.values())\n assert 'distributed-test/nested/file1' in d\n\n assert s3.du(test_bucket_name + '/test/', total=True) ==\\\n sum(map(len, files.values()))\n\n\ndef test_s3_ls(s3):\n fn = 'distributed-test/nested/file1'\n assert fn not in s3.ls('distributed-test/')\n assert fn in s3.ls('distributed-test/nested/')\n assert fn in s3.ls('distributed-test/nested')\n assert s3.ls('s3://distributed-test/nested/') == s3.ls('distributed-test/nested')\n\n\ndef test_s3_ls_detail(s3):\n L = s3.ls('distributed-test/nested', detail=True)\n assert all(isinstance(item, dict) for item in L)\n\n\ndef test_s3_glob(s3):\n fn = 'distributed-test/nested/file1'\n assert fn not in s3.glob('distributed-test/')\n assert fn not in s3.glob('distributed-test/*')\n assert fn in s3.glob('distributed-test/nested')\n assert fn in s3.glob('distributed-test/nested/*')\n assert fn in s3.glob('distributed-test/nested/file*')\n assert fn in s3.glob('distributed-test/*/*')\n\n\ndef test_get_list_of_summary_objects(s3):\n L = s3.ls(test_bucket_name + '/test')\n\n assert len(L) == 2\n assert [l.lstrip(test_bucket_name).lstrip('/') for l in sorted(L)] == sorted(list(files))\n\n L2 = s3.ls('s3://' + test_bucket_name + '/test')\n\n assert L == L2\n\n\ndef test_read_keys_from_bucket(s3):\n for k, data in files.items():\n file_contents = s3.cat('/'.join([test_bucket_name, k]))\n assert file_contents == data\n\n assert (s3.cat('/'.join([test_bucket_name, k])) ==\n s3.cat('s3://' + '/'.join([test_bucket_name, k])))\n\n\n@slow\ndef test_seek_delimiter(s3):\n fn = 'test/accounts.1.json'\n data = files[fn]\n with s3.open('/'.join([test_bucket_name, fn])) as f:\n seek_delimiter(f, b'}', 0)\n assert f.tell() == 0\n f.seek(1)\n seek_delimiter(f, 
b'}', 5)\n assert f.tell() == data.index(b'}') + 1\n seek_delimiter(f, b'\\n', 5)\n assert f.tell() == data.index(b'\\n') + 1\n f.seek(1, 1)\n ind = data.index(b'\\n') + data[data.index(b'\\n')+1:].index(b'\\n') + 1\n seek_delimiter(f, b'\\n', 5)\n assert f.tell() == ind + 1\n\n\ndef test_read_s3_block(s3):\n import io\n data = files['test/accounts.1.json']\n lines = io.BytesIO(data).readlines()\n path = 'distributed-test/test/accounts.1.json'\n assert s3.read_block(path, 1, 35, b'\\n') == lines[1]\n assert s3.read_block(path, 0, 30, b'\\n') == lines[0]\n assert s3.read_block(path, 0, 35, b'\\n') == lines[0] + lines[1]\n assert s3.read_block(path, 0, 5000, b'\\n') == data\n assert len(s3.read_block(path, 0, 5)) == 5\n assert len(s3.read_block(path, 4, 5000)) == len(data) - 4\n assert s3.read_block(path, 5000, 5010) == b''\n\n assert s3.read_block(path, 5, None) == s3.read_block(path, 5, 1000)\n" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.7755101919174194, "avg_line_length": 23.5, "blob_id": "7d23908b58148cd6f0fa095b80578ee9b7055101", "content_id": "e01bb3ce3b620cfc1703e17edd51a2612b0ed1c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/distributed/diagnostics/bokeh/main.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "from bokeh.plotting import curdoc\ndoc = curdoc()\n" }, { "alpha_fraction": 0.7085124850273132, "alphanum_fraction": 0.710232138633728, "avg_line_length": 29.605262756347656, "blob_id": "27d543f4b7d8dce53fa447e4644301da62fdcfd1", "content_id": "382ab783247ef10ec988904b88b5cf6f7a22c424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/distributed/diagnostics/bokeh/status/main.py", "repo_name": "dela3499/distributed", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom __future__ import print_function, division, absolute_import\n\nimport json\n\nfrom bokeh.plotting import curdoc, vplot\nfrom tornado import gen\n\nfrom distributed.diagnostics.status_monitor import (\n worker_table_plot, worker_table_update, task_table_plot,\n task_table_update)\nfrom distributed.utils import log_errors\nimport distributed.diagnostics\n\nmessages = distributed.diagnostics.messages # global message store\n\ndoc = curdoc()\n\nworker_source, worker_table = worker_table_plot()\[email protected]\ndef worker_update():\n with log_errors():\n yield messages['workers']['condition'].wait()\n msg = messages['workers']['deque'][-1]\n worker_table_update(worker_source, msg)\ndoc.add_periodic_callback(worker_update, messages['workers']['interval'])\n\ntask_source, task_table = task_table_plot()\[email protected]\ndef task_update():\n with log_errors():\n yield messages['tasks']['condition'].wait()\n msg = messages['tasks']['deque'][-1]\n task_table_update(task_source, msg)\ndoc.add_periodic_callback(task_update, messages['tasks']['interval'])\n\ndoc.add_root(vplot(worker_table, task_table))\n" } ]
10
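The protocol.py module in the record above spells out the wire framing: a uint32 header length, a msgpack-encoded header (skipped entirely when there are no header fields), then the msgpack payload, optionally compressed according to the header's 'compression' field. A minimal sketch of that framing round-trip, assuming only the msgpack package and omitting the compression branch; the helper names frame and unframe are illustrative, not part of the library:

import struct

import msgpack

def frame(msg):
    # No header fields in this sketch, so the header is skipped (length 0).
    header_bytes = b''
    payload = msgpack.dumps(msg, use_bin_type=True)
    return struct.pack('I', len(header_bytes)) + header_bytes + payload

def unframe(b):
    # Read the uint32 header length, then skip past the header to the payload.
    header_length, = struct.unpack('I', b[:4])
    payload = b[4 + header_length:]
    # encoding='utf8' mirrors the source; msgpack >= 1.0 uses raw=False instead.
    return msgpack.loads(payload, encoding='utf8')

assert unframe(frame({'op': 'ping'})) == {'op': 'ping'}

The sentinel mentioned in the module docstring is deliberately absent here, since the source notes it is appended outside the module.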
engahmed/mycity
https://github.com/engahmed/mycity
9a36ab38273ca34abef8a3eb2f3da3a34dd3dbf4
ec47dc0fd155dbb6eebaf1df8d1dff36b449ae19
fdea72baac914e22a028bdc3a81f519603060ace
refs/heads/master
2023-03-22T14:02:55.644602
2021-03-21T09:02:29
2021-03-21T09:02:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7746815085411072, "alphanum_fraction": 0.7850318551063538, "avg_line_length": 29.560976028442383, "blob_id": "86c9e4aa2dff2f4c920e9be29fc0a28a9bf1672b", "content_id": "3d62105b3d546b97e3adbd3f5647d08f496b42d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1256, "license_type": "no_license", "max_line_length": 123, "num_lines": 41, "path": "/README.md", "repo_name": "engahmed/mycity", "src_encoding": "UTF-8", "text": "# Project Hoenggerberg presenting mycity\n\n## Pitch\n[Pitch Video](https://youtu.be/NLloPttvuBc)\n\n## App Download\n[APK Download](https://drive.google.com/file/d/1LxJz_TxOY1RHyw1U-DNAfZUCI6txRv3c/view?usp=sharing)\nThe App is also in this repository\n\n## App Tutorial\n[Tutorial Video](https://drive.google.com/file/d/1z-TSvOg7sYJ5FG9NdPjAWQ-gRqhZWpVK/view?usp=sharing)\n\n## Pitch Slides\n[Slides and images on google drive](https://drive.google.com/drive/folders/1XqAuvPVuYwaKigtm75qj0JiUWNSCEcPL?usp=sharing)\n\n\n\n# Main Features:\n\n### Direct Participation\nParticipants can submit their own ideas and proposals. Other users can vote for and against them and make a proposal.\n\n### Population Feedback\nProjects and ideas can be voted and commented on\n\n### Direct Budget Participation\nEvery citizen can distribute a small amount of their taxes to projects they like\n\n### Sustainability Label\nBy making citizens aware of the environmental impact of project proposals a higher degree of sustainability can be achieved\n\n### Intuitive App Design\nIntuitive App with different ways to browse the projects (on a map, as a list of all projects or only favourites)\n\n\n\n\n\n# Tech\n- Cross Platform Flutter App for Android, iOS, and Web\n- Firebase and Firestore Cloud Services\n\n\n\n" }, { "alpha_fraction": 0.6009648442268372, "alphanum_fraction": 0.6788421869277954, "avg_line_length": 22.80327796936035, "blob_id": "e7c41e94c89c3813482f33755605693df0936847", "content_id": "3d4f30dbec295dcfd9c2c8802aefaed467730277", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 71, "num_lines": 61, "path": "/back/test.py", "repo_name": "engahmed/mycity", "src_encoding": "UTF-8", "text": "import pyrebase\n\nconfig = {\n \"apiKey\": \"AIzaSyDYDsId2B8_J6G5Pr7qn7gN6RGbuZ-2_r4\",\n \"authDomain\": \"starthack-cedf4.firebaseapp.com\",\n \"databaseURL\": \"https://starthack-cedf4-default-rtdb.firebaseio.com\",\n \"projectId\": \"starthack-cedf4\",\n \"storageBucket\": \"starthack-cedf4.appspot.com\",\n \"messagingSenderId\": \"274912792343\",\n \"appId\": \"1:274912792343:web:41b7867a7f4f95f7a3b152\",\n \"measurementId\": \"G-LW7WLYJ3F4\"\n}\n\nconfiglocal = {\n \"apiKey\": \"AIzaSyDYDsId2B8_J6G5Pr7qn7gN6RGbuZ-2_r4\",\n \"authDomain\": \"starthack-cedf4.firebaseapp.com\",\n \"databaseURL\": \"http://starthack-cedf4-default-rtdb.localhost:9000\",\n \"projectId\": \"starthack-cedf4\",\n \"storageBucket\": \"starthack-cedf4.appspot.com\",\n \"messagingSenderId\": \"274912792343\",\n \"appId\": \"1:274912792343:web:41b7867a7f4f95f7a3b152\",\n \"measurementId\": \"G-LW7WLYJ3F4\"\n}\n\nfirebase = pyrebase.initialize_app(configlocal)\n\ndb = firebase.database()\n# db.useEmulator(\"localhost\", 9000);\n\n\nimport hashlib\nimport random\nid_counter = 0\n\ndef dummy_id():\n global id_counter\n id_counter+=1\n m = hashlib.sha256(str(id_counter).encode())\n return m.hexdigest()\n\ndef dummy_project():\n return 
dummy_id(), {\n        \"level\": 0,\n        \"votes\": 5\n    }\n\n\n# data = {\"name\": \"Mortimer 'Morty' Smith\"}\n# db.child(\"users\").child(\"Morty\").set(data)\n\nprojects = [dummy_project() for i in range(2)]\n\nfor id_, data in projects:\n\n    db.child(\"projects\").child(id_).set(data)\n\n\n\nid_, payload = projects[0]\npayload[\"votes\"]=11\ndb.child(\"projects\").child(id_).set(payload)" }, { "alpha_fraction": 0.6541627049446106, "alphanum_fraction": 0.6553109884262085, "avg_line_length": 37.1459846496582, "blob_id": "fc7e847fa2a2647754a83bd9d3", "content_id": "b4071daafb131974c8fb00438fef187232b4b372", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5225, "license_type": "no_license", "max_line_length": 99, "num_lines": 137, "path": "/functions/index.js", "repo_name": "engahmed/mycity", "src_encoding": "UTF-8", "text": "const functions = require(\"firebase-functions\");\n\n// // Create and Deploy Your First Cloud Functions\n// // https://firebase.google.com/docs/functions/write-firebase-functions\n//\n// exports.helloWorld = functions.https.onRequest((request, response) => {\n//   functions.logger.info(\"Hello logs!\", {structuredData: true});\n//   response.send(\"Hello from Firebase!\");\n// });\n\n// The Firebase Admin SDK to access Firestore.\nconst admin = require('firebase-admin');\nadmin.initializeApp();\n\n\n// Take the text parameter passed to this HTTP endpoint and insert it into \n// Firestore under the path /messages/:documentId/original\nexports.addMessage = functions.https.onRequest(async (req, res) => {\n    // Grab the text parameter.\n    const original = req.query.text;\n    // Push the new message into Firestore using the Firebase Admin SDK.\n    const writeResult = await admin.firestore().collection('messages').add({original: original});\n    // Send back a message that we've successfully written the message\n    res.json({result: `Message with ID: ${writeResult.id} added.`});\n  });\n\n\n// Listens for new messages added to /messages/:documentId/original and creates an\n// uppercase version of the message to /messages/:documentId/uppercase\nexports.makeUppercase = functions.firestore.document('/messages/{documentId}')\n.onCreate((snap, context) => {\n    // Grab the current value of what was written to Firestore.\n    const original = snap.data().original;\n\n    // Access the parameter `{documentId}` with `context.params`\n    functions.logger.log('Uppercasing', context.params.documentId, original);\n    \n    const uppercase = original.toUpperCase();\n    \n    // You must return a Promise when performing asynchronous tasks inside a Functions such as\n    // writing to Firestore.\n    // Setting an 'uppercase' field in Firestore document returns a Promise.\n    return snap.ref.set({uppercase}, {merge: true});\n});\n\nvar db = admin.database();\n\nexports.level_increase = functions.database.ref('/projects/{project_id}')\n    .onWrite(async (change, context) => {\n      const project_id = context.params.project_id;\n      \n      \n      // document was just created; nothing to compare against\n      if(!change.before.val())\n        return;\n\n      const new_votes = change.after.val().votes;\n      const old_votes = change.before.val().votes;\n      const level = change.after.val().level;\n\n      // bail out unless the vote count increased\n      if (!(new_votes > old_votes))\n        return;\n      \n      if (new_votes>10 && level<1){\n        let payload = change.after.val()\n        payload.level = 1 \n        functions.logger.log(\"update\",change);\n        db.ref(\"/projects/\"+project_id).set(payload)\n      }\n      \n    });\n\n\n\nexports.sendFollowerNotification = 
functions.database.ref('/followers/{followedUid}/{followerUid}')\n .onWrite(async (change, context) => {\n const followerUid = context.params.followerUid;\n const followedUid = context.params.followedUid;\n // If un-follow we exit the function.\n if (!change.after.val()) {\n return console.log('User ', followerUid, 'un-followed user', followedUid);\n }\n console.log('We have a new follower UID:', followerUid, 'for user:', followedUid);\n\n // Get the list of device notification tokens.\n const getDeviceTokensPromise = admin.database()\n .ref(`/users/${followedUid}/notificationTokens`).once('value');\n\n // Get the follower profile.\n const getFollowerProfilePromise = admin.auth().getUser(followerUid);\n\n // The snapshot to the user's tokens.\n let tokensSnapshot;\n\n // The array containing all the user's tokens.\n let tokens;\n\n const results = await Promise.all([getDeviceTokensPromise, getFollowerProfilePromise]);\n tokensSnapshot = results[0];\n const follower = results[1];\n\n // Check if there are any device tokens.\n if (!tokensSnapshot.hasChildren()) {\n return console.log('There are no notification tokens to send to.');\n }\n console.log('There are', tokensSnapshot.numChildren(), 'tokens to send notifications to.');\n console.log('Fetched follower profile', follower);\n\n // Notification details.\n const payload = {\n notification: {\n title: 'You have a new follower!',\n body: `${follower.displayName} is now following you.`,\n icon: follower.photoURL\n }\n };\n\n // Listing all tokens as an array.\n tokens = Object.keys(tokensSnapshot.val());\n // Send notifications to all tokens.\n const response = await admin.messaging().sendToDevice(tokens, payload);\n // For each message check if there was an error.\n const tokensToRemove = [];\n response.results.forEach((result, index) => {\n const error = result.error;\n if (error) {\n console.error('Failure sending notification to', tokens[index], error);\n // Cleanup the tokens who are not registered anymore.\n if (error.code === 'messaging/invalid-registration-token' ||\n error.code === 'messaging/registration-token-not-registered') {\n tokensToRemove.push(tokensSnapshot.ref.child(tokens[index]).remove());\n }\n }\n });\n return Promise.all(tokensToRemove);\n });" } ]
3
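The level_increase Cloud Function in functions/index.js above encodes a simple promotion rule: once a project's vote count rises above 10 and its level is still below 1, the project record is rewritten with level 1. A sketch of the same rule expressed client-side with the pyrebase handle built in back/test.py; promote_if_needed is a hypothetical helper for illustration, not part of the repo:

def promote_if_needed(db, project_id):
    # db is a pyrebase database handle, as created in back/test.py.
    project = db.child("projects").child(project_id).get().val()
    if project is None:
        return
    # Mirror the threshold in exports.level_increase: votes > 10, level < 1.
    if project.get("votes", 0) > 10 and project.get("level", 0) < 1:
        project["level"] = 1
        db.child("projects").child(project_id).set(project)

In the app itself this logic runs server-side on every write to /projects/{project_id}, which keeps the promotion decision out of the client's hands; the test script merely sets votes to 11 to trigger it.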
GeoICON/edugis-qgis-plugin
https://github.com/GeoICON/edugis-qgis-plugin
0f2f721779f7511b16be9c3acac12d1982a1500b
d7ffa15777e088d0f757ada636c296888b5cd896
80678e447240b2d390d7b1164c3be9e2d7bb3f48
refs/heads/master
2016-09-07T18:51:31.468051
2015-03-25T15:28:21
2015-03-25T15:28:21
32,873,083
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.725800633430481, "alphanum_fraction": 0.747750997543335, "avg_line_length": 46.89655303955078, "blob_id": "b3a1b9ade3aedb172c983a7be591b0e2c0ddea25", "content_id": "08980907dc30b4fab829c53dceb71eed1dc9621d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2779, "license_type": "no_license", "max_line_length": 103, "num_lines": 58, "path": "/ui_layer_transparency_widget.py", "repo_name": "GeoICON/edugis-qgis-plugin", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'ui_layer_transparency_widget.ui'\n#\n# Created: Sat Mar 14 06:11:09 2015\n# by: PyQt4 UI code generator 4.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_LayerTransparencyWidget(object):\n def setupUi(self, LayerTransparencyWidget):\n LayerTransparencyWidget.setObjectName(_fromUtf8(\"LayerTransparencyWidget\"))\n LayerTransparencyWidget.resize(418, 70)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(LayerTransparencyWidget.sizePolicy().hasHeightForWidth())\n LayerTransparencyWidget.setSizePolicy(sizePolicy)\n LayerTransparencyWidget.setMinimumSize(QtCore.QSize(0, 70))\n LayerTransparencyWidget.setMaximumSize(QtCore.QSize(16777215, 70))\n LayerTransparencyWidget.setStyleSheet(_fromUtf8(\"\"))\n self.verticalLayout = QtGui.QVBoxLayout(LayerTransparencyWidget)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem)\n self.label = QtGui.QLabel(LayerTransparencyWidget)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.verticalLayout.addWidget(self.label)\n self.transparencySlider = QtGui.QSlider(LayerTransparencyWidget)\n self.transparencySlider.setMaximum(100)\n self.transparencySlider.setOrientation(QtCore.Qt.Horizontal)\n self.transparencySlider.setObjectName(_fromUtf8(\"transparencySlider\"))\n self.verticalLayout.addWidget(self.transparencySlider)\n spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem1)\n\n self.retranslateUi(LayerTransparencyWidget)\n QtCore.QMetaObject.connectSlotsByName(LayerTransparencyWidget)\n\n def retranslateUi(self, LayerTransparencyWidget):\n LayerTransparencyWidget.setWindowTitle(_translate(\"LayerTransparencyWidget\", \"Form\", None))\n self.label.setText(_translate(\"LayerTransparencyWidget\", \"Layer transparency:\", None))\n\n" }, { "alpha_fraction": 0.7075852155685425, "alphanum_fraction": 0.7259069085121155, "avg_line_length": 44.46666717529297, "blob_id": "7c09a245415c08f9908a666dc604a9e8f0d62122", "content_id": "47da7c7203c270919198e706148778e62aa2287d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2729, "license_type": "no_license", "max_line_length": 102, 
"num_lines": 60, "path": "/ui_edugis_layers_dialog.py", "repo_name": "GeoICON/edugis-qgis-plugin", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'ui_edugis_layers_dialog.ui'\n#\n# Created: Sat Mar 14 06:11:09 2015\n# by: PyQt4 UI code generator 4.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_EduGisLayersDialog(object):\n def setupUi(self, EduGisLayersDialog):\n EduGisLayersDialog.setObjectName(_fromUtf8(\"EduGisLayersDialog\"))\n EduGisLayersDialog.resize(480, 600)\n EduGisLayersDialog.setMinimumSize(QtCore.QSize(400, 400))\n self.verticalLayout = QtGui.QVBoxLayout(EduGisLayersDialog)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.treeWidget = QtGui.QTreeWidget(EduGisLayersDialog)\n self.treeWidget.setObjectName(_fromUtf8(\"treeWidget\"))\n self.treeWidget.headerItem().setText(0, _fromUtf8(\"1\"))\n self.treeWidget.header().setVisible(False)\n self.verticalLayout.addWidget(self.treeWidget)\n self.widget = QtGui.QWidget(EduGisLayersDialog)\n self.widget.setObjectName(_fromUtf8(\"widget\"))\n self.horizontalLayout = QtGui.QHBoxLayout(self.widget)\n self.horizontalLayout.setMargin(0)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.addButton = QtGui.QPushButton(self.widget)\n self.addButton.setObjectName(_fromUtf8(\"addButton\"))\n self.horizontalLayout.addWidget(self.addButton)\n self.cancelButton = QtGui.QPushButton(self.widget)\n self.cancelButton.setDefault(False)\n self.cancelButton.setObjectName(_fromUtf8(\"cancelButton\"))\n self.horizontalLayout.addWidget(self.cancelButton)\n self.verticalLayout.addWidget(self.widget)\n\n self.retranslateUi(EduGisLayersDialog)\n QtCore.QMetaObject.connectSlotsByName(EduGisLayersDialog)\n\n def retranslateUi(self, EduGisLayersDialog):\n EduGisLayersDialog.setWindowTitle(_translate(\"EduGisLayersDialog\", \"Add EduGIS Layers\", None))\n self.addButton.setText(_translate(\"EduGisLayersDialog\", \"Add\", None))\n self.cancelButton.setText(_translate(\"EduGisLayersDialog\", \"Cancel\", None))\n\n" } ]
2
mi-utrgv/test
https://github.com/mi-utrgv/test
9be7431684bab6c8211dc7bfa211ce03a32d732c
82e5b85afb6c42f03466bf18bf16f52fa26544d5
7a2f470421d2dae4bb6f9c4333121fa1bd957231
refs/heads/main
2023-07-28T22:40:53.433806
2021-09-23T20:26:35
2021-09-23T20:38:38
409,734,652
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.591796875, "alphanum_fraction": 0.599609375, "avg_line_length": 28.764705657958984, "blob_id": "6a8aea53d9c995d7c7d232beb7789c8335b86c3d", "content_id": "b69c36f929068809cf97f390e25cf44dafdab0ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 86, "num_lines": 17, "path": "/car.py", "repo_name": "mi-utrgv/test", "src_encoding": "UTF-8", "text": "from operator import attrgetter\n\nclass Car:\n def __init__(self, brand, model, year):\n self.brand = brand\n self.model = model\n self.year = year\n\n\nclass Minivan(Car):\n def __init__(self, brand, model, year, hasASD):\n super().__init__(brand, model, year)\n self.hasASD = hasASD\n\nm = Minivan('Honda', 'Odyssey', '2021', True)\nprint(getattr(m, 'brand'),getattr(m, 'model'),getattr(m, 'year'),getattr(m, 'hasASD'))\nprint(attrgetter('brand', 'model', 'year', 'hasASD')(m))\n\n\n\n\n\n\n" } ]
1
chadsfatherlali/colorama
https://github.com/chadsfatherlali/colorama
d6e562e455715f2dc3ea733c3280a34a3227c221
a96c4e21eaf96181214ff0185cee55bf01fa4522
2cf4780c674afe283d04d37df912ca15055ecc8d
refs/heads/master
2021-01-13T02:26:56.581406
2014-06-06T14:06:30
2014-06-06T14:06:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5803907513618469, "alphanum_fraction": 0.5868434906005859, "avg_line_length": 50.66666793823242, "blob_id": "f7d89d2af61b913eabb8715d8664f4e6e6d92fb5", "content_id": "dd1a8f56fdd5f34be151a1d7453f089b854bd79a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5579, "license_type": "no_license", "max_line_length": 256, "num_lines": 108, "path": "/index.php", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "<? \ninclude \"s3helper.php\";\n\n$s3h->minifyHTML(\"inicio\");\n?>\n\n<!doctype html>\n<html lang=\"es\" ng-app=\"App\">\n<head>\n <base href=\"index.php\" />\n <meta charset=\"UTF-8\">\n <title>Colorama Landings</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"bootstrap/css/bootstrap.min.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"bootstrap/css/bootstrap-theme.min.css\"> \n <link rel=\"stylesheet\" type=\"text/css\" href=\"modulos/tree/abn_tree.css\"> \n <link rel=\"stylesheet\" type=\"text/css\" href=\"vendor/chosen.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"chosen-spinner.css\">\n <link href=\"colorpicker.css\" rel=\"stylesheet\">\n <link href=\"stylesheets/screen.css\" rel=\"stylesheet\">\n\n <script type=\"text/javascript\" src=\"http://ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js\"></script> \n <script type=\"text/javascript\" src=\"https://ajax.googleapis.com/ajax/libs/angularjs/1.2.12/angular.min.js\"></script>\n <script type=\"text/javascript\" src=\"http://code.angularjs.org/1.2.7/angular-route.min.js\"></script>\n <script type=\"text/javascript\" src=\"http://code.angularjs.org/1.2.15/angular-sanitize.min.js\"></script> \n <script type=\"text/javascript\" src=\"vendor/chosen.jquery.js\"></script>\n <script type=\"text/javascript\" src=\"chosen.js\"></script> \n <script type=\"text/javascript\" src=\"bootstrap-colorpicker-module.js\"></script>\n <script type=\"text/javascript\" src=\"modulos/tree/abn_tree_directive.js\"></script>\n\n <script type=\"text/javascript\" src=\"ngDirectives/ngDirectives.js\"></script>\n <script type=\"text/javascript\" src=\"ngFactories/ngFactories.js\"></script>\n <script type=\"text/javascript\" src=\"ngControllers/ngControllers.js\"></script>\n \n <script type=\"text/javascript\" src=\"var.js.php\"></script>\n\n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n\n ga('create', 'UA-51155491-1', 'egtelecom.es');\n ga('send', 'pageview');\n </script>\n</head>\n<body ng-controller=\"mainController\">\n <div id=\"loader\" ng-if=\"listo\" ng-include=\"'ngincludes/loader.html'\"></div>\n\n <header ng-class=\"{menudesplegado: menudesplegado == true}\">\n <div id=\"opciones\" ng-include=\"'ngincludes/opciones.html'\"></div>\n <a id=\"menu\" ng-click=\"desplegarmenu($event)\">OPCIONES BUCKETS</a>\n </header>\n \n <div id=\"wrapper\">\n <div class=\"modulos formularioHeader\">\n <h3>Portales disponibles:</h3>\n <select chosen no-results-text=\"'No hemos encontrado nada'\" data-placeholder=\"Escoge un Portal\" id=\"portalSelect\" ng-model=\"portal\" ng-change=\"checkPortal(portal)\" ng-options=\"(p.w_alias + ' -- ' + p.w_id) for p in Portales\">\n <option value=\"\"></option>\n </select>\n \n <!-- <h3>Landings Asociadas:</h3>\n 
<select chosen no-results-text=\"'No hemos encontrado nada'\" data-placeholder=\"Escoge una landing\" id=\"landingSelect\" ng-model=\"landing\" ng-change=\"setFolderSkins(landing)\" ng-options=\"(l.land_id + ' -- ' + l.land_nombre ) for l in Landings\">\n <option value=\"\"></option>\n </select> -->\n\n <h3>Skins disponibles:</h3>\n <select chosen no-results-text=\"'No hemos encontrado nada'\" data-placeholder=\"Escoge un Skin\" id=\"skinsSelect\" ng-model=\"skin\" ng-change=\"setSkin(skin)\" ng-options=\"s for s in Skins\">\n <option value=\"\"></option>\n </select>\n\n <div class=\"replica\" ng-if=\"Mcolorama.replica\"><strong>REPLICA:</strong> {{Mcolorama.replica}}</div>\n </div>\n \n <div ng-include=\"'ngincludes/formularioprincipal.html'\"></div>\n\n <div class=\"modulos\" id=\"modulo-dummy\"> \n <!-- <select ng-model=\"cartaPago\" ng-change=\"setCartaPago()\" ng-options=\"p.nombre for p in cartasPago\"></select> -->\n \n <fieldset>\n <legend>Todos los skins disponibles</legend>\n \n <div>\n <button id=\"borrarSkin\" ng-if=\"permitidoBorrar\" confirmed-click=\"borrarSkin()\" ng-confirm-click>Borrar - Skin</button>\n <button id=\"descargarIMG\" ng-if=\"Mcolorama.dummyimg_backgroundimage\" ng-click=\"getIMG()\">Descargar - Imagen BG</button>\n <button id=\"descargarSkin\" ng-if=\"permitidoDescargar\" ng-click=\"descargarSkin()\">Descargar - Skin</button>\n </div>\n \n <select id=\"todoslosskindisponibles\" chosen no-results-text=\"'No hemos encontrado nada'\" data-placeholder=\"Escoge un Skin\" ng-model=\"skin\" ng-change=\"setSkin(skin)\" ng-options=\"s for s in AllSkins\">\n <option value=\"\"></option>\n </select>\n </fieldset>\n\n <div ng-view></div>\n\n <div class=\"modulos\" id=\"modulo-json\">\n <fieldset>\n <legend>Dummy JSON:</legend>\n <p>{{Mcolorama|json}}</p>\n </fieldset>\n </div>\n </div>\n </div>\n\n<script type=\"text/javascript\" src=\"scripts.js\"></script>\n</body>\n</html>\n\n<? 
$s3h->minifyHTML(\"fin\"); ?>" }, { "alpha_fraction": 0.4393126964569092, "alphanum_fraction": 0.4435778558254242, "avg_line_length": 35.74477767944336, "blob_id": "aec1f338becea4d56f10052b02f0e35ab15fa01a", "content_id": "1aefce2c78c5e39078476d6ed5e7602ee6c927c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 24633, "license_type": "no_license", "max_line_length": 197, "num_lines": 670, "path": "/s3helper.php", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "<?\ninclude \"aws-sdk-for-php-master/sdk.class.php\";\ninclude \"aws-sdk-for-php-master/services/s3.class.php\";\ninclude \"lib/dev_prod_config.php\";\ninclude_once \"lib/dbmgr.php\";\ninclude_once \"lib/resource.php\";\n\nclass s3helper extends AmazonS3 {\n private $bucketname = \"b2c-docs\";\n private $carpetabase = \"colorama_landings\";\n private $portales_mobile;\n private $portales_result;\n private $bucketsimplantar;\n private $db;\n private $patrones = array(\n \"/colorama_landings\\/([A-z0-9-_]+)\\//\",\n \"/([A-z0-9_-]+\\.js)/\",\n \"/(.*)+\\//\",\n \"/colorama_landings\\/([A-z0-9-_\\/]+)\\//\",\n \"/colorama_landings\\/[A-z0-9-_\\/]+\\/[A-z0-9-_.]+/\",\n \"/^colorama_landings\\/[A-z0-9-_]+\\/$/\",\n \"/colorama_landings\\/[A-z0-9-_]+\\/[A-z0-9-_]+\\//\",\n \"/colorama_landings\\/[A-z0-9-_]+\\//\",\n );\n\n\n /**\n * [__construct iniciamos constructor parent AmazonS3]\n */\n public function __construct() {\n parent::__construct();\n\n $sql = \"SELECT w.web_id AS w_id,\n w.web_nombre AS w_nombre,\n w.web_ts_modificacion AS w_ts_modificacion,\n w.web_alias AS w_alias,\n w.web_etiqueta_website AS w_etiqueta_webiste,\n cf.cfc_cname AS w_url_landings\n FROM argo_websites w\n LEFT JOIN b2c.b2c_cloudfront_config cf ON cfc_id=web_cloudfront_cfc_id\n WHERE 1 = 1\n ORDER BY w_ts_modificacion DESC\";\n\n $this->portales_result = db::query(\"argo\", $sql, array());\n $this->db = new PDO(\"sqlite:../../../../proyecto_ssanchez/trunk/dptografico/colorama/colorama\");\n }\n\n\n /**\n * [minifyHTML Función que comprime el HTML de la aplicación]\n * @param [string] $param [Parametro que decide el inicio o el fin de la compresión del documente HTML valores: \"inicio\" -> para comenzar la compresión y \"fin\" -> para terminar la compresion]\n * @return [xhtml] [html comprimido]\n */\n public function minifyHTML($param) {\n if($param == \"inicio\"){\n function html($buffer) {\n $search = array(\n \"/\\>[^\\S ]+/s\",\n \"/[^\\S ]+\\</s\",\n \"/(\\s)+/s\",\n );\n \n $replace = array(\n \">\",\n \"<\",\n \"\\\\1\",\n );\n\n $buffer = preg_replace($search, $replace, $buffer);\n\n return ($buffer);\n }\n\n ob_start(\"html\");\n }else{\n ob_end_flush();\n }\n }\n\n\n /**\n * [get_all_buckets obtiene todos los buscket asociados a la cuenta]\n * @return [array] [listado de todos los buckets]\n */\n public function get_all_buckets() {\n print_r($this->list_buckets());\n }\n\n\n /**\n * [get_buckets_used_and_unused devuelve los buckets que contienen skin y los que no]\n * @return [array]\n */\n public function get_buckets_used_and_unused() {\n $bucketstatus = array();\n $portalesusados = array();\n $portalesalias = array();\n \n foreach ($this->portales_result as $value) {\n $portalesusados[] = $value[\"w_id\"];\n $portalesalias[$value[\"w_id\"]] = $value[\"w_alias\"];\n }\n\n $archivos = $this->get_object_list($this->bucketname, array(\n \"pcre\" => \"/colorama_landings/\"\n ));\n\n $match = preg_grep(\"/colorama_landings\\/[0-9A-z]+\\//\", $archivos);\n \n 
$bucketslimpios = preg_replace_callback(\"/[0-9A-z]+.js/\", function($matches) {\n null;\n }, $match);\n\n $bucketslimpios = preg_replace_callback(\"/colorama_landings/\", function($matches) {\n null;\n }, $bucketslimpios);\n \n $bucketslimpios = preg_replace_callback(\"/\\//\", function($matches) {\n null;\n }, $bucketslimpios);\n\n $bucketslimpios = array_unique($bucketslimpios);\n\n $bucketstatus[\"full\"] = $bucketslimpios;\n $bucketstatus[\"empty\"] = array_diff($portalesusados, $bucketslimpios);\n\n foreach ($bucketstatus[\"full\"] as $key => $value) {\n if(@$portalesalias[$value]) {\n $bucketstatus[\"full\"][$key] = $portalesalias[$value] . \" -- \" . $value;\n }\n }\n\n foreach ($bucketstatus[\"empty\"] as $key => $value) {\n $this->bucketsimplantar[] = array(\n \"label\" => $value,\n \"ruta\" => \"colorama_landings/\" . $value,\n );\n\n if(@$portalesalias[$value]) {\n $bucketstatus[\"empty\"][$key] = $portalesalias[$value] . \" -- \" . $value;\n }\n }\n\n return $bucketstatus;\n }\n\n\n /**\n * [duplicate_buckets Función que nos permite operaciones de copia con los buckets]\n * @param [array] $obj [objeto con toda la información del los archivos, buckets o directorios]\n * @return [array]\n */\n public function duplicate_buckets($obj) {\n $origen = $obj[\"objeto\"][\"origen\"];\n $destino = $obj[\"objeto\"][\"destino\"];\n $idOrigen = explode(\"/\", $origen);\n $idDestino = explode(\"/\", $destino);\n $idOrigen = $idOrigen[1];\n $idDestino = $idDestino[1];\n\n /**\n * Copia de carpeta a carpeta\n */\n if(!preg_match($this->patrones[1], $origen)\n && !preg_match($this->patrones[1], $destino)) {\n $result = $this->get_files_json($origen, false);\n\n if(preg_match($this->patrones[6], $destino)) {\n if($result) {\n foreach ($result[\"objetos\"] as $value) {\n preg_match($this->patrones[1], $value, $match);\n\n $archivo = $this->get_object($this->bucketname, $this->carpetabase . \"/\" . $value);\n $nuevoobjeto = str_replace($idOrigen, $idDestino, $archivo->body);\n\n $result = $this->create_object($this->bucketname, $destino . $match[0], array(\n \"body\" => $nuevoobjeto,\n \"acl\" => AmazonS3::ACL_PUBLIC,\n \"contentType\" => \"application/javascript\",\n \"headers\" => array(\n \"Content-Encoding\" => \"UTF-8\",\n \"Cache-Control\" => \"max-age=60\",\n ),\n ));\n\n if(!$result) {\n $response[\"success\"] = false;\n $response[\"error\"] = \"Se ha producido un error vuelvelo a intentar más tarde.\";\n \n return $response;\n } \n }\n }\n \n $response[\"success\"] = true;\n $response[\"objeto\"] = $idDestino;\n }else{\n if($result) {\n foreach ($result[\"objetos\"] as $value) {\n $archivo = $this->get_object($this->bucketname, $this->carpetabase . \"/\" . $value);\n $nuevoobjeto = str_replace($idOrigen, $idDestino, $archivo->body);\n\n $result = $this->create_object($this->bucketname, $this->carpetabase . \"/\" . 
str_replace($idOrigen, $idDestino, $value), array(\n \"body\" => $nuevoobjeto,\n \"acl\" => AmazonS3::ACL_PUBLIC,\n \"contentType\" => \"application/javascript\",\n \"headers\" => array(\n \"Content-Encoding\" => \"UTF-8\",\n \"Cache-Control\" => \"max-age=60\",\n ),\n ));\n\n if(!$result) {\n $response[\"success\"] = false;\n $response[\"error\"] = \"Se ha producido un error vuelvelo a intentar más tarde.\";\n \n return $response;\n } \n }\n }\n }\n \n $response[\"success\"] = true;\n $response[\"objeto\"] = $idDestino;\n }\n\n /**\n * Copia de archivo a archivo\n */\n elseif(preg_match($this->patrones[1], $origen)\n && preg_match($this->patrones[1], $destino)) {\n $archivo = $this->get_object($this->bucketname, $origen);\n $nuevoobjeto = str_replace($idOrigen, $idDestino, $archivo->body);\n\n $result = $this->create_object($this->bucketname, $destino, array(\n \"body\" => $nuevoobjeto,\n \"acl\" => AmazonS3::ACL_PUBLIC,\n \"contentType\" => \"application/javascript\",\n \"headers\" => array(\n \"Content-Encoding\" => \"UTF-8\",\n \"Cache-Control\" => \"max-age=60\",\n ),\n ));\n\n if($result) {\n $response[\"success\"] = true;\n $response[\"objeto\"] = $idDestino;\n }else{\n $response[\"success\"] = false;\n $response[\"error\"] = \"Se ha producido un error vuelvelo a intentar más tarde.\";\n }\n }\n\n /**\n * Copia de archivo a carpeta\n */\n elseif(preg_match($this->patrones[1], $origen)\n && !preg_match($this->patrones[1], $destino)) {\n preg_match($this->patrones[1], $origen, $match);\n\n $archivo = $this->get_object($this->bucketname, $origen);\n $nuevoobjeto = str_replace($idOrigen, $idDestino, $archivo->body);\n\n $result = $this->create_object($this->bucketname, $destino . $match[0] , array(\n \"body\" => $nuevoobjeto,\n \"acl\" => AmazonS3::ACL_PUBLIC,\n \"contentType\" => \"application/javascript\",\n \"headers\" => array(\n \"Content-Encoding\" => \"UTF-8\",\n \"Cache-Control\" => \"max-age=60\",\n ),\n ));\n\n if($result) {\n $response[\"success\"] = true;\n $response[\"objeto\"] = $idDestino;\n }else{\n $response[\"success\"] = false;\n $response[\"error\"] = \"Se ha producido un error vuelvelo a intentar más tarde.\";\n }\n\n }\n\n /**\n * Error no se puede copiar una carpeta dentro de un archivo\n */\n elseif(!preg_match($this->patrones[1], $origen)\n && preg_match($this->patrones[1], $destino)) {\n $response[\"success\"] = false;\n $response[\"error\"] = \"No se puede copiar de un directorio dentro de un archivo\";\n }\n\n return $response;\n }\n\n\n /**\n * [delete_bucket_obj Función para borrar un bucket completo]\n * @param [string] $bucketaborrar [id del bucket a borrar]\n * @return [null]\n */\n public function delete_bucket_obj($bucketaborrar) {\n $result = $this->get_files_json($bucketaborrar);\n\n foreach ($result[\"objetos\"] as $value) {\n list($bucket, $js) = explode(\" - \", $value);\n $borrado = $this->delete_object($this->bucketname, \"colorama_landings/\" . $bucketaborrar . \"/\" . 
$js); \n \n if(!$borrado) {\n return $response[\"success\"] = false;\n }\n }\n \n $response[\"success\"] = true;\n\n return $response;\n }\n\n\n /**\n * [get_files_for_folders en desarrollo]\n */\n public function get_files_for_folders($tree = false) {\n $estructuracorrecta = array();\n $estructura = array();\n \n $archivos = $this->get_object_list($this->bucketname, array(\n \"pcre\" => \"/colorama_landings/\"\n ));\n \n unset($archivos[0]);\n\n foreach ($archivos as $key => $value) {\n if (preg_match($this->patrones[5], $value)) {\n unset($archivos[$key]);\n }\n }\n\n foreach ($archivos as $value) {\n $result = explode(\"/\", $value);\n preg_match($this->patrones[0], $value, $match);\n\n $estructura[$result[1]][\"label\"] = $match[1];\n $estructura[$result[1]][\"ruta\"] = $match[0];\n \n if (preg_match($this->patrones[1], $result[2])){\n $estructura[$result[1]][\"children\"][] = array(\n \"label\" => $result[count($result) - 1],\n \"ruta\" => $value,\n );\n }else{\n $subfolders = array();\n $subfolders[$result[2]] = array(\n \"label\" => $result[count($result) - 1],\n \"ruta\" => $value,\n );\n\n $estructura[$result[1]][\"children\"][$result[2]][\"label\"] = $result[count($result) - 2];\n $estructura[$result[1]][\"children\"][$result[2]][\"ruta\"] = $match[0] . $result[count($result) - 2] . \"/\";\n $estructura[$result[1]][\"children\"][$result[2]][\"children\"][] = array(\n \"label\" => $result[count($result) - 1],\n \"ruta\" => $value,\n );\n }\n }\n\n foreach ($estructura as $key => $value) {\n $aux = 0;\n $estructuracorrecta[$key][\"label\"] = $estructura[$key][\"label\"];\n $estructuracorrecta[$key][\"ruta\"] = $estructura[$key][\"ruta\"];\n\n foreach ($value[\"children\"] as $k => $v) {\n $estructuracorrecta[$key][\"children\"][$aux] = $v;\n $aux++;\n }\n }\n\n $response = array_values($estructuracorrecta);\n\n if($tree) {\n $response = array_values(array_merge($estructuracorrecta, $this->bucketsimplantar));\n }\n\n return $response;\n }\n\n\n /**\n * [get_all_files_json obtienes todos los SKINS]\n * @return [array] [devuelve todas las posibles configuraciones de los skins]\n */\n public function get_all_files_json() {\n $patron = \"/[a-zA-Z0-9].js/i\";\n \n $archivos = $this->get_object_list($this->bucketname, array(\n \"pcre\" => \"/colorama_landings/\"\n ));\n\n if(!empty($archivos)) {\n $coincidencias = preg_grep($patron, $archivos);\n\n foreach ($coincidencias as $value) {\n list($basura, $archivo) = explode(\"colorama_landings/\", $value);\n $archivo = str_replace(\"/\", \" - \", $archivo);\n\n $response[\"objetos\"][] = $archivo;\n }\n\n $response[\"success\"] = true;\n }else{\n $response[\"success\"] = false; \n }\n\n return $response;\n }\n\n\n /**\n * [get_files_json obtiene el nombre de todas las configuraciones \".json\" creadas para los skins de las landings]\n * @return [array] [nombres de los json de los skins]\n */\n public function get_files_json($carpeta = null, $orginal = true) {\n $wid = $carpeta;\n $response = array();\n $patron = \"/[a-zA-Z0-9].js/i\"; \n $carpeta = ($carpeta)? $carpeta : \"/colorama_landings/\";\n $carpetasplit = ($carpeta)? \"/\" . str_replace(\"/\", \"\\/\", $carpeta) . \"/i\" : \"/colorama_landings/i\";\n\n $archivos = $this->get_object_list($this->bucketname, array(\n \"pcre\" => $carpetasplit\n ));\n\n if(!empty($archivos)) {\n $coincidencias = preg_grep($patron, $archivos);\n\n foreach ($coincidencias as $value) {\n list($basura, $archivo) = explode(\"colorama_landings/\", $value);\n \n $archivo = ($orginal)\n ? 
str_replace(\"/\", \" - \", $archivo) \n : $archivo;\n\n $response[\"objetos\"][] = $archivo;\n }\n\n $response[\"success\"] = true;\n }else{\n $response[\"success\"] = false;\n }\n \n $sql = \"select * from altaskinsfolders where id_portal = '$wid'\";\n\n $estado = $this->db->prepare($sql);\n $estado->execute();\n\n $result = $estado->fetchAll();\n $response[\"folders\"] = ($result)? $result[0][\"skin_folders\"] : null;\n\n return $response;\n }\n\n\n /**\n * [get_file_json devuelve el archivo json donse se alacenan todas las configuraciones]\n * @return [array] [objeto con todo la info del mismo]\n */\n public function get_file_json() {\n $result = $this->get_object($this->bucketname, \"colorama_landings/configJson.json\");\n $result = (array) $result;\n $result = $result[\"body\"];\n\n return $result;\n }\n\n\n /**\n * [delete_skin función que borra un skin]\n * @param [string] $portal [id del portal equivalente a el nombre en el bucket de amazon]\n * @param [string] $skin [nombre del skin]\n * @return [array]\n */\n public function delete_skin($obj) {\n $origen = $obj[\"objeto\"][\"origen\"];\n\n if(!preg_match($this->patrones[1], $origen)){\n $result = $this->get_files_json($origen, false);\n \n if($result) {\n foreach ($result[\"objetos\"] as $value) {\n $result = $this->delete_object($this->bucketname, $this->carpetabase . \"/\" . $value);\n \n if(!$result->isOK()) {\n $response[\"success\"] = false;\n \n return $response;\n }\n }\n\n $response[\"success\"] = true;\n }\n }else{\n $result = $this->delete_object($this->bucketname, $obj[\"objeto\"][\"origen\"]);\n\n $response[\"success\"] = ($result->isOK()) ? true : false;\n }\n \n return $response;\n }\n\n\n /**\n * [upload_generate_json función que sube el json]\n * @param [objeto] $datos [obtengo json que va a contener el archivo subido a AmazonS3]\n * @return [null]\n */\n public function upload_generate_json($datos) {\n $replica = null;\n $datos = json_decode($datos, true);\n\n $carpeta = (isset($datos[\"cfg\"][\"dummy_skinfolder\"]) && $datos[\"cfg\"][\"dummy_skinfolder\"])\n ? \"colorama_landings/\" . $datos[\"cfg\"][\"dummy_portal\"] . \"/\" . $datos[\"cfg\"][\"dummy_skinfolder\"] . \"/\"\n : \"colorama_landings/\" . $datos[\"cfg\"][\"dummy_portal\"] . \"/\";\n\n if(!$datos[\"reescribir\"]) {\n @$existePorNumero = $this->if_object_exists($this->bucketname, $carpeta . $datos[\"cfg\"][\"dummylayer_null\"] . \".js\");\n @$existePorNombre = $this->if_object_exists($this->bucketname, $carpeta . $datos[\"cfg\"][\"dummynombre_null\"] . \".js\");\n\n if($existePorNumero \n || $existePorNombre) {\n\n $response[\"success\"] = false;\n $response[\"detalle\"] = \"existe\";\n\n return $response;\n }\n }\n\n if(isset($datos[\"cfg\"][\"dummylayer_null\"])) {\n $replica = $datos[\"cfg\"][\"dummylayer_null\"] . \"===\" . $datos[\"cfg\"][\"dummynombre_null\"];\n $datos[\"cfg\"][\"replica\"] = $replica;\n\n $result = $this->create_object($this->bucketname, $carpeta . $datos[\"cfg\"][\"dummylayer_null\"] . \".js\", array(\n \"body\" => json_encode($datos[\"cfg\"]),\n \"acl\" => AmazonS3::ACL_PUBLIC,\n \"contentType\" => \"application/javascript\",\n \"headers\" => array(\n \"Content-Encoding\" => \"UTF-8\",\n \"Cache-Control\" => \"max-age=60\",\n ),\n ));\n }\n\n $result = $this->create_object($this->bucketname, $carpeta . $datos[\"cfg\"][\"dummynombre_null\"] . 
\".js\", array(\n \"body\" => json_encode($datos[\"cfg\"]), \n \"acl\" => AmazonS3::ACL_PUBLIC,\n \"contentType\" => \"application/javascript\",\n \"headers\" => array(\n \"Content-Encoding\" => \"UTF-8\",\n \"Cache-Control\" => \"max-age=60\",\n ),\n ));\n\n if($result) {\n $response[\"success\"] = true;\n $response[\"objeto\"] = $datos[\"cfg\"];\n }else{\n $response[\"success\"] = false;\n }\n\n return $response; \n }\n\n\n /**\n * [get_mobile_web_sites obtiene todos los portales mobiles creados]\n * @return [array]\n */\n public function get_mobile_web_sites() {\n foreach ($this->portales_result as $value) {\n $this->portales_mobile[] = $value;\n }\n\n return $this->portales_mobile;\n }\n\n\n /**\n * [get_landings obtiene todas las landings publicadas.]\n * @return [array] \n */\n public function get_landings() {\n $landings = array();\n $sql = \"select land_id,\n land_creatividad,\n land_nombre,\n land_argo_website,\n land_skin_folder\n from landings\"; \n\n $response = db::query(\"b2c\", $sql, array());\n\n foreach ($response as $key => $value) {\n $landings[$value[\"land_argo_website\"]][$key] = $value;\n }\n\n return $landings;\n }\n\n\n /**\n * [comprobar_carpeta comprueba si una carpeta esta asignada a un portal de contenido]\n * @param [array] $obj [array con todas las variables para crear el portal]\n * @return [array]\n */\n public function comprobar_carpeta($obj) {\n $wid = $obj[\"wid\"];\n \n $sql = \"select * from altaskinsfolders where id_portal = '$wid'\";\n\n $estado = $this->db->prepare($sql);\n $estado->execute();\n\n $result = $estado->fetchAll();\n\n if($result) {\n $response[\"success\"] = true;\n $response[\"objeto\"] = $result;\n }else{\n $response[\"success\"] = false;\n }\n\n return $response;\n }\n\n\n /**\n * [updateorcreate_carpeta función que nos permite actualizar las carpetas por potal de contenido]\n * @param [array] $obj [array con toda las variables necesarias para la creación de la carpeta]\n * @return [array]\n */\n public function updateorcreate_carpeta($obj) {\n $wid = $obj[\"objeto\"][\"website\"][\"w_id\"];\n $lfs = $obj[\"objeto\"][\"folders\"];\n\n $sql = \"select * from altaskinsfolders where id_portal = '$wid'\";\n\n $estado = $this->db->prepare($sql);\n $estado->execute();\n\n $result = $estado->fetchAll();\n\n $sql = ($result)\n ? 
\"update altaskinsfolders set skin_folders = '$lfs' where id_portal = '$wid'\"\n : \"insert into altaskinsfolders (skin_folders, id_portal) values ('$lfs', '$wid')\";\n\n $estado = $this->db->prepare($sql);\n \n if($estado->execute()) {\n $response[\"success\"] = true;\n }else{\n $response[\"success\"] = false;\n $response[\"error\"] = $estado->errorCode();\n };\n\n return $response;\n }\n}\n\n$s3h = new s3helper();\n?>" }, { "alpha_fraction": 0.7558860182762146, "alphanum_fraction": 0.7608426213264465, "avg_line_length": 34.08695602416992, "blob_id": "ac4e474ac707cc78185dd8cbebb26bbe3404322b", "content_id": "dd469435ff7dab0bb6dc598c37045a564aaefe25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 809, "license_type": "no_license", "max_line_length": 151, "num_lines": 23, "path": "/README.md", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "COLORAMA LANDINGS:\n==================\n***\n***\n\n###Colorama panel configuración del skin:\n\n![alt text](https://raw.githubusercontent.com/chadsfatherlali/colorama/master/imgscolorama/colorama1.png \"Colorama: panel de modificación del SKIN\")\n\n***\n###Colorama con un skin cargado:\n\n![alt text](https://raw.githubusercontent.com/chadsfatherlali/colorama/master/imgscolorama/colorama2.png \"Colorama: un skin cargado\")\n\n***\n###Colorama panel de acciones con los buckets:\n\n![alt text](https://raw.githubusercontent.com/chadsfatherlali/colorama/master/imgscolorama/colorama3.png \"Colorama: panel de acciones con los buckets\")\n\n***\n###Colorama desplegado el colorpicker:\n\n![alt text](https://raw.githubusercontent.com/chadsfatherlali/colorama/master/imgscolorama/colorama4.png \"Colorama: desplegado el colorpicker:\")\n" }, { "alpha_fraction": 0.46759942173957825, "alphanum_fraction": 0.4727540612220764, "avg_line_length": 35.21333312988281, "blob_id": "37ba9cf870c57a2249896c3107c2337b9e98ecda", "content_id": "74cc746f90d35a562329ba47f213a6125ead188f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2716, "license_type": "no_license", "max_line_length": 112, "num_lines": 75, "path": "/json.php", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "<?\nif(isset($_GET[\"objetojson\"]) && isset($_GET[\"portal\"])) {\n\n $nombrejs = (isset($_GET[\"carpeta\"]) && $_GET[\"carpeta\"] != \"undefined\")\n ? $_GET[\"portal\"] . \"/\" . $_GET[\"carpeta\"] . \"/\" . $_GET[\"objetojson\"]\n : $_GET[\"portal\"] . \"/\" . $_GET[\"objetojson\"];\n\n $datos = file_get_contents(\"https://b2c-docs.s3.amazonaws.com/colorama_landings/\" . $nombrejs . \".js\");\n $datosNombre = json_decode($datos, true);\n\n if(isset($_GET[\"render\"]) \n && $_GET[\"render\"] == 1) {\n \n header(\"Content-Description: File Transfer\"); \n header(\"Content-Type: application/octet-stream\");\n header(\"Content-disposition: attachment; filename='\" . $datosNombre[\"dummynombre_null\"] . \".js'\");\n }else if(isset($_GET[\"render\"])\n && $_GET[\"render\"] == 2) {\n \n header(\"Content-Description: File Transfer\"); \n header(\"Content-Type: application/octet-stream\");\n header(\"Content-disposition: attachment; filename='\" . (int)$datosNombre[\"dummylayer_null\"] . 
\".js'\");\n }\n\n echo $datos;\n}else{\n include \"s3helper.php\";\n\n $datos = file_get_contents(\"php://input\");\n $get = json_decode($datos, true);\n\n if(isset($get[\"accion\"])) {\n switch ($get[\"accion\"]) {\n case \"set\":\n $skin = str_replace(\" - \", \"/\", $get[\"nombre\"]);\n $datos = file_get_contents(\"https://b2c-docs.s3.amazonaws.com/colorama_landings/\" . $skin);\n echo $datos;\n break;\n\n case \"comprobar\":\n $result = $s3h->get_files_json($get[\"portal_id\"]);\n echo json_encode($result);\n break;\n\n case \"borrar\":\n $result = $s3h->delete_skin($get);\n echo json_encode($result);\n break;\n\n case \"duplicar\":\n $result = $s3h->duplicate_buckets($get);\n echo json_encode($result);\n break;\n\n case \"borrarbucket\":\n $result = $s3h->delete_bucket_obj($get[\"portal_borrar\"]);\n echo json_encode($result); \n break;\n\n case \"comprobarcarpetasskin\":\n $result = $s3h->comprobar_carpeta($get);\n echo json_encode($result);\n break;\n\n case \"updateorcreate\":\n $result = $s3h->updateorcreate_carpeta($get);\n echo json_encode($result);\n break;\n }\n }else{\n $result = $s3h->upload_generate_json($datos); \n echo json_encode($result);\n }\n}\n?>\n" }, { "alpha_fraction": 0.636296272277832, "alphanum_fraction": 0.6459259390830994, "avg_line_length": 37.599998474121094, "blob_id": "b744f991f278a152f90002b1cfd170296fde0878", "content_id": "d826234c818a0a359468b081ceb876e7378d8274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1350, "license_type": "no_license", "max_line_length": 110, "num_lines": 35, "path": "/var.js.php", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "<?\ninclude \"s3helper.php\";\n\n$portales = $s3h->get_mobile_web_sites();\n$allskins = $s3h->get_all_files_json();\n$buckets = $s3h->get_buckets_used_and_unused();\n$landings = $s3h->get_landings();\n$forfolders = $s3h->get_files_for_folders();\n$forfoldersimplantados = $s3h->get_files_for_folders(true);\n\n// $lastModified=filemtime(__FILE__);\n// $etagFile = md5_file(__FILE__);\n\n// $ifModifiedSince = (isset($_SERVER[\"HTTP_IF_MODIFIED_SINCE\"])? $_SERVER[\"HTTP_IF_MODIFIED_SINCE\"] : false);\n// $etagHeader = (isset($_SERVER[\"HTTP_IF_NONE_MATCH\"]) ? trim($_SERVER[\"HTTP_IF_NONE_MATCH\"]) : false);\n\n// header(\"Content-Type: application/javascript\");\n// header(\"Last-Modified: \" . gmdate(\"D, d M Y H:i:s\", $lastModified) . \" GMT\");\n// header(\"Etag: $etagFile\");\n// header(\"Cache-Control: public\");\n\n// if(@strtotime($_SERVER[\"HTTP_IF_MODIFIED_SINCE\"]) == $lastModified \n// || $etagHeader == $etagFile){\n \n// header(\"HTTP/1.1 304 Not Modified\");\n// exit;\n// }\n?>\n\nwindow.treedata_avm_implantados = <? echo json_encode($forfoldersimplantados) ?>; \nwindow.treedata_avm = <? echo json_encode($forfolders) ?>; \nwindow.AllLandings = <? echo json_encode($landings) ?>;\nwindow.Portales = <? echo json_encode($portales) ?>;\nwindow.AllSkins = <? echo json_encode($allskins) ?>;\nwindow.BucketsConContenido = <? 
echo json_encode($buckets) ?>;" }, { "alpha_fraction": 0.6702508926391602, "alphanum_fraction": 0.6738350987434387, "avg_line_length": 16.1875, "blob_id": "9abd068f856b5ddd3fb94183152ffba7eb27879a", "content_id": "f363b8daae007c18816f4776c4ebcd1fc71c3f8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/users.py", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport getpass\n\nusuario = getpass.getuser()\nusuariospermitidos = [\n\t\"ssanchez\",\n\t\"rlopez\"\n]\n\n\nif usuario in usuariospermitidos:\n\tprint \"El usuario \" + usuario + \" esta permitido\"\nelse:\n\tprint \"El usuario \" + usuario + \" no esta permitido\"\n\t\n\n\n" }, { "alpha_fraction": 0.4878985583782196, "alphanum_fraction": 0.49273914098739624, "avg_line_length": 38.68058395385742, "blob_id": "9c9cb156804235e6389710d8579b475924bc2ab6", "content_id": "50c9cbb02847afa8da5c4150c0c579c1373cc2a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 19020, "license_type": "no_license", "max_line_length": 299, "num_lines": 479, "path": "/ngControllers/ngControllers.js", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "var _controllers_ = angular.module(\"_controllers_\", []);\n\n\n/**\n * Controlador principal de la aplicación;\n */\n_controllers_.controller(\"mainController\", function($rootScope, $scope, $window, $http, $routeParams, $location, $sce, $compile) {\n $scope.permitirreescribir = false;\n $scope.permitidoBorrar = false;\n $scope.permitidoDescargar = false;\n $scope.menudesplegado = false;\n $scope.pais = \"espana\";\n $scope.Skins = [];\n $scope.Landings = [];\n $scope.land_skin_folder = [];\n $scope.Mcolorama = {};\n $scope.portalBorrar = {};\n $scope.envioDatos = true; \n $scope.Portales = $window.Portales;\n $scope.AllSkins = $window.AllSkins[\"objetos\"];\n $scope.AllLandings = $window.AllLandings;\n $scope.BucketLlenos = $window.BucketsConContenido[\"full\"];\n $scope.BucketVacios = $window.BucketsConContenido[\"empty\"]; \n $scope.CarpetaContenedoraDeSkins = [];\n $scope.my_data_origen = $window.treedata_avm;\n $scope.my_data_destino = $window.treedata_avm_implantados;\n $scope.seleccionado1;\n $scope.seleccionado2;\n $scope.cartasPago = [\n {\n nombre: \"España\",\n vista: \"espana\"\n },\n {\n nombre: \"Italia\",\n vista: \"italia\"\n }\n ];\n\n $rootScope.Carpetas = false;\n $rootScope.listo = false;\n $rootScope.rootImg = null;\n\n $scope.cartaPago = $scope.cartasPago[0];\n\n $rootScope.$watch(function() {\n $scope.Mcolorama.dummyimg_backgroundimageanimated = $rootScope.rootName;\n $scope.Mcolorama.dummyimg_backgroundimage = $rootScope.rootImg; \n });\n\n\n /**\n * [my_tree_handler En desarrollo]\n */\n $scope.my_tree_origen_handler = function(branch) {\n $scope.seleccionado1 = branch.ruta;\n };\n\n $scope.my_tree_destino_handler = function(branch) {\n $scope.seleccionado2 = branch.ruta;\n };\n\n $scope.copiarRama = function() {\n $rootScope.listo = true;\n\n if($scope.seleccionado1\n && $scope.seleccionado2){\n var obj = {\n origen: $scope.seleccionado1,\n destino: $scope.seleccionado2\n }\n\n $http.post(\"json.php\", {\n objeto: obj,\n accion: \"duplicar\"\n })\n .success(function(data, status, headers, config) {\n if(data[\"success\"]) {\n alert(\"Operación completada con exito\");\n \n 
window.location.reload(true);\n }else{\n alert(data[\"error\"]);\n \n $rootScope.listo = false;\n }\n })\n .error(function(data, status, headers, config) {\n $scope.permitidoBorrar = false;\n\n try{console.log(\"ERROR:\", data)}catch(err) {};\n\n $rootScope.listo = false;\n });\n }else{\n alert(\"Por favor selecciona un Origen y un Destino\");\n \n $rootScope.listo = false;\n }\n\n }\n\n $scope.borrarRama = function() {\n $rootScope.listo = true;\n\n if($scope.seleccionado1) {\n var obj = {\n origen: $scope.seleccionado1\n }\n\n $http.post(\"json.php\", {\n objeto: obj,\n accion: \"borrar\"\n })\n .success(function(data, status, headers, config) {\n if(data.success) {\n alert(\"Operación completada con exito\");\n \n window.location.reload(true);\n }else{ \n $rootScope.listo = false;\n }\n })\n .error(function(data, status, headers, config) {\n $scope.permitidoBorrar = false;\n\n try{console.log(\"ERROR:\", data)}catch(err) {};\n\n $rootScope.listo = false;\n });\n }else{\n alert(\"Por favor selecciona un Origen\");\n }\n }\n\n /**\n * [descargarSkin Función que nos permite descarganos los skin js]\n * @return {[null]} \n */\n $scope.descargarSkin = function() {\n if($scope.Mcolorama.dummylayer_null) window.open(\"json.php?objetojson=\" + $scope.Mcolorama.dummylayer_null + \"&portal=\" + $scope.Mcolorama.dummy_portal + \"&carpeta=\" + $scope.Mcolorama.dummy_skinfolder + \"&render=2\", \"_blank\", \"width=500,height=10,left=500,top=0\");\n\n window.open(\"json.php?objetojson=\" + $scope.Mcolorama.dummynombre_null + \"&portal=\" + $scope.Mcolorama.dummy_portal + \"&carpeta=\" + $scope.Mcolorama.dummy_skinfolder + \"&render=1\", \"_blank\", \"width=500,height=10,left=0,top=0\");\n }\n \n\n /**\n * [borrarSkin Función que nos permite borrar Skins que no se necesite]\n * @return {[null]}\n */\n \n\n /**\n * [getIMG Función que nos abre en una ventana nueva la IMAGEN actualmente usada en la configuración del colorama]\n * @return {[null]}\n */\n $scope.getIMG = function() {\n window.open($scope.Mcolorama.dummyimg_backgroundimage, \"_blank\");\n }\n\n\n /**\n * [desplegarmenu Funcion para desplegar el menu]\n * @param {[objeto]} $event [Objeto que lo emite]\n * @return {[null]}\n */\n $scope.desplegarmenu = function($event) {\n $scope.menudesplegado = ($scope.menudesplegado)? false : true;\n\n var texto = ($scope.menudesplegado)\n ? 
\"CERRAR\"\n : \"OPCIONES BUCKETS\"\n\n angular.element($event.target).html(texto);\n }\n\n\n /**\n * [setCartaPago establece la url de la vista a cargar y su respectivo controlador]\n */\n $scope.setCartaPago = function() {\n $scope.pais = $scope.cartaPago.vista;\n $location.path($scope.cartaPago.vista);\n }\n\n\n $scope.formularioAltaSkinfolder = function($scope) { \n $scope.currentPortalLandings = []; \n\n // $scope.getLandings = function() {\n // $scope.currentPortalLandings = [];\n\n // angular.forEach($scope.AllLandings[$scope.skinfoldersAdd.website.w_id], function(v, k) {\n // $scope.currentPortalLandings.push(v);\n // });\n // }\n\n $scope.comprobateSkinsFolders = function() {\n // w_alias: \"Infotel\"\n // w_etiqueta_webiste: \"Infotel\"\n // w_id: \"infotel255f287ca47c42cacdedf92bb\"\n // w_nombre: \"Infotel11847\"\n // w_ts_modificacion: \"2014-05-27 16:43:31\"\n // w_url_landings: null\n $scope.skinfoldersAdd.folders = null;\n\n $http.post(\"json.php\", {\n wid: $scope.skinfoldersAdd.website.w_id,\n accion: \"comprobarcarpetasskin\"\n })\n .success(function(data, status, headers, config) {\n if(data[\"success\"]) {\n $scope.skinfoldersAdd.folders = data[\"objeto\"][0].skin_folders;\n } else {\n alert(\"La landing no tiene ninguna carpeta asociada para los Skins\");\n }\n })\n .error(function(data, status, headers, config) {\n try{console.log(\"ERROR:\", data)}catch(err) {};\n });\n }\n\n $scope.sendadd = function() {\n $http.post(\"json.php\", {\n objeto: $scope.skinfoldersAdd,\n accion: \"updateorcreate\"\n })\n .success(function(data, status, headers, config) {\n if(data[\"success\"]) {\n alert(\"Actualización de carpetas completada.\")\n } else {\n alert(\"Se ha producido un error\");\n }\n })\n .error(function(data, status, headers, config) {\n try{console.log(\"ERROR:\", data)}catch(err) {};\n });\n }\n }\n\n\n /**\n * [formularioS3borrar Función para borrar un bucket complete]\n * @param {[type]} $scope [objeto que contiene todas las variables de la aplicación]\n * @return {[objeto]}\n */\n $scope.formularioS3borrar = function($scope) {\n $scope.borrar = function(portalBorrar) {\n $rootScope.listo = true;\n\n if($scope.formS3borrar.portalaborrar.$valid\n && $scope.formS3borrar.portalaborrar.$dirty) {\n var pborrar = portalBorrar.portalaborrar.split(\" -- \");\n\n $http.post(\"json.php\", {\n portal_borrar: pborrar[1],\n accion: \"borrarbucket\"\n })\n .success(function(data, status, headers, config) {\n if(data[\"success\"]) {\n window.top.location.reload();\n alert(\"Bucket borrado...\"); \n } else {\n $rootScope.listo = false;\n alert(\"Surgio un Error vuele a intentar más tarde...\");\n } \n })\n .error(function(data, status, headers, config) {\n try{console.log(\"ERROR:\", data)}catch(err) {};\n try{console.log(\"ERROR:\", status)}catch(err) {};\n\n alert(\"Surgio un Error vuele a intentar más tarde...\");\n });\n\n } else {\n alert(\"Debes de escoger un bucket...\");\n $rootScope.listo = false;\n }\n }\n }\n\n\n /**\n * [formularioColorama Recoje todos los colores del formulario para aplicarlos al colorama]\n * @param {[objeto]} $scope [el objeto que contiene el modelo de Mcolorama]\n * @return {[objeto]}\n */\n $scope.formularioColorama = function($scope, enviarImg) {\n $scope.imagen = null; \n $scope.setValuesForm = false;\n\n $rootScope.$on(\"request:setvaluespresaved\", function(e) {\n $scope.setValuesForm = true;\n });\n \n\n /**\n * [gd funcion para generar el fichero js y descargarlo]\n * @param {[objeto]} $scope [entorno de de la aplicacion donde se encuentra 
el modelo del JSON]\n * @return {[null]}\n */ \n $scope.gd = function(Mcolorama) { \n $rootScope.listo = true;\n $scope.envioDatos = false;\n\n if($scope.setValuesForm) {\n $scope.formColorama.dummy_backgroundcolor.$setViewValue($scope.formColorama.dummy_backgroundcolor.$modelValue);\n $scope.formColorama.dummyboton_backgroundcolor.$setViewValue($scope.formColorama.dummyboton_backgroundcolor.$modelValue);\n $scope.formColorama.dummytextoboton_color.$setViewValue($scope.formColorama.dummytextoboton_color.$modelValue);\n $scope.formColorama.dummytexto_color.$setViewValue($scope.formColorama.dummytexto_color.$modelValue);\n $scope.formColorama.dummyimg_backgroundimage.$setViewValue($scope.formColorama.dummyimg_backgroundimage.$modelValue);\n $scope.formColorama.dummynombre_null.$setViewValue($scope.formColorama.dummynombre_null.$modelValue);\n $scope.formColorama.dummylayer_null.$setViewValue($scope.formColorama.dummylayer_null.$modelValue);\n }\n\n if($scope.formColorama.dummy_backgroundcolor.$valid\n && $scope.formColorama.dummyboton_backgroundcolor.$valid\n && $scope.formColorama.dummytextoboton_color.$valid\n && $scope.formColorama.dummytexto_color.$valid\n && $scope.formColorama.dummynombre_null.$valid\n && $scope.formColorama.dummy_backgroundcolor.$dirty\n && $scope.formColorama.dummyboton_backgroundcolor.$dirty\n && $scope.formColorama.dummytextoboton_color.$dirty\n && $scope.formColorama.dummytexto_color.$dirty\n && $scope.formColorama.dummynombre_null.$dirty) {\n\n var datosok = ($scope.permitirreescribir)\n ? {cfg: Mcolorama, reescribir: true}\n : {cfg: Mcolorama, reescribir: false}; \n\n $http.post(\"json.php\", datosok)\n .success(function(data, status, headers, config) {\n if(data[\"success\"]) {\n\n var nuevoSkin = data[\"objeto\"][\"dummy_portal\"] + \" - \" + data[\"objeto\"][\"dummynombre_null\"] + \".js\";\n var texto = (data[\"objeto\"][\"dummylayer_null\"])\n ? 
\"Se ha creado el SKIN correctamente: \" + data[\"objeto\"][\"dummynombre_null\"] + \"\\ny el LAYER: \" + data[\"objeto\"][\"dummylayer_null\"]\n : \"Se ha creado el SKIN correctamente: \" + data[\"objeto\"][\"dummynombre_null\"];\n\n if($scope.Skins.indexOf(nuevoSkin) == -1) {\n $scope.Skins.push(nuevoSkin);\n $scope.AllSkins.push(nuevoSkin);\n }\n\n alert(texto);\n \n if(data[\"objeto\"][\"dummylayer_null\"]) window.open(\"json.php?objetojson=\" + data[\"objeto\"][\"dummylayer_null\"] + \"&portal=\" + data[\"objeto\"][\"dummy_portal\"] + \"&carpeta=\" + data[\"objeto\"][\"dummy_skinfolder\"] + \"&render=2\", \"_blank\", \"width=500,height=10,left=500,top=0\");\n\n window.open(\"json.php?objetojson=\" + data[\"objeto\"][\"dummynombre_null\"] + \"&portal=\" + data[\"objeto\"][\"dummy_portal\"] + \"&carpeta=\" + data[\"objeto\"][\"dummy_skinfolder\"] + \"&render=1\", \"_blank\", \"width=500,height=10,left=0,top=0\");\n\n } else if(!data[\"success\"]\n && data[\"detalle\"] == \"existe\") {\n alert(\"Ya existe un SKIN con ese ID ó NOMBRE, elige otros.\");\n $scope.envioDatos = true; \n } else {\n alert(\"Se ha producido un error.\");\n $scope.envioDatos = true;\n }\n\n $scope.permitidoBorrar = true;\n $scope.permitidoDescargar = true;\n $rootScope.listo = false;\n $scope.envioDatos = true;\n })\n .error(function(data, status, headers, config) {\n $scope.permitidoBorrar = false;\n $scope.permitidoDescargar = true;\n try{console.log(\"ERROR1:\", status)}catch(err) {}\n try{console.log(\"ERROR2:\", headers)}catch(err) {}\n });\n\n } else {\n alert(\"Revisa el FORMULARIO\");\n $scope.envioDatos = true;\n $rootScope.listo = false;\n }\n }\n }\n\n\n /**\n * [setSkin Se establece el skin en el dummy]\n * @param {[objeto]} skin [todos los parametros del skin apra el dummy]\n */\n $scope.setSkin = function(skin) {\n $rootScope.listo = true;\n\n $http.post(\"json.php\", {\n nombre: skin,\n accion: \"set\"\n })\n .success(function(data, status, headers, config) {\n $scope.$emit(\"request:setvaluespresaved\");\n $scope.Mcolorama = data;\n $rootScope.rootImg = $scope.Mcolorama.dummyimg_backgroundimage;\n\n // angular.forEach($scope.Landings, function(v, k) {\n // if(v.land_id == data.dummy_landingid) {\n // $scope.landing = $scope.Landings[k];\n // $scope.setFolderSkins($scope.landing);\n // }\n // });\n\n $scope.permitidoBorrar = true;\n $scope.permitidoDescargar = true;\n $rootScope.listo = false;\n })\n .error(function(data, status, headers, config) {\n $scope.permitidoBorrar = false;\n $scope.permitidoDescargar = false;\n\n try{console.log(\"ERROR1:\", status)}catch(err) {}\n try{console.log(\"ERROR2:\", headers)}catch(err) {}\n });\n }\n\n \n /**\n * [checkPortal chequea el portal si existe el portal y regresa los skins, caso contrario lo crea para poder almacenar los skins]\n * @param {[string]} id [id del portal selecto]\n */\n $scope.checkPortal = function(portal) { \n $scope.land_skin_folder = [];\n $scope.Landings = [];\n $rootScope.listo = true;\n\n $scope.Mcolorama.dummy_portal = portal.w_id;\n\n $http.post(\"json.php\", {\n portal_id: \"colorama_landings/\" + portal.w_id,\n accion: \"comprobar\"\n })\n .success(function(data, status, headers, config) {\n if(data[\"success\"]) {\n alert(\"Estan listos los SKIN solicitados\");\n \n $scope.Skins = data[\"objetos\"];\n \n } else {\n alert(\"No existe el bucket (No tiene skins asociados...)\");\n \n $scope.permitidoBorrar = false;\n $scope.Skins = [];\n }\n\n if(data[\"folders\"]) $scope.land_skin_folder = data[\"folders\"].split(\",\");\n\n 
$rootScope.listo = false;\n })\n .error(function(data, status, headers, config) {\n $scope.permitidoBorrar = false;\n try{console.log(\"ERROR:\", data)}catch(err) {};\n });\n }\n\n\n // $scope.setFolderSkins = function(landing) {\n // $scope.Mcolorama.dummy_landingid = landing.land_id;\n // landing.land_skin_folder = \"skin1,skin2,skin3\";\n\n // if(landing.land_skin_folder) {\n // $scope.land_skin_folder = landing.land_skin_folder.split(\",\");\n // angular.forEach($scope.land_skin_folder, function(v, k) {\n // $scope.land_skin_folder[k] = v.trim();\n // });\n // }\n // }\n\n\n /**\n * [permitirReescribir establece un booleano que nos permite o no re-escribir una landing]\n * @return {[boolean]}\n */\n $scope.permitirReescribir = function() {\n $scope.permitirreescribir = ($scope.permitirreescribir)\n ? false\n : true;\n }\n});" }, { "alpha_fraction": 0.6286353468894958, "alphanum_fraction": 0.6331096291542053, "avg_line_length": 25.352941513061523, "blob_id": "ebf9d88653ca42937cc59c642fc93518bb381c25", "content_id": "8e26dd77d9166b59e57edd7ea144fa207cb32f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 447, "license_type": "no_license", "max_line_length": 88, "num_lines": 17, "path": "/ngFactories/ngFactories.js", "repo_name": "chadsfatherlali/colorama", "src_encoding": "UTF-8", "text": "angular.module(\"_factories_\", [])\n\n\n/**\n * [Setea la variable rootImg en el entorno del Colorama]\n * @param {[objeto]} $rootScope [contiene todo el entorno de angularjs]\n * @return {[objeto]} [devuelve la informacion del objeto \"imagen en base64\"]\n */\n.factory(\"enviarImg\", function($rootScope) {\n var imagenInfo = {};\n\n imagenInfo.pasar = function(info) {\n $rootScope.rootImg = info;\n }\n\n return imagenInfo;\n});" } ]
8
google/framework-for-osdu
https://github.com/google/framework-for-osdu
ae94a01943aa3298dd6f0ccda2fc64196bc2e1f4
73870a7dee903d4ec50d16b3f84fce6ca592d71d
1be7126e8746baae9d3dd8112a53afbd5890df89
refs/heads/master
2023-08-31T00:58:34.236993
2021-12-16T19:43:03
2021-12-16T19:43:03
224,537,036
17
7
Apache-2.0
2019-11-28T00:00:29
2023-03-29T17:05:06
2023-07-22T22:51:42
Java
[ { "alpha_fraction": 0.7656981945037842, "alphanum_fraction": 0.7731958627700806, "avg_line_length": 26.35897445678711, "blob_id": "693877a0624c471b9c232ef399ab22d8027f2980", "content_id": "4600505aeaa2fbe37668c5d92a2ebd059678ba2e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1067, "license_type": "permissive", "max_line_length": 75, "num_lines": 39, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/model/IngestHeaders.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.model;\n\nimport com.osdu.model.delfi.submit.LegalTagsObject;\nimport java.util.List;\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\n@Data\n@Builder\n@NoArgsConstructor\n@AllArgsConstructor\npublic class IngestHeaders {\n\n String authorizationToken;\n String partition;\n String legalTags;\n LegalTagsObject legalTagsObject;\n String resourceHomeRegionID;\n List<String> resourceHostRegionIDs;\n\n}\n" }, { "alpha_fraction": 0.7082997560501099, "alphanum_fraction": 0.7179693579673767, "avg_line_length": 37.8125, "blob_id": "23d6f753631ebd8f26e430a1e26d975c8d86c819", "content_id": "2cc86da27d7ce1466716e14bfb5a6d07b408184f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "permissive", "max_line_length": 102, "num_lines": 32, "path": "/osdu-r2/os-python-sdk/osdu_api/model/legal.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom osdu_api.model.legal_compliance import LegalCompliance\n\n'''\nLegal model mirroring what's found in core common\n'''\nclass Legal:\n def __init__(self, legaltags: list, other_relevant_data_countries: list, status: LegalCompliance):\n self.legaltags = legaltags\n self.other_relevant_data_countries = other_relevant_data_countries\n self.status = status\n\n def get_dict(self):\n legal_dict = {}\n legal_dict['legaltags'] = self.legaltags\n legal_dict['otherRelevantDataCountries'] = self.other_relevant_data_countries\n legal_dict['status'] = str(self.status)\n return legal_dict" }, { "alpha_fraction": 0.772446870803833, "alphanum_fraction": 0.7779300808906555, 
"avg_line_length": 32.930233001708984, "blob_id": "c314faa3b52a94b90d22996a94ee28165020297d", "content_id": "21dfad9fba488d0f687e93f961799d2c393c5685", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1459, "license_type": "permissive", "max_line_length": 81, "num_lines": 43, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/mapper/SearchResultMapperDecorator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.mapper;\n\nimport com.osdu.model.delfi.DelfiSearchResult;\nimport com.osdu.model.osdu.OsduSearchObject;\nimport com.osdu.model.osdu.OsduSearchResult;\nimport javax.inject.Inject;\nimport javax.inject.Named;\n\npublic abstract class SearchResultMapperDecorator implements SearchResultMapper {\n\n @Inject\n @Named(\"com.osdu.mapper.SearchResultMapperImpl_\")\n SearchResultMapper searchResultMapper;\n\n @Override\n public OsduSearchResult delfiToOsdu(DelfiSearchResult searchResult,\n OsduSearchObject osduSearchObject) {\n OsduSearchResult osduSearchResult = searchResultMapper\n .delfiToOsdu(searchResult, osduSearchObject);\n\n osduSearchResult.setFacets(osduSearchObject.getFacets());\n osduSearchResult.setCount(osduSearchObject.getCount());\n osduSearchResult.setStart(osduSearchObject.getStart());\n\n return osduSearchResult;\n }\n}\n" }, { "alpha_fraction": 0.7317425608634949, "alphanum_fraction": 0.7404193878173828, "avg_line_length": 36.35135269165039, "blob_id": "583d7e3cc33de302c23bfc4b5dcd413e3eb819ff", "content_id": "6c68317dc06d41e1086c28906b27d2b6b391d92a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1383, "license_type": "permissive", "max_line_length": 106, "num_lines": 37, "path": "/osdu-r2/os-python-sdk/osdu_api/search/search_client.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom typing import List\nfrom osdu_api.base_client import BaseClient\nfrom osdu_api.model.http_method import HttpMethod\nfrom osdu_api.model.search.query_response import QueryResponse\n\n'''\nHolds the logic for interfacing with Search's query api\n'''\nclass SearchClient(BaseClient):\n\n '''\n Used to hit search's api endpoint 
\"queryRecords\"\n '''\n def query_records_from_dict(self, query_request: dict):\n query_request_data = json.dumps(query_request)\n\n response = self.make_request(method=HttpMethod.POST, url=self.search_url, data=query_request_data)\n response_content = json.loads(response.content)\n query_response = QueryResponse(response_content['results'], response_content['aggregations'])\n\n return query_response\n\n" }, { "alpha_fraction": 0.7582417726516724, "alphanum_fraction": 0.7592983841896057, "avg_line_length": 36.85599899291992, "blob_id": "3526aac0d521187164da76d16c2bd4db15f1fdf6", "content_id": "0e5c7e1b3d7abba00dc6755b46a2cb3dacc757d1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 9464, "license_type": "permissive", "max_line_length": 119, "num_lines": 250, "path": "/osdu-r2/os-workflow/provider/workflow-gcp-datastore/src/test/java/org/opengroup/osdu/workflow/provider/gcp/repository/DatastoreWorkflowStatusRepositoryTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.repository;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.BDDMockito.given;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.SUBMITTED_AT;\n\nimport java.util.Date;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Nested;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mapstruct.factory.Mappers;\nimport org.mockito.ArgumentCaptor;\nimport org.mockito.Captor;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.Spy;\nimport org.mockito.invocation.InvocationOnMock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusNotFoundException;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusNotUpdatedException;\nimport org.opengroup.osdu.workflow.provider.gcp.mapper.EnumMapper;\nimport org.opengroup.osdu.workflow.provider.gcp.mapper.IWorkflowStatusMapper;\nimport org.opengroup.osdu.workflow.provider.gcp.model.WorkflowStatusEntity;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\nimport org.springframework.beans.BeanUtils;\nimport org.springframework.test.util.ReflectionTestUtils;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass DatastoreWorkflowStatusRepositoryTest {\n\n private static final 
String TEST_WORKFLOW_ID = \"test-workflow-id\";\n private static final String TEST_AIRFLOW_RUN_ID = \"test-airflow-run-id\";\n private static final String USER = \"user-1\";\n\n @Spy\n private IWorkflowStatusMapper workflowStatusMapper = Mappers.getMapper(IWorkflowStatusMapper.class);\n @Mock\n private IWorkflowStatusEntityRepository workflowStatusEntityRepository;\n\n private IWorkflowStatusRepository workflowStatusRepository;\n\n @BeforeEach\n void setUp() {\n EnumMapper enumMapper = new EnumMapper();\n ReflectionTestUtils.setField(workflowStatusMapper, \"enumMapper\", enumMapper);\n\n workflowStatusRepository = new DatastoreWorkflowStatusRepository(workflowStatusMapper,\n workflowStatusEntityRepository);\n }\n\n @Nested\n class FindWorkflowStatus {\n\n @Test\n void shouldFindWorkflowStatusByWorkflowId() {\n\n // given\n Date createdDate = new Date();\n\n given(workflowStatusEntityRepository.findByWorkflowId(TEST_WORKFLOW_ID))\n .willReturn(getWorkflowStatusEntity(createdDate));\n\n // when\n WorkflowStatus workflowStatus = workflowStatusRepository\n .findWorkflowStatus(TEST_WORKFLOW_ID);\n\n // then\n then(workflowStatus).isEqualTo(getWorkflowStatus(createdDate));\n }\n\n }\n\n @Nested\n class SaveWorkflowStatus {\n\n @Captor\n private ArgumentCaptor<WorkflowStatusEntity> entityCaptor;\n\n @Test\n void shouldSaveWorkflowStatusAndReturnSavedEntity() {\n // given\n Date createdDate = new Date();\n WorkflowStatus workflowStatus = getWorkflowStatus(createdDate);\n\n given(workflowStatusEntityRepository.save(any(WorkflowStatusEntity.class)))\n .willAnswer(DatastoreWorkflowStatusRepositoryTest.this::copyWorkflowStatusEntity);\n\n // when\n WorkflowStatus saved = workflowStatusRepository.saveWorkflowStatus(workflowStatus);\n\n // then\n then(saved).isEqualTo(workflowStatus);\n\n InOrder inOrder = Mockito.inOrder(workflowStatusEntityRepository, workflowStatusMapper);\n inOrder.verify(workflowStatusEntityRepository).save(any(WorkflowStatusEntity.class));\n inOrder.verify(workflowStatusMapper).toWorkflowStatus(any(WorkflowStatusEntity.class));\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldUseServerTimestampWhenCreateAtIsNotSpecified() {\n // given\n WorkflowStatus workflowStatus = WorkflowStatus.builder()\n .workflowId(TEST_WORKFLOW_ID)\n .airflowRunId(TEST_AIRFLOW_RUN_ID)\n .workflowStatusType(WorkflowStatusType.SUBMITTED)\n .submittedBy(USER)\n .build();\n\n given(workflowStatusEntityRepository.save(any(WorkflowStatusEntity.class)))\n .willAnswer(DatastoreWorkflowStatusRepositoryTest.this::copyWorkflowStatusEntity);\n\n // when\n WorkflowStatus saved = workflowStatusRepository.saveWorkflowStatus(workflowStatus);\n\n // then\n then(saved).isEqualToIgnoringGivenFields(saved, SUBMITTED_AT);\n then(saved.getSubmittedAt()).isBefore(new Date());\n\n InOrder inOrder = Mockito.inOrder(workflowStatusEntityRepository, workflowStatusMapper);\n inOrder.verify(workflowStatusEntityRepository).save(any(WorkflowStatusEntity.class));\n inOrder.verify(workflowStatusMapper).toWorkflowStatus(any(WorkflowStatusEntity.class));\n inOrder.verifyNoMoreInteractions();\n\n }\n\n }\n\n @Nested\n class UpdateWorkflowStatus {\n\n @Test\n void shouldUpdateWorkflowStatusAndReturnSavedEntity() {\n // given\n Date createdDate = new Date();\n\n given(workflowStatusEntityRepository.findByWorkflowId(TEST_WORKFLOW_ID))\n .willReturn(getWorkflowStatusEntity(createdDate));\n given(workflowStatusEntityRepository.save(any(WorkflowStatusEntity.class)))\n 
.willAnswer(DatastoreWorkflowStatusRepositoryTest.this::copyWorkflowStatusEntity);\n\n // when\n WorkflowStatus saved = workflowStatusRepository\n .updateWorkflowStatus(TEST_WORKFLOW_ID, WorkflowStatusType.RUNNING);\n\n // then\n then(saved.getWorkflowStatusType()).isEqualTo(WorkflowStatusType.RUNNING);\n then(saved.getWorkflowId()).isEqualTo(TEST_WORKFLOW_ID);\n\n InOrder inOrder = Mockito.inOrder(workflowStatusEntityRepository, workflowStatusMapper);\n inOrder.verify(workflowStatusEntityRepository).findByWorkflowId(TEST_WORKFLOW_ID);\n inOrder.verify(workflowStatusEntityRepository).save(any(WorkflowStatusEntity.class));\n inOrder.verify(workflowStatusMapper).toWorkflowStatus(any(WorkflowStatusEntity.class));\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowExceptionWhenNothingWasFound() {\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository\n .updateWorkflowStatus(TEST_WORKFLOW_ID, WorkflowStatusType.RUNNING));\n\n // then\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusNotFoundException.class)\n .hasMessage(\"Workflow status for Workflow id: test-workflow-id not found\");\n\n InOrder inOrder = Mockito.inOrder(workflowStatusEntityRepository, workflowStatusMapper);\n inOrder.verify(workflowStatusEntityRepository).findByWorkflowId(TEST_WORKFLOW_ID);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowExceptionIfWorkflowHasAlreadyDefinedStatus() {\n // given\n Date createdDate = new Date();\n given(workflowStatusEntityRepository.findByWorkflowId(TEST_WORKFLOW_ID))\n .willReturn(getWorkflowStatusEntity(createdDate));\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository.updateWorkflowStatus(\n TEST_WORKFLOW_ID, WorkflowStatusType.SUBMITTED));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusNotUpdatedException.class)\n .hasMessage(\n \"Workflow status for workflow id: test-workflow-id already has status:SUBMITTED and can not be updated\");\n\n InOrder inOrder = Mockito.inOrder(workflowStatusEntityRepository, workflowStatusMapper);\n inOrder.verify(workflowStatusEntityRepository).findByWorkflowId(TEST_WORKFLOW_ID);\n inOrder.verifyNoMoreInteractions();\n }\n\n }\n\n private WorkflowStatusEntity getWorkflowStatusEntity(Date createdDate) {\n return WorkflowStatusEntity.builder()\n .workflowId(TEST_WORKFLOW_ID)\n .airflowRunId(TEST_AIRFLOW_RUN_ID)\n .workflowStatusType(WorkflowStatusType.SUBMITTED.name())\n .submittedAt(createdDate)\n .submittedBy(USER)\n .build();\n }\n\n private WorkflowStatus getWorkflowStatus(Date createdDate) {\n return WorkflowStatus.builder()\n .workflowId(TEST_WORKFLOW_ID)\n .airflowRunId(TEST_AIRFLOW_RUN_ID)\n .workflowStatusType(WorkflowStatusType.SUBMITTED)\n .submittedAt(createdDate)\n .submittedBy(USER)\n .build();\n }\n\n private WorkflowStatusEntity copyWorkflowStatusEntity(InvocationOnMock invocation) {\n WorkflowStatusEntity saved = invocation.getArgument(0);\n WorkflowStatusEntity copied = new WorkflowStatusEntity();\n BeanUtils.copyProperties(saved, copied);\n return copied;\n }\n\n}\n" }, { "alpha_fraction": 0.5949656963348389, "alphanum_fraction": 0.604721188545227, "avg_line_length": 30.332075119018555, "blob_id": "81ba7b8fa24ca29e2386de4f9f3daeb0263991e0", "content_id": "77d1761e370fc6cb9deba03227547a41006db748", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 8303, "license_type": "permissive", "max_line_length": 102, "num_lines": 265, "path": "/osdu-r2/os-ingest/pom.xml", 
"repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n ~ Copyright 2020 Google LLC\n ~\n ~ Licensed under the Apache License, Version 2.0 (the \"License\");\n ~ you may not use this file except in compliance with the License.\n ~ You may obtain a copy of the License at\n ~\n ~ http://www.apache.org/licenses/LICENSE-2.0\n ~\n ~ Unless required by applicable law or agreed to in writing, software\n ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n ~ See the License for the specific language governing permissions and\n ~ limitations under the License.\n -->\n\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n\n <parent>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-parent</artifactId>\n <version>2.2.5.RELEASE</version>\n <relativePath/>\n </parent>\n\n <groupId>org.opengroup.osdu</groupId>\n <artifactId>os-ingest</artifactId>\n <version>0.0.1-SNAPSHOT</version>\n <packaging>pom</packaging>\n\n <name>os-ingest</name>\n <description>Demo project for Spring Boot</description>\n\n <modules>\n <module>ingest-core</module>\n <module>provider/ingest-gcp</module>\n <module>provider/ingest-gcp-datastore</module>\n <module>testing/ingest-test-core</module>\n </modules>\n\n <properties>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n <java.version>1.8</java.version>\n <spring-boot.version>2.2.5.RELEASE</spring-boot.version>\n <spring-cloud.version>Hoxton.SR3</spring-cloud.version>\n <javax.inject.version>1</javax.inject.version>\n <org.mapstruct.version>1.3.1.Final</org.mapstruct.version>\n <maven-surefire-plugin.version>3.0.0-M4</maven-surefire-plugin.version>\n <os-core-common.version>0.0.13</os-core-common.version>\n <json-schema-validator.version>1.0.26</json-schema-validator.version>\n </properties>\n\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-dependencies</artifactId>\n <version>${spring-cloud.version}</version>\n <type>pom</type>\n <scope>import</scope>\n </dependency>\n </dependencies>\n </dependencyManagement>\n\n <dependencies>\n <dependency>\n <groupId>org.projectlombok</groupId>\n <artifactId>lombok</artifactId>\n <scope>provided</scope>\n </dependency>\n\n <!-- OSDU -->\n <dependency>\n <groupId>org.opengroup.osdu</groupId>\n <artifactId>os-core-common</artifactId>\n <version>${os-core-common.version}</version>\n </dependency>\n\n <!-- Spring Boot-->\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter</artifactId>\n </dependency>\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-configuration-processor</artifactId>\n <optional>true</optional>\n </dependency>\n\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-json</artifactId>\n </dependency>\n\n <dependency>\n <groupId>javax.inject</groupId>\n <artifactId>javax.inject</artifactId>\n <version>${javax.inject.version}</version>\n </dependency>\n\n <dependency>\n <groupId>org.apache.commons</groupId>\n <artifactId>commons-lang3</artifactId>\n </dependency>\n <dependency>\n 
<groupId>org.apache.commons</groupId>\n <artifactId>commons-collections4</artifactId>\n <version>4.4</version>\n </dependency>\n\n <!-- Testing -->\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-test</artifactId>\n <scope>test</scope>\n <exclusions>\n <exclusion>\n <groupId>org.junit.vintage</groupId>\n <artifactId>junit-vintage-engine</artifactId>\n </exclusion>\n </exclusions>\n </dependency>\n <dependency>\n <groupId>org.junit.jupiter</groupId>\n <artifactId>junit-jupiter</artifactId>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.mockito</groupId>\n <artifactId>mockito-junit-jupiter</artifactId>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.opengroup.osdu</groupId>\n <artifactId>ingest-test-core</artifactId>\n <version>0.0.1-SNAPSHOT</version>\n <scope>test</scope>\n </dependency>\n\n <dependency>\n <groupId>org.mapstruct</groupId>\n <artifactId>mapstruct</artifactId>\n <version>${org.mapstruct.version}</version>\n </dependency>\n\n </dependencies>\n\n <build>\n <pluginManagement>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-surefire-plugin</artifactId>\n </plugin>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-compiler-plugin</artifactId>\n <configuration>\n <source>${maven.compiler.source}</source>\n <target>${maven.compiler.target}</target>\n <annotationProcessorPaths>\n <path>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-configuration-processor</artifactId>\n <version>${spring-boot.version}</version>\n </path>\n <path>\n <groupId>org.projectlombok</groupId>\n <artifactId>lombok</artifactId>\n <version>${lombok.version}</version>\n </path>\n <path>\n <groupId>org.mapstruct</groupId>\n <artifactId>mapstruct-processor</artifactId>\n <version>${org.mapstruct.version}</version>\n </path>\n </annotationProcessorPaths>\n <compilerArgs>\n <arg>-Amapstruct.defaultComponentModel=jsr330</arg>\n </compilerArgs>\n </configuration>\n </plugin>\n </plugins>\n </pluginManagement>\n\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-checkstyle-plugin</artifactId>\n <version>3.1.0</version>\n <configuration>\n <configLocation>google_checks.xml</configLocation>\n </configuration>\n <executions>\n <execution>\n <goals>\n <goal>checkstyle</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-pmd-plugin</artifactId>\n <version>3.12.0</version>\n <configuration>\n <targetJdk>${java.version}</targetJdk>\n <excludes>\n <exclude>**/*Bean.java</exclude>\n <exclude>**/generated/*.java</exclude>\n </excludes>\n <excludeRoots>\n <excludeRoot>target/generated-sources/</excludeRoot>\n </excludeRoots>\n </configuration>\n <executions>\n <execution>\n <goals>\n <goal>pmd</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>com.github.spotbugs</groupId>\n <artifactId>spotbugs-maven-plugin</artifactId>\n <version>3.1.12</version>\n <executions>\n <execution>\n <goals>\n <goal>spotbugs</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.jacoco</groupId>\n <artifactId>jacoco-maven-plugin</artifactId>\n <version>0.8.4</version>\n <executions>\n <execution>\n <goals>\n <goal>prepare-agent</goal>\n </goals>\n </execution>\n <execution>\n <id>report</id>\n <goals>\n 
<goal>report</goal>\n </goals>\n <phase>test</phase>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n\n</project>\n" }, { "alpha_fraction": 0.6833333373069763, "alphanum_fraction": 0.6903100609779358, "avg_line_length": 36.94117736816406, "blob_id": "9a0ce929dc54e736bdd58da76c68ab1b41306b72", "content_id": "807bb6d610097ee4a49407a96a665b7022510b30", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2580, "license_type": "permissive", "max_line_length": 94, "num_lines": 68, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/validation/SortOrderValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search.validation;\n\nimport org.opengroup.osdu.core.common.SwaggerDoc;\nimport org.opengroup.osdu.core.common.model.search.SortOrder;\nimport org.opengroup.osdu.core.common.model.search.SortQuery;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\nimport java.util.List;\n\npublic class SortOrderValidator implements ConstraintValidator<ValidSortOrder, SortQuery> {\n\n @Override\n public void initialize(final ValidSortOrder constraintAnnotation) {\n }\n\n @Override\n public boolean isValid(SortQuery sort, ConstraintValidatorContext context) {\n if (sort == null) {\n return true;\n }\n List<String> field = sort.getField();\n List<SortOrder> order = sort.getOrder();\n if (isBlank(field)) {\n return getViolation(context, SwaggerDoc.SORT_FIELD_VALIDATION_NOT_EMPTY_MSG);\n }\n if (isBlank(order)) {\n return getViolation(context, SwaggerDoc.SORT_ORDER_VALIDATION_NOT_EMPTY_MSG);\n }\n if (field.size() != order.size()) {\n return getViolation(context, SwaggerDoc.SORT_FIELD_ORDER_SIZE_NOT_MATCH);\n }\n if (field.stream().filter(val -> (val == null || val.trim().isEmpty())).count() > 0) {\n return getViolation(context, SwaggerDoc.SORT_FIELD_LIST_VALIDATION_NOT_EMPTY_MSG);\n }\n if (order.stream().filter(val -> (val == null)).count() > 0) {\n return getViolation(context, SwaggerDoc.SORT_NOT_VALID_ORDER_OPTION);\n }\n return true;\n }\n\n private boolean isBlank(List<?> list) {\n return list == null || list.isEmpty();\n }\n\n private boolean getViolation(ConstraintValidatorContext context, String message) {\n context.disableDefaultConstraintViolation();\n context.buildConstraintViolationWithTemplate(message).addConstraintViolation();\n return false;\n }\n}\n" }, { "alpha_fraction": 0.7676630616188049, "alphanum_fraction": 0.7785326242446899, "avg_line_length": 30.319149017333984, "blob_id": "3ae62c61e6e490d371b2a6ce053dd525bf4a622b", "content_id": "65326f3c02b14a29d5d58b00b5d299efb2ba5762", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1472, 
"license_type": "permissive", "max_line_length": 111, "num_lines": 47, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/jobs/ComplianceChangeInfo.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.jobs;\n\nimport org.opengroup.osdu.core.common.model.legal.LegalCompliance;\nimport org.opengroup.osdu.core.common.model.indexer.OperationType;\nimport org.opengroup.osdu.core.common.model.storage.RecordState;\n\npublic class ComplianceChangeInfo {\n\n\tpublic ComplianceChangeInfo(LegalCompliance newState, OperationType pubSubEvent, RecordState newRecordState) {\n\t\tthis.newState = newState;\n\t\tthis.newRecordState = newRecordState;\n\t\tthis.pubSubEvent = pubSubEvent;\n\t}\n\n\tprivate LegalCompliance newState;\n\tprivate OperationType pubSubEvent;\n\tprivate RecordState newRecordState;\n\n\tpublic LegalCompliance getNewState() {\n\t\treturn this.newState;\n\t}\n\n\tpublic OperationType getPubSubEvent() {\n\t\treturn this.pubSubEvent;\n\t}\n\n\tpublic RecordState getNewRecordState() {\n\t\treturn this.newRecordState;\n\t}\n}\n" }, { "alpha_fraction": 0.6649408340454102, "alphanum_fraction": 0.6767751574516296, "avg_line_length": 29.044445037841797, "blob_id": "bbf0830a187eb7e4aed15e3b621b6c37d9c5025b", "content_id": "975d26f0ed87dd43fac316ae60bed6f589bda805", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1352, "license_type": "permissive", "max_line_length": 133, "num_lines": 45, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/crs/Point.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.crs;\n\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\n\n@Data\n@AllArgsConstructor\npublic class Point {\n private Double x;\n private Double y;\n private Double z;\n\n public Point() {\n setNaN(this);\n }\n\n public static boolean isValid(Point point) {\n if (point == null) return false;\n // if (point.x == null || point.y ==null || point.z == null) return false; // values cannot be null due to lombok constraints\n return 
!(Double.isNaN(point.x) || Double.isNaN(point.y) || Double.isNaN(point.z));\n    }\n\n    public static void setNaN(Point p) {\n        p.x = Double.NaN;\n        p.y = Double.NaN;\n        p.z = Double.NaN;\n    }\n}\n" }, { "alpha_fraction": 0.6705498695373535, "alphanum_fraction": 0.6831313967704773, "avg_line_length": 30.225351333618164, "blob_id": "9e82612f072e57fd4db5884e65053a9626b405ca", "content_id": "eee4d7a18b4d0b6e33759c038781edca27e990f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": true, "language": "Java", "length_bytes": 2146, "license_type": "permissive", "max_line_length": 86, "num_lines": 71, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/cache/TenantSafeCacheTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.cache;\n\nimport org.junit.Test;\n\nimport static org.mockito.Mockito.*;\n\npublic class TenantSafeCacheTest {\n\n    @Test(expected = NullPointerException.class)\n    public void should_throwNullError_when_givenNullTenantName() {\n        new TenantSafeCache<String>(null, mock(ICache.class));\n    }\n\n    @Test\n    public void should_addTenantNameToKey_when_addingToCache() {\n        ICache wrapped = mock(ICache.class);\n        TenantSafeCache<String> sut = new TenantSafeCache<String>(\"tenant1\", wrapped);\n\n        sut.put(\"key\", \"value\");\n\n        verify(wrapped, times(1)).put(\"tenant1key\", \"value\");\n    }\n\n    @Test\n    public void should_addTenantNameToKey_when_deletingFromCache() {\n        ICache wrapped = mock(ICache.class);\n        TenantSafeCache<String> sut = new TenantSafeCache<String>(\"tenant1\", wrapped);\n\n        sut.delete(\"key\");\n\n        verify(wrapped, times(1)).delete(\"tenant1key\");\n    }\n\n    @Test\n    public void should_addTenantNameToKey_when_retrievingFromCache() {\n        ICache wrapped = mock(ICache.class);\n        TenantSafeCache<String> sut = new TenantSafeCache<String>(\"tenant1\", wrapped);\n\n        sut.get(\"key\");\n\n        verify(wrapped, times(1)).get(\"tenant1key\");\n    }\n\n\n    @Test\n    public void should_callWrappedClearCache() {\n        ICache wrapped = mock(ICache.class);\n        TenantSafeCache<String> sut = new TenantSafeCache<String>(\"tenant1\", wrapped);\n\n        sut.clearAll();\n\n        verify(wrapped, times(1)).clearAll();\n    }\n}\n" }, { "alpha_fraction": 0.6506767868995667, "alphanum_fraction": 0.6540014147758484, "avg_line_length": 38.355140686035156, "blob_id": "2b716bb5092e9baf8dc00082fa1b18e2ed2c915b", "content_id": "aeae63bee85b8eae19971e8312bf56ddcc906510", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4211, "license_type": "permissive", "max_line_length": 123, "num_lines": 107, "path": "/osdu-r2/os-python-sdk/osdu_api/base_client.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, 
Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os\nimport importlib\nimport yaml # MIT license\nimport requests\nfrom airflow.models import Variable\nfrom osdu_api.model.http_method import HttpMethod\n\n'''\nBase client that is meant to be extended by service specific clients\n'''\nclass BaseClient:\n\n '''\n Base client gets initialized with configuration values and a bearer token\n based on provider-specific logic\n '''\n def __init__(self):\n self._read_variables()\n self.bearer_token = self._get_bearer_token()\n \n '''\n The path to the logic to get a valid bearer token is dynamically injected based on\n what provider and entitlements module name is provided in the configuration yaml\n '''\n def _get_bearer_token(self):\n entitlements_client = importlib.import_module(f\"osdu_api.provider.{self.provider}.{self.entitlements_module_name}\")\n return entitlements_client.get_bearer_token()\n\n '''\n Parses a yaml filed named osdu_api.yaml. All config values listed below are meant to \n be required except URLs to specific services which may or may not be used depending\n on the specific script\n '''\n def _parse_config(self):\n config_file_location = os.path.join(sys.path[0], 'osdu_api.yaml')\n with open(config_file_location, 'r') as config_file:\n config = yaml.load(config_file)\n self.data_partition_id = self._parse_config_value(config, 'data_partition_id', True)\n self.storage_url = self._parse_config_value(config, 'storage_url', False)\n self.search_url = self._parse_config_value(config, 'search_url', False)\n self.provider = self._parse_config_value(config, 'provider', True)\n self.entitlements_module_name = self._parse_config_value(config, 'entitlements_module_name', True)\n\n '''\n Read Airflow variables \n '''\n def _read_variables(self):\n self.storage_url = Variable.get('storage_url')\n self.search_url = Variable.get('search_url')\n self.provider = Variable.get('provider')\n self.entitlements_module_name = Variable.get('entitlements_module_name')\n \n '''\n Used during parsing of the yaml config file. Will raise an exception if a required config\n value is missing\n '''\n def _parse_config_value(self, config, config_name, is_required):\n config_value = ''\n try:\n config_value = config[config_name]\n except TypeError:\n if(is_required):\n raise Exception('Config value %s missing and is required' % config_name)\n else:\n print('Config value %s missing' % config_name)\n return config_value\n\n '''\n Makes a request using python's built in requests library. 
Takes additional headers if\n    necessary\n    '''\n    def make_request(self, method: HttpMethod, url: str, data = '', add_headers = {}, params = {}):\n        headers = {\n            'content-type': 'application/json',\n            'data-partition-id': self.data_partition_id,\n            'Authorization': self.bearer_token\n        }\n\n        if (len(add_headers) > 0):\n            for key, value in add_headers.items():\n                headers[key] = value\n\n        response = None\n\n        if (method == HttpMethod.GET):\n            response = requests.get(url=url, params=params, headers=headers)\n        elif (method == HttpMethod.POST):\n            response = requests.post(url=url, params=params, data=data, headers=headers)\n        elif (method == HttpMethod.PUT):\n            response = requests.put(url=url, params=params, data=data, headers=headers)\n        \n        return response\n" }, { "alpha_fraction": 0.7165259122848511, "alphanum_fraction": 0.7340168952941895, "avg_line_length": 37.55813980102539, "blob_id": "1300f9754bb84f0ee0052deb45d9430f34db7e51", "content_id": "53a22c173bf97abf3360a068c83c5bb2a9e54f82", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1658, "license_type": "permissive", "max_line_length": 127, "num_lines": 43, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/Constants.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common;\n\npublic final class Constants {\n\n    // Indexer parameters\n    public static final String REINDEX_RELATIVE_URL = \"/api/indexer/v2/_dps/task-handlers/reindex-worker\";\n    public static final String WORKER_RELATIVE_URL = \"/api/indexer/v2/_dps/task-handlers/index-worker\";\n\n    public static final String INDEXER_QUEUE_IDENTIFIER = \"indexer-queue-osdu\";\n\n    // Search parameters\n    public static final int QUERY_DEFAULT_LIMIT = 10;\n    public static final int QUERY_LIMIT_MAXIMUM = 100;\n    public static final int AGGREGATION_SIZE = 1000;\n\n    public static final String PROPERTIES = \"properties\";\n    public static final String DATA = \"data\";\n    public static final String TYPE = \"type\";\n    public static final String KEYWORD = \"keyword\";\n    public static final String FIELDS = \"fields\";\n\n    //headers needed to call storage and get converted data\n    public static final String SLB_FRAME_OF_REFERENCE_VALUE = \"units=SI;crs=wgs84;elevation=msl;azimuth=true north;dates=utc;\";\n\n\n}\n" }, { "alpha_fraction": 0.7160686254501343, "alphanum_fraction": 0.7243889570236206, "avg_line_length": 29.5238094329834, "blob_id": "35e5cf2ad513df641fc2cedb78db1e4ff23d1f1a", "content_id": "9c3ba36f992fc6ca3dd0ad88ea14d717bab1a3bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1923, "license_type": "permissive", "max_line_length": 75, "num_lines": 63, "path": 
"/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/indexer/RecordIndexerPayload.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.indexer;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.model.entitlements.Acl;\nimport org.opengroup.osdu.core.common.model.legal.Legal;\nimport org.opengroup.osdu.core.common.model.storage.RecordAncestry;\n\nimport java.util.List;\nimport java.util.Map;\n\n@Data\n@Builder\n@NoArgsConstructor\n@AllArgsConstructor\npublic class RecordIndexerPayload {\n\n private List<IndexSchema> schemas;\n private List<Record> records;\n\n @Data\n public static class Record {\n private String id;\n private String kind;\n private String namespace;\n private String type;\n private OperationType operationType;\n private long version;\n private Acl acl;\n private IndexProgress indexProgress;\n private Legal legal;\n private RecordAncestry ancestry;\n private Map<String, Object> data;\n @JsonIgnore\n private boolean schemaMissing = false;\n @JsonIgnore\n private boolean mappingMismatch = false;\n\n public boolean skippedDataIndexing() {\n return schemaMissing || mappingMismatch;\n }\n }\n}\n" }, { "alpha_fraction": 0.6892764568328857, "alphanum_fraction": 0.700904369354248, "avg_line_length": 32.65217208862305, "blob_id": "3e2ebb19bb3de6c3645c758928034c074b8b7681", "content_id": "d402c848d7d556bfa856e8cb020d13871803ffdd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1548, "license_type": "permissive", "max_line_length": 79, "num_lines": 46, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/search/util/StringUtilTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.search.util;\n\nimport org.junit.Test;\nimport org.opengroup.osdu.core.common.util.StringUtils;\n\nimport java.util.Set;\n\nimport static junit.framework.TestCase.assertTrue;\nimport static 
org.junit.Assert.assertEquals;\n\npublic class StringUtilTest {\n @Test\n public void should_returnNonEmptyValuesOnly(){\n Set<String> result = StringUtils.splitAndRemoveBlank(\"a,,b,c,\", \",\");\n assertEquals(3, result.size());\n assertTrue(result.contains(\"a\"));\n assertTrue(result.contains(\"b\"));\n assertTrue(result.contains(\"c\"));\n }\n\n @Test\n public void should_returnNonWhitespaceValuesOnly(){\n Set<String> result = StringUtils.splitAndRemoveBlank(\"a, ,b,c, \", \",\");\n assertEquals(3, result.size());\n assertTrue(result.contains(\"a\"));\n assertTrue(result.contains(\"b\"));\n assertTrue(result.contains(\"c\"));\n }\n}\n" }, { "alpha_fraction": 0.7199602723121643, "alphanum_fraction": 0.7279046773910522, "avg_line_length": 28.647058486938477, "blob_id": "cee8fec3437cd277b5b91b29bf19f714418a2c84", "content_id": "884f8f6ac7431d7a3bba9c49271670319b846309", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1007, "license_type": "permissive", "max_line_length": 75, "num_lines": 34, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/data/properties/Local.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.core.data.properties;\n\nimport org.aeonbits.owner.Config;\n\[email protected](\"classpath:services/local.properties\")\npublic interface Local extends Config {\n @Key(\"ingest.local\")\n String getIngestLocalUrl();\n\n @Key(\"localhost\")\n String getLocalhost();\n\n @Key(\"file.service.local\")\n String getFileServiceHost();\n\n @Key(\"workflow.service\")\n String getWorkflowServiceHost();\n}" }, { "alpha_fraction": 0.7181328535079956, "alphanum_fraction": 0.7351884841918945, "avg_line_length": 30.828571319580078, "blob_id": "1511ff42e0d677eea03bddb6e3cf7a795789a23e", "content_id": "4216c8d4016d26fa5b05319fdc7e5ca21c35027f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1114, "license_type": "permissive", "max_line_length": 102, "num_lines": 35, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/http/ResponseHeaderTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage 
org.opengroup.osdu.core.common.http;\n\nimport org.junit.Test;\n\nimport static junit.framework.TestCase.assertEquals;\n\npublic class ResponseHeaderTest {\n\n @Test\n public void should_retrieveFullListOfHeaders() {\n assertEquals(11, ResponseHeaders.STANDARD_RESPONSE_HEADERS.size());\n }\n\n @Test\n public void should_haveXFrameOptions_setToDeny() {\n assertEquals(\"DENY\", ResponseHeaders.STANDARD_RESPONSE_HEADERS.get(\"X-Frame-Options\").get(0));\n }\n}\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7618694305419922, "avg_line_length": 30.34883689880371, "blob_id": "b13f2fb0668e6245ca43ff1696a911f9e5caaf2e", "content_id": "ef99c904d9002e167834376691d8f81708eee42f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1348, "license_type": "permissive", "max_line_length": 90, "num_lines": 43, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/crs/CrsConverterClientFactory.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.crs;\n\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.beans.factory.config.AbstractFactoryBean;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class CrsConverterClientFactory extends AbstractFactoryBean<ICrsConverterFactory> {\n\n //TODO: make it private once endpoint is up for all clouds\n\t@Value(\"${CRS_API:}\")\n\tpublic String CRS_API;\n\n\t@Override\n\tprotected ICrsConverterFactory createInstance() throws Exception {\n\t\treturn new CrsConverterFactory(CrsConverterAPIConfig\n\t\t\t\t.builder()\n\t\t\t\t.rootUrl(CRS_API)\n\t\t\t\t.build());\n\t}\n\n\t@Override\n\tpublic Class<?> getObjectType() {\n\t\treturn ICrsConverterFactory.class;\n\t}\n}\n" }, { "alpha_fraction": 0.6187645792961121, "alphanum_fraction": 0.631288468837738, "avg_line_length": 38.85593032836914, "blob_id": "dd4ca4cb2703504df6085ce8470417d96e43cd0b", "content_id": "0c3d8776369f69f01847a4c43d54766f7654e3d8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4711, "license_type": "permissive", "max_line_length": 151, "num_lines": 118, "path": "/osdu-r2/os-python-sdk/osdu_api/storage/record_client.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom typing import List\nfrom osdu_api.base_client import BaseClient\nfrom osdu_api.model.record import Record\nfrom osdu_api.model.http_method import HttpMethod\n\n'''\nHolds the logic for interfacing with Storage's record api\n'''\nclass RecordClient(BaseClient):\n\n    '''\n    Calls storage's api endpoint createOrUpdateRecords taking a list of record objects and constructing\n    the body of the request\n    Returns the response object for the call\n\n    Example of code to new up a record:\n    acl = Acl(['[email protected]'], ['[email protected]'])\n    legal = Legal(['opendes-storage-1579034803194'], ['US'], LegalCompliance.compliant)\n    ancestry = RecordAncestry([])\n    id = 'opendes:welldb:123456'\n    kind = 'opendes:welldb:wellbore:1.0.0'\n    meta = [{}]\n    version = 0\n    data = {'id': 'test'}\n    record = Record(id, version, kind, acl, legal, data, ancestry, meta)\n    '''\n    def create_update_records(self, records: List[Record], headers: dict):\n        records_data = [record.convert_to_dict() for record in records]\n        print(records_data)\n\n        return self.create_update_records_from_dict(records_data, headers)\n\n    '''\n    Calls storage's api endpoint createOrUpdateRecords taking individual attributes and constructing\n    the body of the request\n    Returns the response object for the call\n\n    Example of records_data:\n    [\n        {\n            \"acl\": {\n                \"owners\":[\n                    \"[email protected]\"\n                ],\n                \"viewers\":[\n                    \"[email protected]\"\n                ]\n            },\n            \"ancestry\":{\n                \"parents\":[]\n            },\n            \"data\":{\"id\":\"test\"},\n            \"id\":\"opendes:welldb:123456\",\n            \"kind\":\"opendes:welldb:wellbore:1.0.0\",\n            \"legal\":{\n                \"legaltags\":[\"opendes-storage-1579034803194\"],\n                \"otherRelevantDataCountries\":[\"US\"],\n                \"status\":\"compliant\"\n            },\n            \"meta\":[\n                {}\n            ],\n            \"version\":0\n        }\n    ]\n    '''\n    def create_update_records_from_dict(self, records: List[dict], headers: dict):\n        records_data = json.dumps(records)\n\n        response = self.make_request(method=HttpMethod.PUT, url=self.storage_url, data=records_data, add_headers=headers)\n\n        return response\n\n    '''\n    Calls storage's api endpoint getLatestRecordVersion taking the required attributes\n    Returns the content for the response object\n    '''\n    def get_latest_record(self, recordId: str, attributes: List[str] = [], headers: dict = {}):\n        request_params = {'attribute': attributes}\n        response = self.make_request(method=HttpMethod.GET, params=request_params, url=f\"{self.storage_url}/{recordId}\", add_headers=headers)\n        response_content = json.loads(response.content)\n        return Record.from_dict(response_content)\n\n    '''\n    Calls storage's api endpoint getSpecificRecordVersion taking the required attributes\n    Returns the content for the response object\n    '''\n    def get_specific_record(self, recordId: str, version: str, headers: dict, attributes: List[str] = []):\n        request_params = {'attribute': attributes}\n        response = self.make_request(method=HttpMethod.GET, params=request_params, url=f\"{self.storage_url}/{recordId}/{version}\", add_headers=headers)\n        response_content = json.loads(response.content)\n        return Record.from_dict(response_content)\n\n    '''\n    Calls storage's api endpoint getRecordVersions taking the one required parameter record id\n    Returns the content for the response object for the call containing the list of versions. 
\n Find the versions in the response.content attribute\n '''\n def get_record_versions(self, recordId: str, headers: dict):\n response = self.make_request(method=HttpMethod.GET, url=f\"{self.storage_url}/versions/{recordId}\", add_headers=headers)\n response_content = json.loads(response.content.decode(\"utf-8\"))\n return response_content['versions']\n " }, { "alpha_fraction": 0.7474437355995178, "alphanum_fraction": 0.7505112290382385, "avg_line_length": 35.67499923706055, "blob_id": "d40fe97ee55c27bf28ef358b107695ca2810e566", "content_id": "269ef78953cd6c9651709bf1fb87edebb972776c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2934, "license_type": "permissive", "max_line_length": 99, "num_lines": 80, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/validation/CommonUpdateStatusRequestValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.validation;\n\nimport static org.opengroup.osdu.workflow.model.WorkflowStatusType.FAILED;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatusType.FINISHED;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatusType.RUNNING;\n\nimport java.util.Arrays;\nimport java.util.List;\nimport javax.validation.ConstraintValidatorContext;\nimport lombok.RequiredArgsConstructor;\nimport org.apache.commons.lang3.StringUtils;\nimport org.opengroup.osdu.workflow.model.UpdateStatusRequest;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.springframework.stereotype.Component;\n\n@Component\n@RequiredArgsConstructor\npublic class CommonUpdateStatusRequestValidator implements IUpdateStatusRequestValidator {\n\n private static final String WORKFLOW_ID_FIELD = \"WorkflowID\";\n private static final String WORKFLOW_STATUS_FIELD = \"Status\";\n private static final List<WorkflowStatusType> VALID_STATUSES = Arrays\n .asList(RUNNING, FINISHED, FAILED);\n\n @Override\n public boolean isValid(UpdateStatusRequest request, ConstraintValidatorContext context) {\n\n String workflowId = request.getWorkflowId();\n WorkflowStatusType workflowStatusType = request.getWorkflowStatusType();\n\n if (StringUtils.isBlank(workflowId)) {\n context.disableDefaultConstraintViolation();\n context\n .buildConstraintViolationWithTemplate(\"{javax.validation.constraints.NotBlank.message}\")\n .addPropertyNode(WORKFLOW_ID_FIELD)\n .addConstraintViolation();\n return false;\n }\n\n if (workflowStatusType == null) {\n context.disableDefaultConstraintViolation();\n context\n .buildConstraintViolationWithTemplate(\"{javax.validation.constraints.NotNull.message}\")\n .addPropertyNode(WORKFLOW_STATUS_FIELD)\n .addConstraintViolation();\n return false;\n }\n\n if (!VALID_STATUSES.contains(workflowStatusType)) {\n String msg = String\n .format(\"Not allowed workflow status type: %s, 
Should be one of: %s\", workflowStatusType,\n VALID_STATUSES);\n context.disableDefaultConstraintViolation();\n context\n .buildConstraintViolationWithTemplate(msg)\n .addPropertyNode(WORKFLOW_STATUS_FIELD)\n .addConstraintViolation();\n return false;\n }\n\n return true;\n }\n\n}\n" }, { "alpha_fraction": 0.7598130702972412, "alphanum_fraction": 0.7672896981239319, "avg_line_length": 28.72222137451172, "blob_id": "c8f7a2941a0a90f211099f251d93961d6fcdda61", "content_id": "62830d52a5593d0204586eb828b6fb7bbe204778", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1070, "license_type": "permissive", "max_line_length": 75, "num_lines": 36, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/JobStatusService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service;\n\nimport com.osdu.model.job.IngestJob;\nimport com.osdu.model.job.IngestJobStatus;\nimport com.osdu.model.job.IngestJobStatusDto;\nimport org.springframework.messaging.MessageHeaders;\n\npublic interface JobStatusService {\n\n IngestJobStatusDto getStatus(String jobId, MessageHeaders headers);\n\n IngestJob get(String jobId);\n\n String initInjectJob();\n\n void updateJobStatus(String jobId, IngestJobStatus status);\n\n void save(IngestJob ingestJob);\n\n}\n" }, { "alpha_fraction": 0.7922964692115784, "alphanum_fraction": 0.797659695148468, "avg_line_length": 39.2156867980957, "blob_id": "b4a623c28a6aa533a83ba1376bca62dd67b8f31f", "content_id": "59779c91d513c905efcde50ccc2b5755f30d3fa2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2051, "license_type": "permissive", "max_line_length": 89, "num_lines": 51, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/api/WorkflowApi.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.api;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.storage.StorageRole;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowRequest;\nimport 
org.opengroup.osdu.core.common.model.workflow.StartWorkflowResponse;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowService;\nimport org.springframework.security.access.prepost.PreAuthorize;\nimport org.springframework.validation.annotation.Validated;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\nimport org.springframework.web.bind.annotation.RestController;\nimport org.springframework.web.context.annotation.RequestScope;\n\n@Slf4j\n@RestController\n@RequestScope\n@Validated\n@RequiredArgsConstructor\npublic class WorkflowApi {\n\n final DpsHeaders headers;\n final IWorkflowService workflowService;\n\n @PostMapping(\"/startWorkflow\")\n @PreAuthorize(\"@authorizationFilter.hasPermission('\" + StorageRole.CREATOR + \"')\")\n public StartWorkflowResponse startWorkflow(@RequestBody StartWorkflowRequest request) {\n log.debug(\"Start Workflow request received : {}\", request);\n StartWorkflowResponse response = workflowService.startWorkflow(request, headers);\n log.debug(\"Start Workflow result ready : {}\", response);\n return response;\n }\n}\n" }, { "alpha_fraction": 0.7487579584121704, "alphanum_fraction": 0.7643718719482422, "avg_line_length": 32.5476188659668, "blob_id": "22d82cb1b1f4e2d606106fb49afb849c4406571f", "content_id": "43b7049a77aea813e013bf38d12e54a01ca5d682", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1409, "license_type": "permissive", "max_line_length": 90, "num_lines": 42, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/validation/OriginatorValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.validation;\n\nimport com.google.common.base.Strings;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\n\npublic class OriginatorValidator implements ConstraintValidator<ValidOriginator, String> {\n\n\tprivate static final String PATTERN = \"^[-. 
A-Za-z0-9]{3,60}+$\";\n\n\t@Override\n\tpublic void initialize(ValidOriginator constraintAnnotation) {\n\t\t//needed by interface - we don't use\n\t}\n\n\t@Override\n\tpublic boolean isValid(String originator, ConstraintValidatorContext context) {\n\t\treturn !isNullOrWhitespace(originator) && originator.matches(PATTERN);\n\t}\n\n\tprivate boolean isNullOrWhitespace(String string){\n\t\treturn Strings.isNullOrEmpty(string) || string.trim().length() == 0;\n\t}\n}\n" }, { "alpha_fraction": 0.7790948152542114, "alphanum_fraction": 0.7866379022598267, "avg_line_length": 31, "blob_id": "e9194925ca8dac9df3e6dd5e1a6d110879fde9d3", "content_id": "d35226d2ecec25a4d43c3d9375bfc8ccb64c6521", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1856, "license_type": "permissive", "max_line_length": 119, "num_lines": 58, "path": "/osdu-r2/os-qa/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU Test automation framework\n\nThe OSDU R2 project has two 2 types of test suites:\n\n* Suites targeted at Google Cloud Platform (GCP) implementation\n* Suites targeted at OSDU GCP services\n\nTo run the tests, the following environment variables must be set:\n\n* Variables for the service host: `INGEST`, `DELIVERY`, `WORKFLOW`, and `AIRFLOW`.\n* A bearer token variable: `TOKEN`.\n* GCP specified tests have additional deep checkouts and require authentication with Google services:\n`GOOGLE_APPLICATION_CREDENTIALS`.\n\n**Example**\n\n```\nTOKEN=temp\nINGEST=https://amer-demo28-test.apigee.net\nDELIVERY=https://amer-demo28-test.apigee.net\nWORKFLOW=https://amer-demo28-test.apigee.net\nAIRFLOW=https://temp\nGOOGLE_APPLICATION_CREDENTIALS = temp\n```\n\nMaven commands for the GCP tests:\n\n```sh\n# OSDU R2 Delivery service (formerly File service)\nmvn clean test -Dsurefire.suiteXmlFile=src/test/resources/suites/file_service/FileServiceGcp.xml\n\n# OSDU R2 Ingest\nmvn clean test -Dsurefire.suiteXmlFile=src/test/resources/suites/ingest/IngestGcp.xml\n\n# OSDU R2 Workflow\nmvn clean test -Dsurefire.suiteXmlFile=src/test/resources/suites/workflow/WorkflowAnyCloud.xml\n```\n\nMaven commands for any cloud tests:\n\n```sh\n# OSDU R2 Delivery service (formerly File service)\nmvn clean test -Dsurefire.suiteXmlFile=src/test/resources/suites/file_service/FileServiceAnyCloud.xml\n\n# OSDU R2 Ingest service\nmvn clean test -Dsurefire.suiteXmlFile=src/test/resources/suites/ingest/IngestAnyCloud.xml\n\n# OSDU R2 Workflow service\nmvn clean test -Dsurefire.suiteXmlFile=src/test/resources/suites/workflow/WorkflowAnyCloud.xml\n```\n\nTo get Allure Report, execute an additional Maven task **after** test suites have run using the following command:\n\n```sh\nmvn site\n```\n\nTo view the test report, open the generated `index.html` file under the `os-qa\\target\\site\\allure-maven-plugin` folder.\n" }, { "alpha_fraction": 0.7481445074081421, "alphanum_fraction": 0.7610093951225281, "avg_line_length": 34.456138610839844, "blob_id": "cb14ae72e0cae6adb3c69d1b8cf12b8b3a4b629c", "content_id": "367c9ebb2b15125e47257f14c82540faa5391a6d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2021, "license_type": "permissive", "max_line_length": 85, "num_lines": 57, "path": "/compatibility-layer/service/delfi-client/src/main/java/com/osdu/service/OsduFeignErrorDecoder.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 
Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service;\n\nimport static feign.FeignException.errorStatus;\n\nimport com.osdu.exception.OsduBadRequestException;\nimport com.osdu.exception.OsduException;\nimport com.osdu.exception.OsduForbiddenException;\nimport com.osdu.exception.OsduNotFoundException;\nimport com.osdu.exception.OsduServerErrorException;\nimport com.osdu.exception.OsduUnauthorizedException;\nimport feign.FeignException;\nimport feign.Response;\nimport feign.codec.ErrorDecoder;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Component;\n\n@Component\n@Slf4j\npublic class OsduFeignErrorDecoder implements ErrorDecoder {\n\n @Override\n public Exception decode(String methodKey, Response response) {\n FeignException feignException = errorStatus(methodKey, response);\n log.error(\"Feign Client exception.\", feignException);\n\n switch (response.status()) {\n case 400:\n return new OsduBadRequestException(\"Bad request\", feignException);\n case 401:\n return new OsduUnauthorizedException(\"Unauthorized\", feignException);\n case 403:\n return new OsduForbiddenException(\"Forbidden\", feignException);\n case 404:\n return new OsduNotFoundException(\"Not found\", feignException);\n case 500:\n return new OsduServerErrorException(\"Internal server error\", feignException);\n default:\n return new OsduException(\"Unknown feignException\", feignException);\n }\n }\n}\n" }, { "alpha_fraction": 0.6823687553405762, "alphanum_fraction": 0.6931359171867371, "avg_line_length": 28.719999313354492, "blob_id": "7d6c4690fd2bba0c9a05f56762ebbad5faa0bec6", "content_id": "ccafd100adfb90d5a848db347e2ea07a8e6e7f73", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1486, "license_type": "permissive", "max_line_length": 81, "num_lines": 50, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/indexer/IndexSchema.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.indexer;\n\nimport lombok.Builder;\nimport lombok.Data;\n\nimport java.util.ArrayList;\nimport java.util.HashSet;\nimport java.util.Map;\nimport java.util.Set;\n\n@Data\n@Builder\npublic class IndexSchema {\n\n private String kind;\n private String type;\n 
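// dataSchema maps each data attribute name to its type, as consumed by getSchemaKeysByValue below; metaSchema holds additional schema-level metadata.\n    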
private Map<String, String> dataSchema;\n private Map<String, Object> metaSchema;\n\n public ArrayList<String> getSchemaKeysByValue(String value) {\n Set<String> keys = new HashSet<>();\n for (Map.Entry<String, String> entry : this.getDataSchema().entrySet()) {\n if (value.equalsIgnoreCase(entry.getValue())) {\n keys.add(entry.getKey());\n }\n }\n return new ArrayList<>(keys);\n }\n\n public boolean isDataSchemaMissing() {\n return dataSchema == null || dataSchema.isEmpty();\n }\n}\n" }, { "alpha_fraction": 0.6787827610969543, "alphanum_fraction": 0.7083685398101807, "avg_line_length": 25.886363983154297, "blob_id": "df530841716e0b45dcd5589383414aa8ad3f7372", "content_id": "c3ba9e2c3ae8094883cad152c36bc2d2db923001", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1183, "license_type": "permissive", "max_line_length": 75, "num_lines": 44, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/PersistenceException.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal;\n\nimport java.util.Collection;\n\npublic class PersistenceException extends RuntimeException {\n\n static final long serialVersionUID = -7034897190745766930L;\n\n private int code;\n\n private String reason;\n\n public PersistenceException(int code, String message, String reason) {\n super(message);\n this.code = code;\n this.reason = reason;\n }\n\n public int getCode(){\n return this.code;\n }\n\n public String getReason(){\n return this.reason;\n }\n\n}\n" }, { "alpha_fraction": 0.7632590532302856, "alphanum_fraction": 0.7694081664085388, "avg_line_length": 37.264705657958984, "blob_id": "e94c44286f17157cd6295fa28c41d4b296d7e356", "content_id": "389b9faebf6498d80592d10442e5afa698321e05", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1301, "license_type": "permissive", "max_line_length": 86, "num_lines": 34, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/client/delfi/DelfiSearchClient.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.client.delfi;\n\nimport 
com.osdu.model.SearchObject;\nimport com.osdu.model.delfi.DelfiSearchResult;\nimport org.springframework.cloud.openfeign.FeignClient;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestHeader;\n\n@FeignClient(url = \"${osdu.delfi.portal.url}\", name = \"search.delfi\")\npublic interface DelfiSearchClient {\n\n\n @PostMapping(\"${search.mapper.searchEndpoint}\")\n DelfiSearchResult searchIndex(@RequestHeader(Header.AUTHORIZATION) String authToken,\n @RequestHeader(Header.APP_KEY) String applicationKey,\n @RequestHeader(Header.SLB_DATA_PARTITION_ID) String partition,\n SearchObject searchObject);\n}\n" }, { "alpha_fraction": 0.6832181215286255, "alphanum_fraction": 0.6920176148414612, "avg_line_length": 33.58695602416992, "blob_id": "a84b13fe06bcbc652ef9137cc3a5ff19a1431313", "content_id": "14ad29360186098f71f563119840711b79e462e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1591, "license_type": "permissive", "max_line_length": 84, "num_lines": 46, "path": "/osdu-r2/os-python-sdk/osdu_api/provider/aws/entitlements_client.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport boto3\n\n#TODO: look at using secrets manager to hold cognito credentials\n'''\nReaches out to aws cognito for a valid user's token based on environment variables. 
\nThe same pattern is used in OSDU java microservices' integration tests\n'''\ndef get_bearer_token():\n    ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY_ID')\n    SECRET_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n    CLIENT_ID = os.environ.get('AWS_COGNITO_CLIENT_ID')\n    USER = os.environ.get('AWS_COGNITO_AUTH_PARAMS_USER')\n    PWD = os.environ.get('AWS_COGNITO_AUTH_PARAMS_PASSWORD')\n\n    client = boto3.client(\n        'cognito-idp',\n        aws_access_key_id=ACCESS_KEY,\n        aws_secret_access_key=SECRET_KEY\n    )\n\n    response = client.initiate_auth(\n        AuthFlow='USER_PASSWORD_AUTH',\n        ClientId=CLIENT_ID,\n        AuthParameters={\n            'USERNAME': USER,\n            'PASSWORD': PWD\n        }\n    )\n\n    return f\"Bearer {response['AuthenticationResult']['AccessToken']}\"\n" }, { "alpha_fraction": 0.7605095505714417, "alphanum_fraction": 0.7681528925895691, "avg_line_length": 36.98387145996094, "blob_id": "48da53fff7dae32ccad49d941e27b36ad7fe61b2", "content_id": "3f53178db8067a40a7082553a9ea5e3f1a31802a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2355, "license_type": "permissive", "max_line_length": 100, "num_lines": 62, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/jackson/Base64Deserializer.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.jackson;\n\nimport static java.lang.String.format;\n\nimport com.fasterxml.jackson.core.JsonParseException;\nimport com.fasterxml.jackson.core.JsonParser;\nimport com.fasterxml.jackson.databind.BeanProperty;\nimport com.fasterxml.jackson.databind.DeserializationContext;\nimport com.fasterxml.jackson.databind.JsonDeserializer;\nimport com.fasterxml.jackson.databind.JsonMappingException;\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.fasterxml.jackson.databind.deser.ContextualDeserializer;\nimport com.fasterxml.jackson.databind.exc.InvalidFormatException;\nimport java.io.IOException;\nimport java.util.Base64;\n\npublic class Base64Deserializer extends JsonDeserializer<Object> implements ContextualDeserializer {\n\n  Class<?> resultClass;\n\n  @Override\n  public Object deserialize(JsonParser parser, DeserializationContext ctxt)\n      throws IOException {\n    String value = parser.getValueAsString();\n    Base64.Decoder decoder = Base64.getDecoder();\n\n    try {\n      ObjectMapper objectMapper = new ObjectMapper();\n      byte[] decodedValue = decoder.decode(value);\n      return objectMapper.readValue(decodedValue, resultClass);\n    } catch (IllegalArgumentException | JsonParseException e) {\n      String fieldName = parser.getParsingContext().getCurrentName();\n      Class<?> wrapperClass = parser.getParsingContext().getCurrentValue().getClass();\n\n      throw new InvalidFormatException(parser,\n          format(\"Value for '%s' is not a base64 encoded JSON\", fieldName), value, wrapperClass);\n    }\n  }\n\n  @Override\n  public JsonDeserializer<?> createContextual(DeserializationContext ctxt, 
BeanProperty property)\n      throws JsonMappingException {\n    this.resultClass = property.getType().getRawClass();\n    return this;\n  }\n}\n" }, { "alpha_fraction": 0.6373445391654968, "alphanum_fraction": 0.6526744961738586, "avg_line_length": 43.70294189453125, "blob_id": "5527729602eb11a60e24c41ab9b280c1141bab28", "content_id": "8c048724d56466d442218b7f02ae67ffde0bdaf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 15199, "license_type": "permissive", "max_line_length": 228, "num_lines": 340, "path": "/osdu-r2/os-ingest/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU R2 Ingestion Service\n\n## Contents\n\n* [Introduction](#introduction)\n* [System interactions](#system-interactions)\n    * [Default ingestion workflow](#default-ingestion-workflow)\n    * [OSDU ingestion workflow](#osdu-ingestion-workflow)\n* [Ingestion API](#ingestion-api)\n    * [POST /submit](#post-submit)\n    * [POST /submitWithManifest](#post-submitwithmanifest)\n* [GCP implementation](#gcp-implementation)\n* [Firestore](#firestore-collections)\n\n## Introduction\n\nThe OSDU R2 Ingestion service starts ingestion of OSDU documents, such as the OSDU Work Products,\nWork Product Components, and Files. The Ingestion service is essentially a wrapper around the [OSDU R2\nWorkflow service] and performs preliminary work before starting actual ingestion. The preliminary\nwork can include fetching file location data or validating the manifest.\n\n## System interactions\n\nThe Ingestion service in the OSDU R2 Prototype provides two ingestion workflows.\n\nThe _Default (Opaque) Ingestion_ workflow is designed to ingest files without metadata. Per request,\nonly one file is ingested.\n\nThe _OSDU (Manifest) Ingestion_ workflow is designed to ingest multiple files with metadata\nassociated with them. The metadata is passed as an OSDU manifest, which must contain an OSDU Work\nProduct and associated Work Product Components.\n\n### Default Ingestion workflow\n\nThe Default Ingestion workflow is designed to ingest one file per request. Before submitting a file\nfor ingestion, the user needs to upload the file to the system. For that purpose, the user needs to\nobtain a signed URL from the OSDU R2 Delivery service, and then upload their file by that URL.\n\nFor more information on uploading files to the system, consult the [OSDU R2 Delivery service\ndocumentation].\n\nThe Default Ingestion workflow starts upon a call to the `/submit` endpoint. The following diagram\nshows this workflow.\n\n![OSDU R2 Ingestion Service submit](https://user-images.githubusercontent.com/21691607/77780782-357ee700-705d-11ea-8388-a1671d06ee22.png)\n\nUpon a `/submit` request:\n\n1. Validate the incoming request.\n    * Verify the authentication token. If the token is missing or invalid, respond with the `401\n    Unauthorized` status.\n    * Verify the partition ID. If the partition ID is missing, invalid or doesn't have assigned user\n    groups, respond with the `400 Bad Request` status.\n    * Verify `FileID`. Respond with the `400 Bad Request` status and the `Missing required field\n    FileID` message if a `FileID` isn't provided.\n    * Verify `DataType`. Respond with the `400 Bad Request` status if the `DataType` is an empty\n    string or consists only of whitespace.\n    > `DataType` can contain any string. If the string is not \"well_log\", then the data type is\n    > treated as \"opaque\". 
During the next steps in the ingestion flow, the Opaque Ingestion DAG\n   > will run for any `DataType` but \"well_log\".\n2. Query the Delivery service's `/getFileLocation` API endpoint to obtain a direct link to the file\nby `FileID`. The Delivery service will verify whether the `FileID` field exists in the database and\nwill fetch the file location data. The following flows are possible for the Delivery service:\n   * Respond with the `400 Bad request` status and the `Missing required field FileID` message if\n   an ID wasn't provided.\n   * Respond with the `Driver` and `Location` for the requested `FileID`.\n3. Query the Workflow service's `/startWorkflow` API endpoint with the workflow type \"ingest\". Pass\nthe file location in the context.\n4. Receive the workflow ID from the Workflow service, and then return the ID to the user or app that\nstarted ingestion.\n\n### OSDU Ingestion workflow\n\nThe OSDU Ingestion workflow is designed to ingest well log .las files with the manifest.\n\nThe OSDU Ingestion workflow starts upon a call to the Ingestion service's `/submitWithManifest`\nendpoint. The following diagram shows the workflow.\n\n![OSDU R2 Ingestion Service submitWithManifest](https://user-images.githubusercontent.com/21691607/77781014-84c51780-705d-11ea-8846-ea08163afcf7.png)\n\nThe workflow is the following:\n\n1. Validate the incoming request.\n   * Verify the authentication token. If the token is missing or invalid, respond with the `401\n   Unauthorized` status.\n   * Verify the partition ID. If the partition ID is missing, invalid, or doesn't have assigned\n   user groups, respond with the `400 Bad Request` status.\n   * Validate the manifest. If the manifest doesn't correspond to the OSDU\n   `WorkProductLoadManifestStagedFiles` schema stored in the project's database, fail ingestion,\n   and then respond with an HTTP error.\n2. Query the Workflow service's `/startWorkflow` API endpoint with the \"osdu\" workflow type and the\nmanifest added in the request's `Context` property.\n3. Return the workflow ID received from the Workflow service.\n\n## Ingestion API\n\nThe Ingest service's API includes the following endpoints in the OSDU R2 Prototype:\n\n* `/submit`, external\n* `/submitWithManifest`, external\n\nGeneral considerations related to querying the Ingestion API:\n\n* Each endpoint must receive the authentication bearer token in the \"Authorization\" header. Example:\n`\"Authorization\": \"Bearer {token}\"`\n* Each endpoint must receive the partition ID in the \"Partition-Id\" header. Example:\n`\"Partition-Id\": \"assigned_partition\"`\n* The request and response Content Type is always \"application/json\"\n\n### POST /submit\n\nThe `/submit` API endpoint starts a new ingestion process and carries out necessary operations\ndepending on the file type. The operations include obtaining file location data from the OSDU R2\nDelivery service. The current implementation of the endpoint supports ingestion of any file type.\n\n#### Incoming request body\n\n| Property   | Type     | Description                                                  |\n| ---------- | -------- | ------------------------------------------------------------ |\n| `FileID`   | `String` | Unique ID of the file                                        |\n| `DataType` | `String` | Type of file. Supported data types: \"well_log\" and \"opaque\"  |\n\n> **Note**: `DataType` can be any string. If the `DataType` value is not \"well_log\", then it's\n> treated as the \"opaque\" data type. 
`DataType` cannot consist only of whitespace.\n\n**Example**:\n\n```sh\ncurl --location --request POST 'https://{path}/submit' \\\n    --header 'Authorization: Bearer {token}' \\\n    --header 'Partition-Id: {assigned DELFI partition ID}' \\\n    --header 'Content-Type: application/json' \\\n    --data-raw '{\n        \"FileID\": \"c26c7656-8c50-4147-b51f-c7a449af33f3\",\n        \"DataType\": \"opaque\"\n    }'\n```\n\n#### Response body\n\n| Property     | Type     | Description                                                        |\n| ------------ | -------- | ------------------------------------------------------------------ |\n| `WorkflowID` | `String` | Unique ID of the workflow that was started by the Workflow service |\n\n#### Internal requests\n\nDuring the `/submit` workflow, the Ingestion service queries the Delivery service's\n`/getFileLocation` API endpoint. The information retrieved from the Delivery API will be added to\nthe request body's Context and passed to the Workflow service.\n\n### POST /submitWithManifest\n\nThe `/submitWithManifest` API endpoint starts the OSDU ingestion process for the OSDU Work Product,\nWork Product Components, and Files passed in the OSDU `WorkProductLoadManifestStagedFiles` manifest.\n\nUnlike the `/submit` endpoint, the request body for `/submitWithManifest` doesn't need to\ncontain a `FileID` and `DataType`.\n\nThe list of file IDs must be added to the manifest's `Files` property. The `DataType` property\ndefaults to \"well_log\" for all files.\n\n#### Incoming request body\n\n| Property                | Type     | Description                                                                                                                          |\n| ----------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------ |\n| `WorkProduct`           | `Object` | OSDU Work Product with **ResourceTypeID**, **ResourceSecurityClassification**, **Data**, and **ComponentsAssociativeIDs** properties |\n| `WorkProductComponents` | `Array`  | List of OSDU Work Product Components. Each WPC contains at least **ResourceTypeID**, **ResourceSecurityClassification**, **AssociativeID**, **FileAssociativeIDs**, and **Data** properties |\n| `Files`                 | `Array`  | List of OSDU Files. 
Each File contains at least **ResourceTypeID**, **ResourceSecurityClassification**, **AssociativeID**, and **Data** properties |\n\nRequest example:\n\n```sh\ncurl -X POST \\\n  https://{Apigee URI}/submitWithManifest \\\n  -H 'Authorization: Bearer {token}' \\\n  -H 'Partition-Id: {assigned DELFI partition ID}' \\\n  -H 'Cache-Control: no-cache' \\\n  -H 'Content-Type: application/json' \\\n  -d '{\n  \"WorkProduct\": {\n    \"ResourceTypeID\": \"srn:type:work-product/WellLog:\",\n    \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n    \"Data\": {\n      \"GroupTypeProperties\": {\n        \"Components\": []\n      },\n      \"IndividualTypeProperties\": {\n        \"Name\": \"AKM-11 LOG\",\n        \"Description\": \"Well Log\"\n      },\n      \"ExtensionProperties\": {}\n    },\n    \"ComponentsAssociativeIDs\": [\n      \"wpc-1\"\n    ]\n  },\n  \"WorkProductComponents\": [\n    {\n      \"ResourceTypeID\": \"srn:type:work-product-component/WellLog:\",\n      \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n      \"Data\": {\n        \"GroupTypeProperties\": {\n          \"Files\": [],\n          \"Artefacts\": []\n        },\n        \"IndividualTypeProperties\": {\n          \"Name\": \"AKM-11 LOG\",\n          \"Description\": \"Well Log\",\n          \"WellboreID\": \"srn:master-data/Wellbore:1013:\",\n          \"TopMeasuredDepth\": {\n            \"Depth\": 2182.0004,\n            \"UnitOfMeasure\": \"srn:reference-data/UnitOfMeasure:M:\"\n          },\n          \"BottomMeasuredDepth\": {\n            \"Depth\": 2481.0,\n            \"UnitOfMeasure\": \"srn:reference-data/UnitOfMeasure:M:\"\n          },\n          \"Curves\": [\n            {\n              \"Mnemonic\": \"DEPT\",\n              \"TopDepth\": 2182.0,\n              \"BaseDepth\": 2481.0,\n              \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n              \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:M:\"\n            },\n            {\n              \"Mnemonic\": \"GR\",\n              \"TopDepth\": 2182.0,\n              \"BaseDepth\": 2481.0,\n              \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n              \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:GAPI:\"\n            },\n            {\n              \"Mnemonic\": \"DT\",\n              \"TopDepth\": 2182.0,\n              \"BaseDepth\": 2481.0,\n              \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n              \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:US/F:\"\n            },\n            {\n              \"Mnemonic\": \"RHOB\",\n              \"TopDepth\": 2182.0,\n              \"BaseDepth\": 2481.0,\n              \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n              \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:G/C3:\"\n            },\n            {\n              \"Mnemonic\": \"DRHO\",\n              \"TopDepth\": 2182.0,\n              \"BaseDepth\": 2481.0,\n              \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n              \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:G/C3:\"\n            },\n            {\n              \"Mnemonic\": \"NPHI\",\n              \"TopDepth\": 2182.0,\n              \"BaseDepth\": 2481.0,\n              \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n              \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:V/V:\"\n            }\n          ]\n        },\n        \"ExtensionProperties\": {}\n      },\n      \"AssociativeID\": \"wpc-1\",\n      \"FileAssociativeIDs\": [\n        \"f-1\"\n      ]\n    }\n  ],\n  \"Files\": [\n    {\n      \"ResourceTypeID\": \"srn:type:file/las2:\",\n      \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n      \"Data\": {\n        \"GroupTypeProperties\": {\n          \"FileSource\": \"\",\n          \"PreLoadFilePath\": \"{Path to File}\"\n        },\n        \"IndividualTypeProperties\": {},\n        \"ExtensionProperties\": {}\n      },\n      \"AssociativeID\": \"f-1\"\n    }\n  ]\n}\n'\n```\n\n#### Response body\n\n| Property     | Type     | Description                                                        |\n| ------------ | -------- | ------------------------------------------------------------------ |\n| `WorkflowID` | `String` | Unique ID of the workflow that was started by the Workflow service |\n\n## Validation\n\nThe Ingestion service's 
current implementation performs a general check of the validity of the\nincoming authentication token and partition ID. Also, the service checks if the `FileID` property is\nprovided. For the OSDU Ingestion workflow, the service also validates the manifest.\n\nIn the OSDU R2 Prototype, the service doesn't verify whether a file upload actually happened.\n\n## GCP implementation\n\nFor development purposes, it's recommended to create a separate GCP Identity and Access Management\nservice account. It's enough to grant the **Service Account Token Creator** role to the development\nservice account.\n\nObtaining user credentials for Application Default Credentials isn't suitable in this case because\nsigning a blob is only available with the service account credentials. Remember to set the\n`GOOGLE_APPLICATION_CREDENTIALS` environment variable. Follow the [instructions on the Google\ndeveloper's portal][application-default-credentials].\n\n### Persistence layer\n\nThe GCP implementation contains two mutually exclusive modules to work with the persistence layer.\nPresently, OSDU R2 connects to legacy Cloud Datastore for compatibility with the current OpenDES\nimplementation. In future OSDU releases, Cloud Datastore will be replaced by a Cloud Firestore\nimplementation that's already available in the project.\n\n* The Cloud Datastore implementation is located in the **provider/ingest-gcp-datastore** folder.\n* The Cloud Firestore implementation is located in the **provider/ingest-gcp** folder.\n\n### Firestore collections\n\nThe manifest validation schema is stored in the `schema-data` collection with the following\nproperties.\n\n| Property  | Type    | Description                                                 |\n| --------- | ------- | ---------------------------------------------------------- |\n| CreatedAt | String  | The timestamp when the record was created.                 |\n| Schema    | String  | The OSDU [WorkProductLoadManifestStagedFiles] JSON schema. |\n| Title     | String  | The name of the manifest validation schema. 
|\n\n[OSDU R2 Workflow service]: ../os-workflow/README.md\n[OSDU R2 Delivery service documentation]: ../os-delivery/README.md\n[WorkProductLoadManifestStagedFiles]: https://gitlab.opengroup.org/osdu/open-test-data/blob/master/rc-1.0.0/3-schemas/WorkProductLoadManifestStagedFiles.json\n[application-default-credentials]: https://developers.google.com/identity/protocols/application-default-credentials#calling\n" }, { "alpha_fraction": 0.7358856797218323, "alphanum_fraction": 0.7381317019462585, "avg_line_length": 37.71936798095703, "blob_id": "05f62c94671a14c0ecc0949448e14c88a7db5547", "content_id": "6fa0990e73248f67eb64bbda013e7189e8fdb4fb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 9795, "license_type": "permissive", "max_line_length": 104, "num_lines": 253, "path": "/compatibility-layer/service/ingest/src/test/java/com/osdu/service/validation/LoadManifestValidationServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.validation;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.BDDMockito.willReturn;\nimport static org.mockito.BDDMockito.willThrow;\n\nimport com.fasterxml.jackson.annotation.JsonInclude.Include;\nimport com.fasterxml.jackson.databind.JsonNode;\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.fasterxml.jackson.databind.SerializationFeature;\nimport com.osdu.ReplaceCamelCase;\nimport com.osdu.exception.IngestException;\nimport com.osdu.exception.OsduServerErrorException;\nimport com.osdu.model.type.base.ExtensionProperties;\nimport com.osdu.model.type.base.IndividualTypeProperties;\nimport com.osdu.model.type.file.FileData;\nimport com.osdu.model.type.file.FileGroupTypeProperties;\nimport com.osdu.model.type.manifest.LoadManifest;\nimport com.osdu.model.type.manifest.ManifestFile;\nimport com.osdu.model.type.manifest.ManifestWp;\nimport com.osdu.model.type.manifest.ManifestWpc;\nimport com.osdu.model.type.wp.WpData;\nimport com.osdu.model.type.wp.WpGroupTypeProperties;\nimport com.osdu.model.type.wp.WpIndividualTypeProperties;\nimport com.osdu.model.type.wp.WpcData;\nimport com.osdu.model.type.wp.WpcGroupTypeProperties;\nimport com.osdu.model.type.wp.WpcIndividualTypeProperties;\nimport com.osdu.service.processing.CustomSchemeFetcher;\nimport com.osdu.service.validation.schema.ClassloaderManifestSchemaReceiver;\nimport com.osdu.service.validation.schema.ManifestSchemaReceiver;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.Collections;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport 
org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.Spy;\nimport org.mockito.junit.jupiter.MockitoExtension;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass LoadManifestValidationServiceTest {\n\n  private static final String FILEPATH = \"http://some.host.com/temp-file/file.las\";\n  private static final String WP_RESOURCE_TYPE_ID = \"srn:type:work-product/WellLog:version1\";\n  private static final String WPC_RESOURCE_TYPE_ID = \"srn:type:work-product-component/WellLog:version1\";\n  private static final String FILE_RESOURCE_TYPE_ID = \"srn:type:file/las2:version1\";\n  private static final String RESOURCE_SECURITY_CLASSIFICATION =\n      \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\";\n  private static final String WPC_ID_1 = \"wpc-1\";\n  private static final String FILE_ID_1 = \"f-1\";\n  private static final String WELL_LOG_NAME = \"AKM-11 LOG\";\n  private static final String DESCRIPTION = \"Well Log\";\n\n  @Mock\n  private CustomSchemeFetcher schemeFetcher;\n  @Spy\n  private ManifestSchemaReceiver manifestSchemaReceiver = new ClassloaderManifestSchemaReceiver();\n\n  private JsonValidationService jsonValidationService;\n  private ObjectMapper objectMapper;\n\n  private LoadManifestValidationService service;\n\n  @BeforeEach\n  void setUp() {\n    jsonValidationService = Mockito.spy(new JsonValidationService(schemeFetcher));\n    objectMapper = Mockito.spy(new ObjectMapper()\n        .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS)\n        .setSerializationInclusion(Include.NON_NULL)\n        .findAndRegisterModules());\n    service = new LoadManifestValidationService(manifestSchemaReceiver, jsonValidationService,\n        objectMapper);\n  }\n\n  @Test\n  void shouldSuccessfullyValidateLoadManifest() {\n    // when\n    service.validateManifest(getLoadManifest());\n\n    // then\n    InOrder inOrder = Mockito.inOrder(jsonValidationService, schemeFetcher);\n    inOrder.verify(jsonValidationService).validate(any(JsonNode.class), any(JsonNode.class));\n    inOrder.verifyNoMoreInteractions();\n  }\n\n  @Test\n  void shouldThrowIngestExceptionWhenValidationHasTheErrors() {\n    // when\n    Throwable thrown = catchThrowable(() -> service.validateManifest(new LoadManifest()));\n\n    // then\n    then(thrown)\n        .isInstanceOf(IngestException.class)\n        .hasMessageStartingWith(\"Failed to validate json from manifest\")\n        .hasMessageContaining(\"validation result is\");\n\n    InOrder inOrder = Mockito.inOrder(jsonValidationService, schemeFetcher);\n    inOrder.verify(jsonValidationService).validate(any(JsonNode.class), any(JsonNode.class));\n    inOrder.verifyNoMoreInteractions();\n  }\n\n  @Test\n  void shouldThrowServerErrorExceptionWhenSchemaIsNotFound() {\n    // given\n    willReturn(null).given(manifestSchemaReceiver).getLoadManifestSchema();\n\n    // when\n    Throwable thrown = catchThrowable(() -> service.validateManifest(getLoadManifest()));\n\n    // then\n    then(thrown)\n        .isInstanceOf(OsduServerErrorException.class)\n        .hasMessage(\"Can not find resource for load manifest schema\");\n\n    InOrder inOrder = Mockito.inOrder(jsonValidationService, schemeFetcher);\n    inOrder.verifyNoMoreInteractions();\n  }\n\n  @Test\n  void shouldThrowIngestExceptionWhenFailToParseManifestSchema() throws IOException {\n    // given\n    willThrow(IOException.class).given(objectMapper).readTree(any(InputStream.class));\n\n    // when\n    Throwable thrown = catchThrowable(() -> service.validateManifest(getLoadManifest()));\n\n    // then\n    then(thrown)\n        .isInstanceOf(IngestException.class)\n        
.hasMessageStartingWith(\"Fail parse load manifest\");\n\n InOrder inOrder = Mockito.inOrder(jsonValidationService, schemeFetcher);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowIllegalArgumentExceptionWhenUnableToReadLoadManifestAsTree() {\n // given\n willThrow(IllegalArgumentException.class).given(objectMapper)\n .valueToTree(any(LoadManifest.class));\n\n // when\n Throwable thrown = catchThrowable(() -> service.validateManifest(getLoadManifest()));\n\n // then\n then(thrown).isInstanceOf(IllegalArgumentException.class);\n\n InOrder inOrder = Mockito.inOrder(jsonValidationService, schemeFetcher);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowIngestExceptionWhenFailToCreateJsonSchema() throws IOException {\n // given\n JsonNode uriNode = Mockito.mock(JsonNode.class);\n JsonNode schemaNode = Mockito.mock(JsonNode.class);\n\n willReturn(schemaNode).given(objectMapper).readTree(any(InputStream.class));\n given(schemaNode.get(\"$schema\")).willReturn(uriNode);\n\n // when\n Throwable thrown = catchThrowable(() -> service.validateManifest(getLoadManifest()));\n\n // then\n then(thrown)\n .isInstanceOf(IngestException.class)\n .hasMessageStartingWith(\"Error creating json validation schema from json object:\");\n\n InOrder inOrder = Mockito.inOrder(jsonValidationService, schemeFetcher);\n inOrder.verify(jsonValidationService).validate(any(JsonNode.class), any(JsonNode.class));\n inOrder.verifyNoMoreInteractions();\n }\n\n private LoadManifest getLoadManifest() {\n ManifestFile file = ManifestFile.builder()\n .resourceTypeID(FILE_RESOURCE_TYPE_ID)\n .resourceSecurityClassification(RESOURCE_SECURITY_CLASSIFICATION)\n .associativeId(FILE_ID_1)\n .data(FileData.builder()\n .groupTypeProperties(FileGroupTypeProperties.builder()\n .preLoadFilePath(FILEPATH)\n .fileSource(\"\")\n .build())\n .individualTypeProperties(new IndividualTypeProperties())\n .extensionProperties(new ExtensionProperties())\n .build())\n .build();\n\n ManifestWpc wpc = ManifestWpc.builder()\n .resourceTypeID(WPC_RESOURCE_TYPE_ID)\n .resourceSecurityClassification(RESOURCE_SECURITY_CLASSIFICATION)\n .associativeId(WPC_ID_1)\n .fileAssociativeIds(Collections.singletonList(FILE_ID_1))\n .data(WpcData.builder()\n .groupTypeProperties(WpcGroupTypeProperties.builder()\n .files(Collections.emptyList())\n .artefacts(Collections.emptyList())\n .build())\n .individualTypeProperties(WpcIndividualTypeProperties.builder()\n .name(WELL_LOG_NAME)\n .description(DESCRIPTION)\n .build())\n .extensionProperties(new ExtensionProperties())\n .build())\n .build();\n\n ManifestWp wp = ManifestWp.builder()\n .resourceTypeID(WP_RESOURCE_TYPE_ID)\n .resourceSecurityClassification(RESOURCE_SECURITY_CLASSIFICATION)\n .componentsAssociativeIDs(Collections.singletonList(WPC_ID_1))\n .data(WpData.builder()\n .groupTypeProperties(WpGroupTypeProperties.builder()\n .components(Collections.emptyList())\n .build())\n .individualTypeProperties(WpIndividualTypeProperties.builder()\n .name(WELL_LOG_NAME)\n .description(DESCRIPTION)\n .build())\n .extensionProperties(new ExtensionProperties())\n .build())\n .build();\n\n return LoadManifest.builder()\n .workProduct(wp)\n .workProductComponent(wpc)\n .file(file)\n .build();\n }\n}" }, { "alpha_fraction": 0.7961129546165466, "alphanum_fraction": 0.8001466989517212, "avg_line_length": 38.5217399597168, "blob_id": "a7e93e268eacaa65d6ed2005b9ab3c3cff239c49", "content_id": "86054bfd93b91edc7c05ed11940cce6748231296", "detected_licenses": [ "Apache-2.0" ], "is_generated": 
false, "is_vendor": false, "language": "Java", "length_bytes": 2727, "license_type": "permissive", "max_line_length": 96, "num_lines": 69, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/service/WorkflowServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.service;\n\nimport java.util.UUID;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowRequest;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowResponse;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.interfaces.IIngestionStrategyService;\nimport org.opengroup.osdu.workflow.provider.interfaces.ISubmitIngestService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\nimport org.springframework.stereotype.Service;\n\n@Service\n@RequiredArgsConstructor\n@Slf4j\npublic class WorkflowServiceImpl implements IWorkflowService {\n\n final IValidationService validationService;\n final IIngestionStrategyService ingestionStrategyService;\n final ISubmitIngestService submitIngestService;\n final IWorkflowStatusRepository workflowStatusRepository;\n\n @Override\n public StartWorkflowResponse startWorkflow(StartWorkflowRequest request, DpsHeaders headers) {\n log.debug(\"Start Workflow with payload - {}\", request);\n\n validationService.validateStartWorkflowRequest(request);\n\n String userId = headers.getUserEmail();\n\n String strategyName = ingestionStrategyService.determineStrategy(request.getWorkflowType(),\n request.getDataType(), userId);\n\n String workflowId = UUID.randomUUID().toString();\n String airflowRunId = UUID.randomUUID().toString();\n\n submitIngestService.submitIngest(strategyName, request.getContext());\n\n workflowStatusRepository.saveWorkflowStatus(WorkflowStatus.builder()\n .workflowId(workflowId)\n .airflowRunId(airflowRunId)\n .workflowStatusType(WorkflowStatusType.SUBMITTED)\n .build());\n\n return StartWorkflowResponse.builder().workflowId(workflowId).build();\n }\n\n}\n" }, { "alpha_fraction": 0.6982065439224243, "alphanum_fraction": 0.7034631967544556, "avg_line_length": 39.936710357666016, "blob_id": "1adefbc9db9bff9e56bfa02dc87ed63ebcb84fd1", "content_id": "72c1971be8cd1648f0fd8897af0b137898f3ac3a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3234, "license_type": "permissive", "max_line_length": 130, "num_lines": 79, "path": 
"/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/crs/CrsConverterService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.crs;\n\nimport com.google.gson.JsonSyntaxException;\nimport org.apache.commons.lang3.StringUtils;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.crs.*;\nimport org.opengroup.osdu.core.common.http.HttpRequest;\nimport org.opengroup.osdu.core.common.http.HttpResponse;\nimport org.opengroup.osdu.core.common.http.IHttpClient;\n\npublic class CrsConverterService implements ICrsConverterService {\n private final String rootUrl;\n private final IHttpClient httpClient;\n private final DpsHeaders headers;\n\n CrsConverterService(CrsConverterAPIConfig config,\n IHttpClient httpClient,\n DpsHeaders headers) {\n this.rootUrl = config.getRootUrl();\n this.httpClient = httpClient;\n this.headers = headers;\n if (config.apiKey != null) {\n headers.put(\"AppKey\", config.apiKey);\n }\n }\n\n @Override\n public ConvertPointsResponse convertPoints(ConvertPointsRequest request) throws CrsConverterException {\n String url = this.createUrl(\"/convert\");\n HttpResponse result = this.httpClient.send(HttpRequest.post(request).url(url).headers(this.headers.getHeaders()).build());\n return this.getResult(result, ConvertPointsResponse.class);\n }\n\n @Override\n public ConvertTrajectoryResponse convertTrajectory(ConvertTrajectoryRequest request) throws CrsConverterException {\n String url = this.createUrl(\"/convertTrajectory\");\n HttpResponse result = this.httpClient.send(HttpRequest.post(request).url(url).headers(this.headers.getHeaders()).build());\n return this.getResult(result, ConvertTrajectoryResponse.class);\n }\n\n private CrsConverterException generateException(HttpResponse result) {\n return new CrsConverterException(\n \"Error making request to CrsConverter service. Check the inner HttpResponse for more info.\", result);\n }\n\n private String createUrl(String pathAndQuery) {\n return StringUtils.join(this.rootUrl, pathAndQuery);\n }\n\n private <T> T getResult(HttpResponse result, Class<T> type) throws CrsConverterException {\n if (result.isSuccessCode()) {\n try {\n return result.parseBody(type);\n } catch (JsonSyntaxException e) {\n throw new CrsConverterException(\"Error parsing response. 
Check the inner HttpResponse for more info.\",\n result);\n }\n } else {\n throw this.generateException(result);\n }\n }\n}\n" }, { "alpha_fraction": 0.7459689974784851, "alphanum_fraction": 0.751140832901001, "avg_line_length": 32.8865966796875, "blob_id": "3187337f6e5adacec0a472af7f9c8529891d0baa", "content_id": "a88aa70550bac79965728577fec5846410483075", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3287, "license_type": "permissive", "max_line_length": 91, "num_lines": 97, "path": "/osdu-r2/os-ingest/provider/ingest-gcp-datastore/src/test/java/org/opengroup/osdu/ingest/provider/gcp/repository/DatastoreSchemaRepositoryTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.provider.gcp.repository;\n\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.BDDMockito.given;\nimport static org.opengroup.osdu.ingest.ResourceUtils.getResource;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport java.util.Date;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.Mock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.ingest.ReplaceCamelCase;\nimport org.opengroup.osdu.ingest.model.SchemaData;\nimport org.opengroup.osdu.ingest.provider.gcp.mapper.ISchemaDataMapper;\nimport org.opengroup.osdu.ingest.provider.gcp.model.entity.SchemaDataEntity;\nimport org.opengroup.osdu.ingest.provider.interfaces.ISchemaRepository;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass DatastoreSchemaRepositoryTest {\n\n private static final String SCHEMA_TITLE = \"test-schema-title\";\n private static final String DRAFT_07_SCHEMA_PATH = \"3-schemas/TinySchemaDraft7.json\";\n\n @Mock\n private ISchemaDataMapper schemaDataMapper;\n @Mock\n private ISchemaDataEntityRepository entityRepository;\n\n private ObjectMapper objectMapper = new ObjectMapper();\n\n private ISchemaRepository schemaRepository;\n\n @BeforeEach\n void setUp() {\n schemaRepository = new DatastoreSchemaRepository(schemaDataMapper, entityRepository);\n }\n\n @Test\n void shouldFindSchemaDataByTitle() throws Exception {\n // given\n Date now = new Date();\n given(entityRepository.findByTitle(SCHEMA_TITLE)).willReturn(SchemaDataEntity.builder()\n .title(SCHEMA_TITLE)\n .schema(getResource(DRAFT_07_SCHEMA_PATH))\n .createdAt(now)\n .build());\n given(schemaDataMapper.schemaDataDtoToSchemaData(any())).willAnswer(invocation -> {\n SchemaDataEntity entity = invocation.getArgument(0);\n return SchemaData.builder()\n .title(entity.getTitle())\n .schema(objectMapper.readTree(entity.getSchema()))\n .created(now)\n 
.build();\n });\n\n // when\n SchemaData schemaData = schemaRepository.findByTitle(SCHEMA_TITLE);\n\n // then\n then(schemaData).isEqualTo(SchemaData.builder()\n .title(SCHEMA_TITLE)\n .schema(objectMapper.readTree(getResource(DRAFT_07_SCHEMA_PATH)))\n .created(now)\n .build());\n }\n\n @Test\n void shouldReturnNullWhenNothingWasFound() {\n // when\n SchemaData schemaData = schemaRepository.findByTitle(\"nothing\");\n\n // then\n then(schemaData).isNull();\n }\n\n}\n" }, { "alpha_fraction": 0.7661843299865723, "alphanum_fraction": 0.7734196782112122, "avg_line_length": 34.97260284423828, "blob_id": "8a5e1d715fc418661c1daab90e728332d19e02a4", "content_id": "15bcd2632b093b6c79ccfd815d07421cd0b7a2d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2626, "license_type": "permissive", "max_line_length": 118, "num_lines": 73, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/Query.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.SwaggerDoc;\nimport org.opengroup.osdu.core.common.model.search.validation.ValidMultiKind;\nimport org.opengroup.osdu.core.common.model.search.validation.ValidSortOrder;\nimport org.opengroup.osdu.core.common.model.search.validation.ValidSpatialFilter;\nimport org.springframework.validation.annotation.Validated;\n\nimport javax.validation.Valid;\nimport javax.validation.constraints.Min;\nimport javax.validation.constraints.NotBlank;\nimport java.util.ArrayList;\nimport java.util.List;\n\n@Data\n@Validated\n@NoArgsConstructor\n@AllArgsConstructor\npublic abstract class Query {\n\n @NotBlank(message = SwaggerDoc.KIND_VALIDATION_CAN_NOT_BE_NULL_OR_EMPTY)\n @ApiModelProperty(value = SwaggerDoc.KIND_REQUEST_DESCRIPTION, required = true, example = SwaggerDoc.KIND_EXAMPLE)\n @ValidMultiKind\n private String kind;\n\n @Min(value = 0, message = SwaggerDoc.LIMIT_VALIDATION_MIN_MSG)\n @ApiModelProperty(value = SwaggerDoc.LIMIT_DESCRIPTION, dataType = \"java.lang.Integer\", example = \"30\")\n private int limit;\n\n @ApiModelProperty(value = SwaggerDoc.QUERY_DESCRIPTION)\n private String query = \"\";\n\n @JsonIgnore\n boolean returnHighlightedFields = false;\n\n @ApiModelProperty(value = SwaggerDoc.RETURNED_FIELDS_DESCRIPTION)\n private List<String> returnedFields = new ArrayList<>();\n\n @Valid\n @ValidSortOrder\n @ApiModelProperty(value = SwaggerDoc.SORT_DESCRIPTION)\n private SortQuery sort;\n\n @ApiModelProperty(value = SwaggerDoc.QUERYASOWNER_DESCRIPTION, dataType = \"java.lang.Boolean\", example = \"false\")\n 
private boolean queryAsOwner;\n\n @Valid\n @ValidSpatialFilter\n @ApiModelProperty(value = SwaggerDoc.SPATIAL_FILTER_DESCRIPTION)\n private SpatialFilter spatialFilter;\n}\n" }, { "alpha_fraction": 0.7610208988189697, "alphanum_fraction": 0.7656612396240234, "avg_line_length": 36.5, "blob_id": "2a8e988e5edfd409407b9eff9581c13f728abac6", "content_id": "2c1a7d617c05c9b414ea7a5abe121fd8b1c02fe2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1724, "license_type": "permissive", "max_line_length": 139, "num_lines": 46, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/endpoints/factories/specified/LocalUrlFactory.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.core.endpoints.factories.specified;\n\nimport com.osdu.core.data.properties.PropertyHolder;\nimport com.osdu.core.endpoints.creator.InstanceCreator;\nimport com.osdu.core.endpoints.services.FileServiceCreator;\nimport com.osdu.core.endpoints.services.IngestCreator;\nimport com.osdu.core.endpoints.services.WorkflowServiceCreator;\n\n/**\n * Get local url for the required endpoint\n */\npublic class LocalUrlFactory implements BaseFactory {\n InstanceCreator instanceCreator = new InstanceCreator();\n\n @Override\n public String getIngest(String resource) {\n return instanceCreator.creator(new IngestCreator()).getUrl(PropertyHolder.localProps.getIngestLocalUrl() + resource);\n }\n\n @Override\n public String getFileService(String resource) {\n return instanceCreator.creator(new FileServiceCreator()).getUrl(PropertyHolder.localProps.getFileServiceHost() + resource);\n }\n\n @Override\n public String getWorkflowService(String resource) {\n return instanceCreator.creator(new WorkflowServiceCreator()).getUrl(PropertyHolder.localProps.getWorkflowServiceHost() + resource);\n\n }\n}" }, { "alpha_fraction": 0.7041176557540894, "alphanum_fraction": 0.7141176462173462, "avg_line_length": 33, "blob_id": "44166be9436d329793e51a570404dc1c237b1505", "content_id": "7c27f9bd3313e932159eda2fa622b81ff2b85b8d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": true, "language": "Java", "length_bytes": 1700, "license_type": "permissive", "max_line_length": 108, "num_lines": 50, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/cache/MultiTenantCache.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.cache;\n\nimport org.apache.commons.lang3.StringUtils;\n\nimport java.util.HashMap;\nimport java.util.Map;\n\npublic class MultiTenantCache<V> {\n private final ICache<String, V> wrappedCache;\n private final Map<String, TenantSafeCache> tenantCaches = new HashMap<>();\n\n public MultiTenantCache(ICache<String, V> cache) {\n this.wrappedCache = cache;\n }\n\n public ICache<String, V> get(String partition) {\n String tenantName = partition;\n if (StringUtils.isBlank(tenantName))\n invalidTenantGivenException(tenantName);\n if (!tenantCaches.containsKey(tenantName)) {\n addCache(tenantName);\n }\n return tenantCaches.get(tenantName);\n }\n\n private void addCache(String tenantName) {\n tenantCaches.put(tenantName, new TenantSafeCache<>(tenantName, wrappedCache));\n }\n\n private void invalidTenantGivenException(String tenantName) {\n throw new IllegalArgumentException(String.format(\"Partition given does not exist: %s\", tenantName));\n }\n}\n" }, { "alpha_fraction": 0.715177595615387, "alphanum_fraction": 0.7186221480369568, "avg_line_length": 42, "blob_id": "10f115ef9c749c7d1708b84ea89e5481757c9c03", "content_id": "f2d8924a32e684ee7a8ec0c60e7c724cc2f51838", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4645, "license_type": "permissive", "max_line_length": 169, "num_lines": 108, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/entitlements/AuthorizationServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.entitlements;\n\nimport org.opengroup.osdu.core.common.model.entitlements.AuthorizationResponse;\nimport org.opengroup.osdu.core.common.model.entitlements.GroupInfo;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.entitlements.EntitlementsException;\nimport org.opengroup.osdu.core.common.model.entitlements.Groups;\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.opengroup.osdu.core.common.provider.interfaces.IAuthorizationService;\nimport org.opengroup.osdu.core.common.http.HeadersUtil;\nimport org.opengroup.osdu.core.common.http.HttpResponse;\nimport org.opengroup.osdu.core.common.logging.JaxRsDpsLog;\nimport org.springframework.context.annotation.Lazy;\nimport org.springframework.stereotype.Service;\n\nimport javax.inject.Inject;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\n@Service\n@Lazy\npublic class AuthorizationServiceImpl implements IAuthorizationService {\n\n private static final String TENANT_GROUP_FORMAT = 
\"@%s\";\n\n @Inject\n private IEntitlementsFactory factory;\n @Inject\n @Lazy\n private JaxRsDpsLog jaxRsDpsLog;\n\n @Override\n public AuthorizationResponse authorizeAny(DpsHeaders headers, String... roles) {\n AuthorizationResponse authorizationResponse = null;\n IEntitlementsService service = factory.create(headers);\n try {\n authorizationResponse = authorizeAny(headers, service.getGroups(), roles);\n } catch (EntitlementsException e) {\n handleEntitlementsException(e, headers);\n }\n return authorizationResponse;\n }\n\n @Override\n public AuthorizationResponse authorizeAny(String tenantName, DpsHeaders headers, String... roles) {\n IEntitlementsService service = factory.create(headers);\n AuthorizationResponse authorizationResponse = null;\n try {\n Groups groups = service.getGroups();\n List<GroupInfo> allGroups = new ArrayList<>(groups.getGroups());\n groups.setGroups(groups.getGroups().stream().filter(groupInfo -> groupInfo.getEmail()\n .contains(String.format(TENANT_GROUP_FORMAT, tenantName))).collect(Collectors.toList()));\n\n authorizationResponse = authorizeAny(headers, groups, roles);\n groups.setGroups(allGroups);\n } catch (EntitlementsException e) {\n handleEntitlementsException(e, headers);\n }\n return authorizationResponse;\n }\n\n private void handleEntitlementsException(EntitlementsException e, DpsHeaders headers) {\n HttpResponse response = e.getHttpResponse();\n throw new AppException(response.getResponseCode(), \"Access denied\", \"The user is not authorized to perform this action\", HeadersUtil.toLogMsg(headers, null), e);\n }\n\n private AuthorizationResponse authorizeAny(DpsHeaders headers, Groups groups, String... roles) {\n String userEmail = null;\n List<String> logMessages = new ArrayList<>();\n Long curTimeStamp = System.currentTimeMillis();\n Long latency = System.currentTimeMillis() - curTimeStamp;\n\n logMessages.add(String.format(\"entitlements-api latency: %s\", latency));\n logMessages.add(String.format(\"groups: %s\", getEmailFromGroups(groups)));\n if (groups != null) {\n userEmail = groups.getMemberEmail();\n if (groups.any(roles)) {\n return AuthorizationResponse.builder().user(userEmail).groups(groups).build();\n }\n }\n jaxRsDpsLog.info(String.join(\" | \", logMessages));\n jaxRsDpsLog.info(HeadersUtil.toLogMsg(headers, userEmail));\n throw AppException.createUnauthorized(\"required search service roles are missing for user\");\n }\n\n private String getEmailFromGroups(Groups groups) {\n if (groups == null) return \"\";\n return groups.getGroups().stream().map(GroupInfo::getEmail).collect(Collectors.joining(\" | \"));\n }\n}\n\n" }, { "alpha_fraction": 0.7539834380149841, "alphanum_fraction": 0.7641810178756714, "avg_line_length": 34.65909194946289, "blob_id": "9546a89afe9e5e05d8f56f730d4f19f98217455e", "content_id": "d89ecb675e6eed6e1ba6178d19e95d0eacb6ae9e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1569, "license_type": "permissive", "max_line_length": 110, "num_lines": 44, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/validation/ExportClassificationValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * 
https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.validation;\n\nimport com.google.common.base.Strings;\nimport org.opengroup.osdu.core.common.model.legal.AllowedLegaltagPropertyValues;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\n\npublic class ExportClassificationValidator implements ConstraintValidator<ValidExportClassification, String> {\n\n AllowedLegaltagPropertyValues properties = new AllowedLegaltagPropertyValues();\n\n @Override\n public void initialize(ValidExportClassification constraintAnnotation) {\n //needed by interface - we don't use\n }\n\n @Override\n public boolean isValid(String exportClassification, ConstraintValidatorContext context) {\n if(Strings.isNullOrEmpty(exportClassification))\n return false;\n else\n return properties.getEccns().stream().anyMatch(exportClassification::equalsIgnoreCase);\n\n\n }\n}\n" }, { "alpha_fraction": 0.6929001808166504, "alphanum_fraction": 0.6992632150650024, "avg_line_length": 32.550559997558594, "blob_id": "f8f1fd3526c6c4612d3f77c7e8b185c1dd8dce76", "content_id": "afb4367d9120de0ab5eb71e7bdc6a82edfeec41a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2986, "license_type": "permissive", "max_line_length": 99, "num_lines": 89, "path": "/osdu-r2/os-delivery/provider/delivery-gcp-datastore/src/test/java/org/opengroup/osdu/delivery/provider/gcp/TestUtils.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp;\n\nimport static java.lang.String.format;\nimport static org.opengroup.osdu.delivery.provider.gcp.model.constant.StorageConstant.GCS_PROTOCOL;\n\nimport java.net.MalformedURLException;\nimport java.net.URI;\nimport java.net.URL;\nimport java.time.Clock;\nimport java.time.Instant;\nimport java.util.UUID;\nimport java.util.regex.Pattern;\nimport lombok.SneakyThrows;\nimport org.assertj.core.api.Condition;\n\npublic final class TestUtils {\n\n public static final String AUTHORIZATION_TOKEN = \"authToken\";\n public static final String PARTITION = \"partition\";\n public static final String USER_DES_ID = \"common-user\";\n public static final String BUCKET_NAME = \"odes-os-file-temp\";\n\n public static final String UUID_REGEX = \"(.{8})(.{4})(.{4})(.{4})(.{12})\";\n public static final Pattern GCS_OBJECT_URI\n = Pattern.compile(\"^gs://[\\\\w,\\\\s-]+/[\\\\w,\\\\s-]+/[\\\\w,\\\\s-]+/[\\\\w,\\\\s-]+/?.*$\");\n public static final Condition<String> 
UUID_CONDITION\n      = new Condition<>(TestUtils::isValidUuid, \"Valid UUID\");\n  public static final Condition<String> GCS_URL_CONDITION\n      = new Condition<>(TestUtils::isValidSignedUrl, \"Signed URL for GCS object\");\n  public static final String FILE_ID = \"test-file-id.tmp\";\n\n  private TestUtils() {\n  }\n\n  private static boolean isValidUuid(String uuid) {\n    try {\n      String normalizedUuid = uuid.replaceAll(UUID_REGEX, \"$1-$2-$3-$4-$5\");\n      UUID.fromString(normalizedUuid);\n      return true;\n    } catch (IllegalArgumentException e) {\n      return false;\n    }\n  }\n\n  private static boolean isValidSignedUrl(String url) {\n    try {\n      new URL(url);\n      return true;\n    } catch (MalformedURLException e) {\n      return false;\n    }\n  }\n\n  public static URI getGcsObjectUri(String bucketName, String folderName, String filename) {\n    return URI.create(format(\"%s%s/%s/%s\", GCS_PROTOCOL, bucketName, folderName, filename));\n  }\n\n  @SneakyThrows\n  public static URL getGcsObjectUrl(String bucketName, String folderName, String filename) {\n    return new URL(format(\n        \"https://storage.googleapis.com/%s/%s/%s?X-Goog-Algorithm=aaa&X-Goog-Credential=BBB\",\n        bucketName, folderName, filename));\n  }\n\n  public static Instant now() {\n    return Instant.now(Clock.systemUTC());\n  }\n\n  public static String getUuidString() {\n    return UUID.randomUUID().toString().replace(\"-\", \"\");\n  }\n\n}\n" }, { "alpha_fraction": 0.7504550218582153, "alphanum_fraction": 0.7528817057609558, "avg_line_length": 36.462120056152344, "blob_id": "aa09413f109b78cbe02acdf3159ba89a76c7b439", "content_id": "1f0f1a3538c0b2324a1e5494a6b36561bc878747", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4945, "license_type": "permissive", "max_line_length": 99, "num_lines": 132, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/delfi/DelfiIngestionService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport static java.lang.String.format;\n\nimport com.google.cloud.storage.Blob;\nimport com.osdu.client.DelfiIngestionClient;\nimport com.osdu.client.delfi.RecordDataFields;\nimport com.osdu.exception.IngestException;\nimport com.osdu.model.Record;\nimport com.osdu.model.RequestContext;\nimport com.osdu.model.delfi.signed.SignedFile;\nimport com.osdu.model.delfi.signed.SignedUrlResult;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.model.type.base.OsduObject;\nimport com.osdu.model.type.manifest.ManifestFile;\nimport com.osdu.service.IngestionService;\nimport com.osdu.service.JsonUtils;\nimport com.osdu.service.PortalService;\nimport com.osdu.service.StorageService;\nimport java.net.MalformedURLException;\nimport java.net.URI;\nimport java.net.URISyntaxException;\nimport java.net.URL;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.List;\nimport 
java.util.stream.Collectors;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.apache.commons.lang3.StringUtils;\nimport org.springframework.http.HttpStatus;\nimport org.springframework.stereotype.Service;\n\n@Service\n@RequiredArgsConstructor\n@Slf4j\npublic class DelfiIngestionService implements IngestionService {\n\n  final DelfiPortalProperties portalProperties;\n  final DelfiIngestionClient delfiIngestionClient;\n  final StorageService storageService;\n  final PortalService portalService;\n\n  @Override\n  public SignedFile uploadFile(ManifestFile file, String authorizationToken, String partition) {\n    URL url = createUrlFromManifestFile(file);\n    SignedUrlResult result = transferFile(url, authorizationToken, partition);\n\n    return SignedFile.builder()\n        .file(file)\n        .locationUrl(result.getLocationUrl())\n        .relativeFilePath(result.getRelativeFilePath())\n        .build();\n  }\n\n  private SignedUrlResult transferFile(URL fileUrl, String authToken, String partition) {\n    String fileName = getFileNameFromUrl(fileUrl);\n    Blob blob = storageService.uploadFileToStorage(fileUrl, fileName);\n\n    SignedUrlResult signedUrlResult = delfiIngestionClient\n        .getSignedUrlForLocation(fileName, authToken, portalProperties.getAppKey(), partition);\n\n    if (signedUrlResult.getResponseCode() != HttpStatus.CREATED.value()) {\n      throw new IngestException(\"Could not fetch a signed URL to landing zone for file: \"\n          + fileName);\n    }\n\n    storageService.writeFileToSignedUrlLocation(blob, signedUrlResult.getLocationUrl());\n    return signedUrlResult;\n  }\n\n  @Override\n  public List<Record> failRecords(List<Record> records, RequestContext requestContext) {\n    return records.stream()\n        .map(record -> failRecord(requestContext, record))\n        .collect(Collectors.toList());\n  }\n\n  private static URL createUrlFromManifestFile(ManifestFile file) {\n    String preLoadFilePath = file.getData().getGroupTypeProperties().getPreLoadFilePath();\n    try {\n      return new URL(preLoadFilePath);\n    } catch (MalformedURLException e) {\n      throw new IngestException(\n          format(\"Could not create URL from preload file path: %s\", preLoadFilePath),\n          e);\n    }\n  }\n\n  /**\n   * Returns file name from URL. Is used to get file name from signed URL.\n   */\n  private static String getFileNameFromUrl(URL fileUrl) {\n    try {\n      Path filePath = Paths.get(new URI(fileUrl.toString()).getPath()).getFileName();\n      final String fileName = filePath == null ? 
null : filePath.toString();\n if (StringUtils.isEmpty(fileName)) {\n throw new IngestException(format(\"File name obtained is empty, URL : %s\", fileUrl));\n }\n return fileName;\n } catch (URISyntaxException e) {\n throw new IngestException(format(\"Can not get file name from URL: %s\", fileUrl), e);\n }\n }\n\n private Record failRecord(RequestContext requestContext, Record record) {\n log.debug(format(\"Fail delfi record : %s\", record.getId()));\n OsduObject osduObject = JsonUtils.deepCopy(record.getData().get(RecordDataFields.OSDU_DATA),\n OsduObject.class);\n osduObject.setResourceLifecycleStatus(\"srn:reference-data/ResourceLifecycleStatus:RESCINDED:\");\n record.getData().put(RecordDataFields.OSDU_DATA, osduObject);\n return portalService.putRecord(record, requestContext.getAuthorizationToken(),\n requestContext.getPartition());\n }\n\n}\n" }, { "alpha_fraction": 0.7457072734832764, "alphanum_fraction": 0.7587898373603821, "avg_line_length": 28.829267501831055, "blob_id": "d1077347906097f0e5f8abb6b213c259e31cab65", "content_id": "8e990eb66addffe6b1fa5d00eca26c98db1abaa0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1223, "license_type": "permissive", "max_line_length": 75, "num_lines": 41, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/crs/PointConversionInfo.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.crs;\n\nimport com.google.gson.JsonObject;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\nimport java.util.List;\n\n@Data\n@NoArgsConstructor\npublic class PointConversionInfo {\n private String recordId;\n private int recordIndex;\n private List<String> conversionStatus;\n private String xFieldName;\n private String yFieldName;\n private String zFieldName;\n private Double xValue;\n private Double yValue;\n private Double zValue;\n private int metaItemIndex;\n private List<JsonObject> metaItems;\n private boolean hasError;\n}\n" }, { "alpha_fraction": 0.6553797721862793, "alphanum_fraction": 0.6696202754974365, "avg_line_length": 28.259260177612305, "blob_id": "86135ac561c9d3b0d6630f580e2b7d8453f5d755", "content_id": "4ab6f2f0a6a999ff1052c359b13a041129f9c0d7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3160, "license_type": "permissive", "max_line_length": 83, "num_lines": 108, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/model/entitlements/GroupsTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the 
License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.entitlements;\n\nimport org.junit.Test;\nimport org.opengroup.osdu.core.common.model.entitlements.GroupInfo;\nimport org.opengroup.osdu.core.common.model.entitlements.Groups;\n\nimport java.util.List;\n\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertFalse;\nimport static org.junit.Assert.assertNull;\nimport static org.junit.Assert.assertTrue;\n\npublic class GroupsTest {\n @Test\n public void should_returnTrue_when_groupsContainsPermission() {\n Groups sut = this.generateSut();\n\n assertTrue(sut.any(\"group.2\"));\n }\n\n @Test\n public void should_returnFalse_when_groupsDoesNotContainsPermission() {\n Groups sut = this.generateSut();\n\n assertFalse(sut.any(\"group.4\"));\n }\n\n @Test\n public void should_returnFalse_when_groupsContainsPermissionInDifferentCase() {\n Groups sut = this.generateSut();\n\n assertFalse(sut.any(\"GrouP.2\"));\n }\n\n @Test\n public void should_returnTrue_when_groupsContainsOnePermission() {\n Groups sut = this.generateSut();\n\n assertTrue(sut.any(\"GrouP.4\", \"group.1\"));\n }\n\n @Test\n public void should_returnFalse_when_groupsContainsNull() {\n Groups sut = this.generateSut();\n sut.setGroups(null);\n\n assertFalse(sut.any(\"group.1\"));\n }\n\n @Test\n public void should_getGroupNames_when_groupsExist() {\n Groups sut = this.generateSut();\n List<String> result = sut.getGroupNames();\n assertEquals(3, result.size());\n assertEquals(\"group.1\", result.get(0));\n assertEquals(\"group.2\", result.get(1));\n assertEquals(\"group.3\", result.get(2));\n }\n\n @Test\n public void should_returnGroupInfo_when_groupsContainsGroupName() {\n Groups sut = this.generateSut();\n\n assertEquals(\"group.1\", sut.getGroup(\"group.1\").getName());\n }\n\n @Test\n public void should_returnNull_when_groupsDoesNotContainGroupName() {\n Groups sut = this.generateSut();\n\n assertNull(sut.getGroup(\"group.43\"));\n }\n\n Groups generateSut() {\n Groups output = new Groups();\n GroupInfo group1 = new GroupInfo();\n group1.setName(\"group.1\");\n output.getGroups().add(group1);\n\n GroupInfo group2 = new GroupInfo();\n group2.setName(\"group.2\");\n output.getGroups().add(group2);\n\n GroupInfo group3 = new GroupInfo();\n group3.setName(\"group.3\");\n output.getGroups().add(group3);\n\n return output;\n }\n}\n" }, { "alpha_fraction": 0.7686731815338135, "alphanum_fraction": 0.7711944580078125, "avg_line_length": 38.67499923706055, "blob_id": "8824ebcd6551671f277d77e3a2b9ec55342f3c5b", "content_id": "b4a80a220c6b4cee2dccb9672d15e2aa0c665d6c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3173, "license_type": "permissive", "max_line_length": 95, "num_lines": 80, "path": "/compatibility-layer/service/delivery/src/main/java/com/osdu/service/processing/delfi/DelfiDataProcessingJob.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this 
file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.processing.delfi;\n\nimport static com.osdu.config.AsyncConfiguration.DATA_PROCESSING_EXECUTOR;\n\nimport com.osdu.client.delfi.RecordDataFields;\nimport com.osdu.model.Record;\nimport com.osdu.model.SrnToRecord;\nimport com.osdu.model.delfi.DelfiFile;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResult;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResultStatus;\nimport com.osdu.service.PortalService;\nimport com.osdu.service.SrnMappingService;\nimport com.osdu.service.processing.DataProcessingJob;\nimport java.util.Map;\nimport java.util.concurrent.CompletableFuture;\nimport lombok.RequiredArgsConstructor;\nimport org.springframework.scheduling.annotation.Async;\nimport org.springframework.stereotype.Component;\n\n@Component\n@RequiredArgsConstructor\npublic class DelfiDataProcessingJob implements DataProcessingJob {\n\n public static final String BUCKET_URL = \"bucketURL\";\n\n final SrnMappingService srnMappingService;\n final PortalService portalService;\n\n /**\n * Perform the async data processing for delivery for specified SRN.\n * It's run on {@code AsyncConfiguration#dataProcessingExecutor} executor.\n *\n * @param srn SRN\n * @param authorizationToken Bearer token\n * @param partition partition\n * @return {@link CompletableFuture} of delivery data processing result.\n */\n @Async(DATA_PROCESSING_EXECUTOR)\n public CompletableFuture<ProcessingResult> process(String srn, String authorizationToken,\n String partition) {\n\n ProcessingResult result = new ProcessingResult();\n result.setSrn(srn);\n\n SrnToRecord srnToRecord = srnMappingService.getSrnToRecord(srn);\n if (srnToRecord == null) {\n result.setProcessingResultStatus(ProcessingResultStatus.NO_MAPPING);\n return CompletableFuture.completedFuture(result);\n }\n String recordId = srnToRecord.getRecordId();\n final Record record = portalService.getRecord(recordId, authorizationToken, partition);\n if (record.getData().containsKey(BUCKET_URL)) {\n DelfiFile file = portalService\n .getFile(record.getData().get(BUCKET_URL).toString(), authorizationToken, partition);\n result.setProcessingResultStatus(ProcessingResultStatus.FILE);\n result.setData((Map<String, Object>) record.getData().get(RecordDataFields.OSDU_DATA));\n result.setFileLocation(file.getSignedUrl());\n } else {\n result.setData((Map<String, Object>) record.getData().get(RecordDataFields.OSDU_DATA));\n result.setProcessingResultStatus(ProcessingResultStatus.DATA);\n }\n return CompletableFuture.completedFuture(result);\n }\n}" }, { "alpha_fraction": 0.7052767276763916, "alphanum_fraction": 0.7104247212409973, "avg_line_length": 37.849998474121094, "blob_id": "fad9ad5e50dec63a0128816729f89c79fe0768b5", "content_id": "cd9ff5699ebd9bc2e6e5d32d210b5e6e8cdea52f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1554, "license_type": "permissive", "max_line_length": 82, "num_lines": 40, "path": 
"/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/provider/interfaces/IWorkflowIntegrationService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.provider.interfaces;\n\nimport java.util.Map;\nimport org.opengroup.osdu.core.common.model.WorkflowType;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.ingest.exception.ServerErrorException;\n\npublic interface IWorkflowIntegrationService {\n\n /**\n * Start new workflow according to {@code workflowType} and {@code dataType}.\n *\n * @param workflowType workflowType\n * @param dataType dataType\n * @param context workflow context\n * @param commonHeaders common headers\n * @return workflow ID\n * @throws ServerErrorException if unable to create start workflow request\n * or workflow response doesn't contain workflow ID\n */\n String submitIngestToWorkflowService(WorkflowType workflowType, String dataType,\n Map<String, Object> context,\n DpsHeaders commonHeaders);\n}\n" }, { "alpha_fraction": 0.7696924209594727, "alphanum_fraction": 0.775693953037262, "avg_line_length": 30.738094329833984, "blob_id": "a6d97abde5a4f06c8cb203a6558e12e236d0673d", "content_id": "4749d0fae902c406e2f56ba1108fd016733807b1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1333, "license_type": "permissive", "max_line_length": 75, "num_lines": 42, "path": "/osdu-r2/os-delivery/provider/delivery-gcp/src/main/java/org/opengroup/osdu/delivery/provider/gcp/model/property/FileLocationProperties.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.model.property;\n\nimport javax.validation.constraints.NotBlank;\nimport lombok.Getter;\nimport org.springframework.boot.context.properties.ConfigurationProperties;\nimport org.springframework.boot.context.properties.ConstructorBinding;\nimport org.springframework.validation.annotation.Validated;\n\n// TODO: remove it after defined tenant info and auth\n@Getter\n@ConfigurationProperties(prefix = \"file.location\")\n@Validated\npublic class FileLocationProperties {\n\n @NotBlank\n final String bucketName;\n\n @NotBlank\n final String userId;\n\n 
@ConstructorBinding\n public FileLocationProperties(String bucketName, String userId) {\n this.bucketName = bucketName;\n this.userId = userId;\n }\n}\n" }, { "alpha_fraction": 0.772090494632721, "alphanum_fraction": 0.7735273241996765, "avg_line_length": 35.39215850830078, "blob_id": "8a3b39c4d979c356b18d93131404d22e609e7888", "content_id": "248b11a4e35041e7fcb395ac26613f9ba85e32ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5568, "license_type": "permissive", "max_line_length": 99, "num_lines": 153, "path": "/osdu-r2/os-workflow/workflow-core/src/test/java/org/opengroup/osdu/workflow/service/WorkflowStatusServiceImplTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.service;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\n\nimport java.util.Date;\nimport java.util.HashMap;\nimport java.util.Map;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.opengroup.osdu.workflow.exception.WorkflowNotFoundException;\nimport org.opengroup.osdu.workflow.model.GetStatusRequest;\nimport org.opengroup.osdu.workflow.model.GetStatusResponse;\nimport org.opengroup.osdu.workflow.model.UpdateStatusRequest;\nimport org.opengroup.osdu.workflow.model.UpdateStatusResponse;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass WorkflowStatusServiceImplTest {\n\n private static final String AUTHORIZATION_TOKEN = \"authToken\";\n private static final String PARTITION = \"partition\";\n private static final String WORKFLOW_ID = \"workflow-id\";\n\n @Mock\n private IValidationService validationService;\n @Mock\n private IWorkflowStatusRepository workflowStatusRepository;\n\n WorkflowStatusServiceImpl workflowStatusService;\n\n @BeforeEach\n void setUp() {\n workflowStatusService = new WorkflowStatusServiceImpl(validationService,\n workflowStatusRepository);\n }\n\n @Test\n void shouldGetWorkflowStatus() {\n\n // given\n DpsHeaders headers = getMessageHeaders();\n GetStatusRequest request = 
GetStatusRequest.builder().workflowId(WORKFLOW_ID).build();\n\n WorkflowStatus workflowStatus = WorkflowStatus.builder()\n .workflowStatusType(WorkflowStatusType.SUBMITTED)\n .workflowId(WORKFLOW_ID)\n .submittedAt(new Date()).build();\n\n given(workflowStatusRepository.findWorkflowStatus(eq(WORKFLOW_ID))).willReturn(workflowStatus);\n\n // when\n GetStatusResponse workflowStatusResponse = workflowStatusService\n .getWorkflowStatus(request, headers);\n\n // then\n then(workflowStatusResponse.getWorkflowStatusType()).isEqualTo(WorkflowStatusType.SUBMITTED);\n InOrder inOrder = Mockito.inOrder(validationService,\n workflowStatusRepository);\n inOrder.verify(validationService).validateGetStatusRequest(request);\n inOrder.verify(workflowStatusRepository)\n .findWorkflowStatus(eq(WORKFLOW_ID));\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowExceptionIfThereIsNoWorkflow() {\n\n // given\n DpsHeaders headers = getMessageHeaders();\n GetStatusRequest request = GetStatusRequest.builder().workflowId(WORKFLOW_ID).build();\n\n given(workflowStatusRepository.findWorkflowStatus(eq(WORKFLOW_ID))).willReturn(null);\n\n // when\n Throwable thrown = catchThrowable(\n () -> workflowStatusService.getWorkflowStatus(request, headers));\n\n // then\n then(thrown).isInstanceOf(WorkflowNotFoundException.class);\n }\n\n @Test\n void shouldUpdateWorkflowStatus() {\n\n // given\n DpsHeaders headers = getMessageHeaders();\n\n UpdateStatusRequest request = UpdateStatusRequest.builder()\n .workflowId(WORKFLOW_ID)\n .workflowStatusType(WorkflowStatusType.RUNNING).build();\n\n WorkflowStatus workflowStatus = WorkflowStatus.builder()\n .workflowStatusType(WorkflowStatusType.RUNNING)\n .workflowId(WORKFLOW_ID)\n .submittedAt(new Date()).build();\n\n given(workflowStatusRepository\n .updateWorkflowStatus(eq(WORKFLOW_ID), eq(WorkflowStatusType.RUNNING)))\n .willReturn(workflowStatus);\n\n // when\n UpdateStatusResponse updateStatusResponse = workflowStatusService\n .updateWorkflowStatus(request, headers);\n\n // then\n then(updateStatusResponse.getWorkflowStatusType()).isEqualTo(WorkflowStatusType.RUNNING);\n InOrder inOrder = Mockito.inOrder(validationService,\n workflowStatusRepository);\n inOrder.verify(validationService).validateUpdateStatusRequest(request);\n inOrder.verify(workflowStatusRepository)\n .updateWorkflowStatus(eq(WORKFLOW_ID), eq(WorkflowStatusType.RUNNING));\n inOrder.verifyNoMoreInteractions();\n }\n\n private DpsHeaders getMessageHeaders() {\n Map<String, String> headers = new HashMap<>();\n headers.put(DpsHeaders.AUTHORIZATION, AUTHORIZATION_TOKEN);\n headers.put(DpsHeaders.DATA_PARTITION_ID, PARTITION);\n\n return DpsHeaders.createFromMap(headers);\n }\n}\n" }, { "alpha_fraction": 0.79693204164505, "alphanum_fraction": 0.8035061955451965, "avg_line_length": 47.92856979370117, "blob_id": "cc41ef968333a3579562266480f4db7c7639e92e", "content_id": "6ea2adc5190d8fba415551d3ef3aaf554d1cff13", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1369, "license_type": "permissive", "max_line_length": 119, "num_lines": 28, "path": "/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU\n\nThe OSDU repository contains the OSDU Compatibility Layer and OSDU Release 2 projects.\n\n## OSDU Compatibility Layer\n\nThe OSDU Compatibility Layer is an implementation of the OSDU standard and provides a subset of the functionality\navailable in OSDU Release 1. 
In particular, the compatibility layer can only ingest .las files. The implementation is\nlocated under the **compatibility-layer** folder.\n\nThe compatibility layer consists of the following services:\n\n* Search, provides an external API method to search for OSDU data \n* Delivery, provides an external API method to download OSDU data\n* Ingest, provides external API methods to ingest .las files and check the ingestion status \n\n## OSDU R2\n\nThe OSDU Release 2 is an implementation of a unified ingestion flow based on the ingestion flows of the OSDU Release 1 \nand DELFI Data Ecosystem. The implementation is located under the **osdu-r2** folder.\n\nThe OSDU Release 2 consists of the following services:\n\n* Workflow, handles any business process in the OSDU R2, in particular, the ingestion process\n* Delivery, provides internal and external API endpoints to let the user or OSDU R2 services request file\nlocations\n* Ingest, provides external API endpoints to let the user submit files for ingestion, and performs preliminary work\non the request before calling the OSDU Workflow service" }, { "alpha_fraction": 0.76348477602005, "alphanum_fraction": 0.778968870639801, "avg_line_length": 51.48214340209961, "blob_id": "1d8a4b4d56a428284e4c2d330ecd0cc688d5b8d6", "content_id": "4d2814b5d018a89116d3e351fb10e92f8db1780a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5877, "license_type": "permissive", "max_line_length": 135, "num_lines": 112, "path": "/osdu-r2/os-dags/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU R2 DAGs\n\n## Contents\n\n* [Introduction](#introduction)\n* [Opaque Ingestion DAG](#opaque-ingestion-dag)\n* [Manifest Ingestion DAG](#manifest-ingestion-dag)\n* [DAG implementation details](#dag-implementation-details)\n* [Workflow Status Operator](#workflow-status-operator)\n* [Stale Jobs Scheduler](#stale-jobs-scheduler)\n* [Workflow Finished Sensor operator](#workflow-finished-sensor-operator)\n\n## Introduction\n\nThe OSDU R2 Prototype includes a Workflow Engine, an implementation of Apache Airflow, to orchestrate business\nprocesses. In particular, the Workflow Engine handles ingestion of opaque and well log .las files in OSDU R2.\n\nThe Workflow Engine encompasses the following components:\n\n* Opaque Ingestion DAG\n* OSDU Ingestion DAG\n* Workflow Status Operator\n* Stale Jobs Scheduler\n* Workflow Finished Sensor Operator\n\n## Opaque Ingestion DAG\n\nThe Opaque Ingestion DAG performs ingestion of the OSDU opaque data type. The following diagram shows the workflow of the\nOpaque Ingestion DAG.\n\n![OSDU R2 Opaque Ingestion DAG](https://user-images.githubusercontent.com/21691607/77777705-9c4dd180-7058-11ea-97c7-9e0deb9d2a87.png)\n\nThe Opaque Ingestion DAG flow:\n\n1. Call the Workflow Status Operator with the **running** status.\n   * Workflow Status Operator queries the Workflow service's **/updateWorkflowStatus** API endpoint with the\n   **running** status, and then returns control back to the Opaque Ingestion DAG.\n2. Query the Storage service's **/createOrUpdateRecord** API endpoint to create a record for the file.\n   * The ODES Storage service makes a call to ODES Indexer and returns to the DAG.\n3. 
Call the Workflow Status Operator with the **finished** status.\n   * The Workflow Status Operator queries the Workflow service's **/updateWorkflowStatus** endpoint to set the workflow\n   status to **finished** in the database.\n\n## Manifest Ingestion DAG\n\nThe Manifest Ingestion DAG ingests multiple files with their metadata provided in an OSDU manifest. The following\ndiagram demonstrates the workflow of the Manifest\nIngestion DAG.\n\n![OSDU R2 Manifest Ingestion DAG](https://user-images.githubusercontent.com/21691607/77666377-8cb38780-6f89-11ea-97b4-57abf507ca5a.png)\n\nUpon an execution request:\n\n1. Invoke the Workflow Status Operator to set the new status for the workflow.\n   * The Workflow Status Operator queries the Workflow service's **/updateWorkflowStatus** API endpoint with the\n   **running** status.\n2. Obtain the Work Product Components associated with the Work Product.\n   * For each Work Product Component, find all associated OSDU Files. For each File in the manifest:\n       * Start the **ingest** workflow. Call the Workflow service's **/startWorkflow** API endpoint with the **ingest**\n       workflow type.\n       > The Workflow Finished Sensor operator polls the DAG execution and notifies the DAG to start ingestion of the\n       > next file.\n   * Once all Files for the current Work Product Component are ingested, query the Storage service's\n    **/CreateOrUpdateRecord** API endpoint to create a record for the current Work Product Component.\n   * Once all Work Product Components and Files are ingested, proceed to step 3.\n3. Create a new record for the Work Product.\n   * Query the Storage service's **/CreateOrUpdateRecord** API endpoint and pass it the Work Product.\n4. Search the records by metadata.\n   * Query the Storage service's **/listRecords** API to obtain the records by metadata.\n5. Enrich the records with data from the manifest.\n   * Query the Storage service's **/UpdateRecord** API endpoint and pass it the metadata from the manifest.\n   > Only file records are updated.\n6. Invoke the Workflow Status Operator with the **finished** job status.\n   * The Workflow Status Operator queries the Workflow service to set the new workflow status.\n\n## DAG implementation details\n\nOSDU DAGs are cloud platform-agnostic by design. However, there are specific implementation requirements imposed by cloud\nplatforms, and the OSDU R2 Prototype provides a dedicated Python SDK to make sure that DAGs are independent of the\ncloud platforms. This Python SDK is located in a separate [os-python-sdk] folder.\n\n## Workflow Status Operator\n\nThe Workflow Status Operator is an Airflow operator callable from each DAG. Its purpose is to receive the latest status\nof a workflow job and then update the workflow record in the database. Each DAG in the system has to invoke the Workflow\nStatus Operator to update the workflow status.\n\nThis operator isn't designed to update the status directly in the database; instead, it queries the OSDU R2 Workflow\nservice's API endpoint. Once the operator sends a request to update the status, it cedes control back to the DAG.\n\n## Stale Jobs Scheduler\n\nThe Stale Jobs Scheduler is designed to query Apache Airflow to find stale workflow jobs, that is, jobs that\nfailed during execution but whose status wasn't updated to **failed** in the database.\n\nThis operator queries the Airflow API every N minutes to verify that the workflow jobs that do not have the _finished_\nstatus are still running; the sketch below illustrates this check. 
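In Java terms, the reconciliation pass could look roughly like the following minimal sketch (imports omitted); it is not the actual implementation. `AirflowClient`, `hasFailed`, and `findByStatusIn` are hypothetical names used only for illustration, while `WorkflowStatus`, `WorkflowStatusType`, and `updateWorkflowStatus` mirror the Workflow service's own model and repository interface.

```java
// A minimal sketch of the Stale Jobs Scheduler check; hypothetical names noted above.
public class StaleJobsScheduler {

  private final IWorkflowStatusRepository statusRepository;
  private final AirflowClient airflowClient; // hypothetical Airflow API client

  public StaleJobsScheduler(IWorkflowStatusRepository statusRepository,
      AirflowClient airflowClient) {
    this.statusRepository = statusRepository;
    this.airflowClient = airflowClient;
  }

  // Invoked on a fixed schedule, e.g. every N minutes.
  public void reconcile() {
    // 1. Find all workflow records that are still SUBMITTED or RUNNING.
    List<WorkflowStatus> candidates = statusRepository
        .findByStatusIn(WorkflowStatusType.SUBMITTED, WorkflowStatusType.RUNNING);
    for (WorkflowStatus status : candidates) {
      // 2. Ask Airflow whether the corresponding DAG run has actually failed.
      if (airflowClient.hasFailed(status.getWorkflowId())) {
        // 3. The job died without reporting back: mark it FAILED in the database.
        statusRepository.updateWorkflowStatus(status.getWorkflowId(),
            WorkflowStatusType.FAILED);
      }
    }
  }
}
```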
If a workflow job has failed in Airflow, the Stale Jobs Scheduler will set this workflow job\nstatus to **failed** in the database.\n\nThe Stale Jobs Scheduler workflow:\n\n1. Query the database to find all workflow records with the _submitted_ or _running_ statuses.\n2. Query Airflow to verify the status of the submitted or running workflow jobs.\n3. If Airflow returns the failed status for a workflow job, query Firestore to set the workflow status to FAILED.\n\n## Workflow Finished Sensor Operator\n\nThe Workflow Finished Sensor operator is a special type of operator that monitors ingestion of a file during the \"osdu\"\ningestion workflow. Once a file is ingested, this operator notifies the DAG, which then starts ingestion of the next\nfile in the manifest.\n\n[os-python-sdk]: ../os-python-sdk" }, { "alpha_fraction": 0.7312796115875244, "alphanum_fraction": 0.7400921583175659, "avg_line_length": 37.3636360168457, "blob_id": "88293b84868eb1209483ec4d8780012e411eac2a", "content_id": "a9d042469dbe7e70d8ffbc92249f9d35a0d7d89d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2110, "license_type": "permissive", "max_line_length": 99, "num_lines": 55, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/deserializer/SortOptionDeserializer.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.deserializer;\n\nimport com.fasterxml.jackson.core.JsonParser;\nimport com.fasterxml.jackson.databind.DeserializationContext;\nimport com.fasterxml.jackson.databind.JsonDeserializer;\nimport com.fasterxml.jackson.databind.JsonNode;\nimport com.osdu.model.osdu.SortOption;\nimport java.io.IOException;\nimport java.util.Map;\n\n/**\n * Custom deserializer for the OSDU search object. Needed because the structure defined in the\n * documentation allows two different styles for the inner objects of the sort array: 1 - an object\n * whose single field is named after the sort field and holds an inner object with the K:V pair\n * order:orderType (example : {\n * \"region\":{ \"order\":\"asc\" } } ); 2 - a plain string with the name of the field 
(example : \"fieldName\")\n */\npublic class SortOptionDeserializer extends JsonDeserializer<SortOption> {\n\n private static final String ORDER_JSON_KEY = \"order\";\n\n @Override\n public SortOption deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {\n JsonNode node = p.getCodec().readTree(p);\n SortOption sortOption = new SortOption();\n\n if (node.fields().hasNext()) {\n Map.Entry<String, JsonNode> sortEntry = node.fields().next();\n sortOption.setFieldName(sortEntry.getKey());\n sortOption\n .setOrderType(\n SortOption.OrderType.valueOf(sortEntry.getValue().get(ORDER_JSON_KEY).asText()));\n } else {\n sortOption.setFieldName(node.asText());\n sortOption.setOrderType(SortOption.OrderType.ASC);\n }\n\n return sortOption;\n }\n}\n" }, { "alpha_fraction": 0.7100059986114502, "alphanum_fraction": 0.7195925712585449, "avg_line_length": 33.77083206176758, "blob_id": "1d5a648db8edbb7725c45c6b15f9162fda470038", "content_id": "9068204ad606026772e2e571e6b40f358fe93b55", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1669, "license_type": "permissive", "max_line_length": 106, "num_lines": 48, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/search/Preconditions.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.search;\n\nimport com.google.common.base.Strings;\n\nimport java.util.Objects;\nimport java.util.function.Function;\n\npublic final class Preconditions {\n\n private static Function<String, Boolean> isNotNullOrEmptyPredicate = (s) -> !Strings.isNullOrEmpty(s);\n\n public static <T> T checkArgument(T argument, Function<T, Boolean> predicate, Object errorMessage) {\n if (!predicate.apply(argument)) {\n throw new IllegalArgumentException(String.valueOf(errorMessage));\n } else {\n return argument;\n }\n }\n\n public static <T> T checkNotNull(T argument) {\n return checkNotNull(argument, \"Argument should be not null\");\n }\n\n public static String checkNotNullOrEmpty(String argument, Object errorMessage) {\n return checkArgument(argument, isNotNullOrEmptyPredicate, errorMessage);\n }\n\n public static <T> T checkNotNull(T argument, Object errorMessage) {\n return checkArgument(argument, Objects::nonNull, errorMessage);\n }\n}\n" }, { "alpha_fraction": 0.7599742412567139, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 37.849998474121094, "blob_id": "dab62672224e0e68e031e212bfb9a13920b544f9", "content_id": "5ab702fe022dc7814b1e81a770fd19c95ae3d0a5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1554, "license_type": "permissive", "max_line_length": 97, "num_lines": 40, "path": 
"/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/validation/CcsOffsetValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search.validation;\n\nimport org.opengroup.osdu.core.common.model.search.CcsQueryRequest;\nimport org.opengroup.osdu.core.common.model.search.QueryRequest;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\n\n// TODO: Remove this temporary implementation when ECE CCS is utilized\npublic class CcsOffsetValidator implements ConstraintValidator<CcsValidOffset, CcsQueryRequest> {\n\n @Override\n public void initialize(final CcsValidOffset constraintAnnotation) {\n }\n\n @Override\n public boolean isValid(CcsQueryRequest ccsQueryRequest, ConstraintValidatorContext context) {\n QueryRequest queryRequest = new QueryRequest();\n queryRequest.setFrom(ccsQueryRequest.getFrom());\n queryRequest.setLimit(ccsQueryRequest.getLimit());\n return new OffsetValidator().isValid(queryRequest, context);\n }\n}\n" }, { "alpha_fraction": 0.7546599507331848, "alphanum_fraction": 0.7576826214790344, "avg_line_length": 36.10280227661133, "blob_id": "677801cf925044478c028faff04c597d3e3d6724", "content_id": "4f65655b193d8b5eafdbedba271e20bbc942c139", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3970, "license_type": "permissive", "max_line_length": 105, "num_lines": 107, "path": "/osdu-r2/os-delivery/provider/delivery-gcp-datastore/src/test/java/org/opengroup/osdu/delivery/provider/gcp/repository/GcpStorageRepositoryTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.repository;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.assertj.core.extractor.Extractors.toStringMethod;\nimport static org.mockito.AdditionalAnswers.delegatesTo;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.Mockito.mock;\nimport static org.mockito.Mockito.verify;\n\nimport 
com.google.auth.oauth2.GoogleCredentials;\nimport com.google.cloud.storage.BlobInfo;\nimport com.google.cloud.storage.Storage;\nimport com.google.cloud.storage.Storage.SignUrlOption;\nimport com.google.cloud.storage.contrib.nio.testing.LocalStorageHelper;\nimport java.util.concurrent.TimeUnit;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.ArgumentCaptor;\nimport org.mockito.Captor;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.delivery.ReplaceCamelCase;\nimport org.opengroup.osdu.delivery.model.SignedObject;\nimport org.opengroup.osdu.delivery.provider.interfaces.StorageRepository;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass GcpStorageRepositoryTest {\n\n private static final String BUCKET_NAME = \"bucket\";\n private static final String FILEPATH = \"file/path/temp.tmp\";\n\n @Captor\n private ArgumentCaptor<byte[]> contentCaptor;\n @Captor\n private ArgumentCaptor<SignUrlOption> optionsCaptor;\n\n @Test\n void shouldCreateSignedObject() {\n // given\n Storage storage = spyLocalStorage(TestCredential.getSa());\n StorageRepository storageRepository = new GcpStorageRepository(storage);\n\n // when\n SignedObject signedObject = storageRepository.createSignedObject(BUCKET_NAME, FILEPATH);\n\n // then\n then(signedObject).isNotNull();\n\n verify(storage).create(any(BlobInfo.class), contentCaptor.capture());\n verify(storage).signUrl(any(BlobInfo.class), eq(7L), eq(TimeUnit.DAYS),\n optionsCaptor.capture());\n\n then(contentCaptor.getValue()).isEmpty();\n then(optionsCaptor.getAllValues())\n .extracting(\"option\", \"value\")\n .extracting(toStringMethod())\n .containsExactly(\n \"(HTTP_METHOD, PUT)\",\n \"(SIGNATURE_VERSION, V4)\");\n }\n\n @Test\n void shouldThrowExceptionWhenCallerIsNotSigner () {\n // given\n Storage storage = spyLocalStorage(TestCredential.getUserCredentials());\n StorageRepository storageRepository = new GcpStorageRepository(storage);\n\n // when\n Throwable thrown = catchThrowable(() -> storageRepository.createSignedObject(BUCKET_NAME, FILEPATH));\n\n // then\n then(thrown)\n .isInstanceOf(IllegalStateException.class)\n .hasMessage(\"Signing key was not provided and could not be derived\");\n\n verify(storage).create(any(BlobInfo.class), any(byte[].class));\n verify(storage).signUrl(any(BlobInfo.class), eq(7L), eq(TimeUnit.DAYS),\n any(SignUrlOption.class), any(SignUrlOption.class));\n }\n\n private Storage spyLocalStorage(GoogleCredentials credentials) {\n Storage localStorage = LocalStorageHelper.getOptions().toBuilder()\n .setCredentials(credentials)\n .build()\n .getService();\n return mock(Storage.class, delegatesTo(localStorage));\n }\n}\n" }, { "alpha_fraction": 0.6906609535217285, "alphanum_fraction": 0.6957452297210693, "avg_line_length": 28.895999908447266, "blob_id": "3cea9c6e85676a15e95ee6424486f1b37a86136d", "content_id": "31be94a4bf4810648f788807ecf2b36afab6bb9f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3737, "license_type": "permissive", "max_line_length": 93, "num_lines": 125, "path": "/osdu-r2/os-delivery/delivery-core/src/test/java/org/opengroup/osdu/delivery/TestUtils.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this 
file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery;\n\nimport static java.lang.String.format;\n\nimport java.net.MalformedURLException;\nimport java.net.URI;\nimport java.net.URL;\nimport java.time.Clock;\nimport java.time.Instant;\nimport java.util.UUID;\nimport java.util.regex.Pattern;\nimport lombok.SneakyThrows;\nimport org.assertj.core.api.Condition;\nimport org.hamcrest.Description;\nimport org.hamcrest.Matcher;\nimport org.hamcrest.TypeSafeMatcher;\n\npublic final class TestUtils {\n\n public static final String AUTHORIZATION_TOKEN = \"authToken\";\n public static final String PARTITION = \"partition\";\n public static final String USER_DES_ID = \"common-user\";\n\n public static final String UUID_REGEX = \"(.{8})(.{4})(.{4})(.{4})(.{12})\";\n public static final String SRG_PROTOCOL = \"srg://\";\n public static final Pattern SRG_OBJECT_URI\n = Pattern.compile(\"^srg://[\\\\w,\\\\s-]+/[\\\\w,\\\\s-]+/[\\\\w,\\\\s-]+/[\\\\w,\\\\s-]+/?.*$\");\n public static final Condition<String> UUID_CONDITION\n = new Condition<>(TestUtils::isValidUuid, \"Valid UUID\");\n public static final Condition<String> SIGNED_URL_CONDITION\n = new Condition<>(TestUtils::isValidSingedUrl, \"Signed URL for GCS object\");\n public static final String FILE_ID = \"test-file-id.tmp\";\n\n private TestUtils() {\n }\n\n private static boolean isValidUuid(String uuid) {\n try {\n String normalizedUuid = uuid.replaceAll(UUID_REGEX, \"$1-$2-$3-$4-$5\");\n UUID.fromString(normalizedUuid);\n return true;\n } catch (IllegalArgumentException e) {\n return false;\n }\n }\n\n private static boolean isValidSingedUrl(String url) {\n try {\n new URL(url);\n return true;\n } catch (MalformedURLException e) {\n return false;\n }\n }\n\n public static URI getObjectUri(String bucketName, String folderName, String filename) {\n return URI.create(format(\"%s%s/%s/%s\", SRG_PROTOCOL, bucketName, folderName, filename));\n }\n\n @SneakyThrows\n public static URL getObjectUrl(String bucketName, String folderName, String filename) {\n return new URL(format(\n \"https://storage.googleapis.com/%s/%s/%s?X-Goog-Algorithm=aaa&X-Goog-Credential=BBB\",\n bucketName, folderName, filename));\n }\n\n public static Instant now() {\n return Instant.now(Clock.systemUTC());\n }\n\n public static String getUuidString() {\n return UUID.randomUUID().toString().replace(\"-\", \"\");\n }\n\n public static Matcher<String> isValidUUID() {\n return new UUIDMatcher();\n }\n\n public static Matcher<String> isValidSingedUrl() {\n return new UrlMatcher();\n }\n\n public static class UUIDMatcher extends TypeSafeMatcher<String> {\n\n @Override\n protected boolean matchesSafely(String value) {\n return isValidUuid(value);\n }\n\n @Override\n public void describeTo(Description description) {\n description.appendText(\"should be UUID\");\n }\n }\n\n public static class UrlMatcher extends TypeSafeMatcher<String> {\n\n @Override\n protected boolean matchesSafely(String value) {\n return isValidSingedUrl(value);\n }\n\n @Override\n public void describeTo(Description description) {\n description.appendText(\"should 
be URL\");\n }\n }\n\n}\n" }, { "alpha_fraction": 0.6507478356361389, "alphanum_fraction": 0.6641301512718201, "avg_line_length": 36.73267364501953, "blob_id": "e4f5da4e2cf8b31f0130e4d6349d249bdb1d3576", "content_id": "567f487e8bff8b78f7e86637de21435b98f29450", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3811, "license_type": "permissive", "max_line_length": 92, "num_lines": 101, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/logging/LogIntegrationTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.logging;\n\nimport static org.junit.Assert.assertTrue;\n\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport org.junit.Test;\n\nimport org.opengroup.osdu.core.common.logging.audit.AuditAction;\nimport org.opengroup.osdu.core.common.model.http.Request;\nimport org.opengroup.osdu.core.common.logging.audit.AuditPayload;\nimport org.opengroup.osdu.core.common.logging.audit.AuditStatus;\n\npublic class LogIntegrationTest {\n\n DefaultLogger log = new DefaultLogger();\n\n @Test\n public void loggingIntegerationTest() throws Exception {\n // appEngine provided environment variables.\n AuditPayload auditPayload = AuditPayload.builder()\n .action(AuditAction.CREATE)\n .status(AuditStatus.SUCCESS)\n .message(\"hello\")\n .resources(new ArrayList<>())\n .actionId(\"10001\")\n .user(\"testUser\")\n .build();\n\n\n Map<String, String> labels = new HashMap<>();\n labels.put(\"correlation-id\", \"testCorrelationId\");\n labels.put(\"X-Cloud-Trace-Context\", \"f8b375ea4c7da1933f8f4829246032ef;o=0\");\n\n Request http = Request.builder().build();\n String logname = \"legaltest.log\";\n long time = System.currentTimeMillis();\n\n log.audit(logname, auditPayload, labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.request(logname, http, labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.info(logname, \"info\", labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.warning(logname, \"warning with exception\", new Exception(\"Test error\"), labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.warning(logname, \"warning no exception\", labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.error(logname, \"error with exception\", new Exception(\"Test 
error\"), labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.audit(logname, auditPayload, labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n time = System.currentTimeMillis();\n\n log.error(logname, \"error no exception\", labels);\n time = System.currentTimeMillis() - time;\n System.out.println(\"Time spent writing logs \" + time);\n\n assertTrue(\"Expected time to be no more than 50 millisecond\", time <= 50);\n Thread.sleep(6000);// wait for batch settings to kick in\n\n }\n}\n" }, { "alpha_fraction": 0.7524247169494629, "alphanum_fraction": 0.7672281861305237, "avg_line_length": 37.411766052246094, "blob_id": "00ee3192b643a9e6eb45d73d90d5d7ef155bb2ec", "content_id": "7c9f1093d9bc2f7930b5b54570172dc7f30f2377", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1959, "license_type": "permissive", "max_line_length": 99, "num_lines": 51, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/function/SearchServiceFunction.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.function;\n\nimport com.osdu.model.SearchResult;\nimport com.osdu.model.osdu.OsduSearchObject;\nimport com.osdu.service.SearchService;\nimport java.util.function.Function;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.messaging.Message;\nimport org.springframework.messaging.support.GenericMessage;\nimport org.springframework.stereotype.Component;\n\n/**\n * Function to Map OSDU compliant search query to Delfi query. 
The input format is described in\n * \"SDU-82935841-250319-1033.pdf\"; the output format is taken from the API description on the Delfi Developer\n * Portal -> Search Service -> /query\n */\n@Component\n@Slf4j\n@RequiredArgsConstructor\npublic class SearchServiceFunction implements\n    Function<Message<OsduSearchObject>, Message<SearchResult>> {\n\n  final SearchService searchService;\n\n  @Override\n  public Message<SearchResult> apply(Message<OsduSearchObject> messageSource) {\n    log.debug(\"Received request to search with the following arguments: {}\", messageSource);\n    SearchResult searchResult = searchService\n        .searchIndex(messageSource.getPayload(), messageSource.getHeaders());\n    log.debug(\"The request to search with the following arguments: {} \"\n        + \"resulted in the following object: {}\", messageSource, searchResult);\n    return new GenericMessage<>(searchResult);\n  }\n}\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7737226486206055, "avg_line_length": 36.10416793823242, "blob_id": "515189ec53ef172a3be0cbec16281e2322f4a228", "content_id": "93f6e0f5be1c61579d9ca7e9fe5c3bbfb630ca18", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1781, "license_type": "permissive", "max_line_length": 93, "num_lines": 48, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/provider/interfaces/IWorkflowStatusService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.interfaces;\n\nimport org.opengroup.osdu.core.common.exception.UnauthorizedException;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.workflow.model.GetStatusRequest;\nimport org.opengroup.osdu.workflow.model.GetStatusResponse;\nimport org.opengroup.osdu.workflow.model.UpdateStatusRequest;\nimport org.opengroup.osdu.workflow.model.UpdateStatusResponse;\n\npublic interface IWorkflowStatusService {\n\n  /**\n   * Returns the status of the specified workflow.\n   *\n   * @param request getStatus request\n   * @param headers headers\n   * @return workflow status.\n   * @throws UnauthorizedException if token and partitionID are missing or invalid\n   */\n  GetStatusResponse getWorkflowStatus(GetStatusRequest request, DpsHeaders headers);\n\n  /**\n   * Updates the status of the specified workflow and returns the result.\n   *\n   * @param request update status request\n   * @param headers headers\n   * @return workflow status.\n   * @throws UnauthorizedException if token and partitionID are missing or invalid\n   */\n  UpdateStatusResponse updateWorkflowStatus(UpdateStatusRequest request, DpsHeaders headers);\n\n}\n" }, { "alpha_fraction": 0.7864043116569519, "alphanum_fraction": 0.7903398871421814, "avg_line_length": 40.7164192199707, "blob_id": "9c190acd7bb9bd82e2542791d0e1773d6075f6ad", "content_id": "929f68aaef76062b3b115da2eae25cdd2df5f1e6",
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2795, "license_type": "permissive", "max_line_length": 95, "num_lines": 67, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/api/SubmitApi.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.api;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.storage.StorageRole;\nimport org.opengroup.osdu.ingest.model.SubmitRequest;\nimport org.opengroup.osdu.ingest.model.SubmitResponse;\nimport org.opengroup.osdu.ingest.model.WorkProductLoadManifest;\nimport org.opengroup.osdu.ingest.provider.interfaces.IOsduSubmitService;\nimport org.opengroup.osdu.ingest.provider.interfaces.ISubmitService;\nimport org.springframework.security.access.prepost.PreAuthorize;\nimport org.springframework.validation.annotation.Validated;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\nimport org.springframework.web.bind.annotation.RestController;\nimport org.springframework.web.context.annotation.RequestScope;\n\n@Slf4j\n@RequiredArgsConstructor\n@RestController\n@RequestScope\n@Validated\npublic class SubmitApi {\n\n final DpsHeaders headers;\n\n final ISubmitService submitService;\n final IOsduSubmitService osduSubmitService;\n\n // TODO: Create the permission for os-ingest and change pre authorize annotation\n @PostMapping(\"/submit\")\n @PreAuthorize(\"@authorizationFilter.hasPermission('\" + StorageRole.CREATOR + \"')\")\n public SubmitResponse submit(@RequestBody SubmitRequest request) {\n log.debug(\"Submit request received : {}\", request);\n SubmitResponse submitResponse = submitService.submit(request, headers);\n log.debug(\"Submit result ready : {}\", submitResponse);\n return submitResponse;\n }\n\n // TODO: Create the permission for os-ingest and change pre authorize annotation\n @PostMapping(\"/submitWithManifest\")\n @PreAuthorize(\"@authorizationFilter.hasPermission('\" + StorageRole.CREATOR + \"')\")\n public SubmitResponse submitWithManifest(@RequestBody WorkProductLoadManifest loadManifest) {\n log.debug(\"Submit with load manifest request received : {}\", loadManifest);\n SubmitResponse submitResponse = osduSubmitService.submit(loadManifest, headers);\n log.debug(\"Submit load manifest result ready : {}\", submitResponse);\n return submitResponse;\n }\n\n}\n" }, { "alpha_fraction": 0.7327188849449158, "alphanum_fraction": 0.7400921583175659, "avg_line_length": 32.90625, "blob_id": "e7911ba0e0a80d899af51e58dfea9d021a132483", "content_id": "1063928dc108ec3af76edd8a3d84fadd9dfd0ec0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", 
"length_bytes": 1085, "license_type": "permissive", "max_line_length": 86, "num_lines": 32, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/provider/interfaces/IIngestionStrategyService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.interfaces;\n\nimport org.opengroup.osdu.core.common.model.WorkflowType;\n\npublic interface IIngestionStrategyService {\n\n /**\n * Determine which ingestion strategy to use based on input parameters.\n *\n * @param workflowType workflow type\n * @param dataType data type\n * @param userId user id\n * @return name of ingestion strategy\n */\n String determineStrategy(WorkflowType workflowType, String dataType, String userId);\n}\n" }, { "alpha_fraction": 0.6716828346252441, "alphanum_fraction": 0.6777600646018982, "avg_line_length": 44.467105865478516, "blob_id": "f475c5271881dd3b3ce479c08adeb3da413b901e", "content_id": "bfebdba723c492492136faef43edbc91553b6308", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6911, "license_type": "permissive", "max_line_length": 214, "num_lines": 152, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/http/HttpClientHandler.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport org.apache.http.Header;\nimport org.apache.http.HttpHeaders;\nimport org.apache.http.HttpStatus;\nimport org.apache.http.ParseException;\nimport org.apache.http.client.ServiceUnavailableRetryStrategy;\nimport org.apache.http.client.config.RequestConfig;\nimport org.apache.http.client.methods.CloseableHttpResponse;\nimport org.apache.http.client.methods.HttpRequestBase;\nimport org.apache.http.entity.ContentType;\nimport org.apache.http.impl.client.CloseableHttpClient;\nimport org.apache.http.impl.client.HttpClients;\nimport org.apache.http.message.BasicHeader;\nimport org.apache.http.protocol.HttpContext;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.opengroup.osdu.core.common.model.http.HttpResponse;\nimport 
org.opengroup.osdu.core.common.model.http.RequestStatus;\nimport org.opengroup.osdu.core.common.logging.JaxRsDpsLog;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.context.annotation.Lazy;\nimport org.springframework.http.MediaType;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.context.annotation.RequestScope;\n\nimport java.io.BufferedReader;\nimport java.io.IOException;\nimport java.io.InputStreamReader;\nimport java.net.SocketTimeoutException;\nimport java.nio.charset.UnsupportedCharsetException;\nimport java.util.ArrayList;\nimport java.util.List;\n\n@Component\n@RequestScope\npublic class HttpClientHandler {\n\n private final int RETRY_COUNT = 3;\n\n @Autowired\n @Lazy\n private JaxRsDpsLog log;\n\n private final RequestConfig REQUEST_CONFIG = RequestConfig.custom()\n .setConnectTimeout(60000)\n .setConnectionRequestTimeout(60000)\n .setSocketTimeout(60000).build();\n\n public HttpResponse sendRequest(HttpRequestBase request, DpsHeaders requestHeaders) {\n\n Long curTimeStamp = System.currentTimeMillis();\n\n List<Header> httpHeaders = new ArrayList<>();\n for (String key : requestHeaders.getHeaders().keySet()) {\n httpHeaders.add(new BasicHeader(key, requestHeaders.getHeaders().get(key)));\n }\n if (!requestHeaders.getHeaders().containsKey(HttpHeaders.ACCEPT)) {\n httpHeaders.add(new BasicHeader(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON.toString()));\n }\n\n try {\n CloseableHttpClient httpclient = HttpClients.custom()\n .setDefaultHeaders(httpHeaders)\n .setDefaultRequestConfig(REQUEST_CONFIG)\n .setServiceUnavailableRetryStrategy(getRetryStrategy()).build();\n try (CloseableHttpResponse response = httpclient.execute(request)) {\n\n StringBuilder responseBuilder = new StringBuilder();\n try (BufferedReader br = new BufferedReader(new InputStreamReader(response.getEntity().getContent()))) {\n String responsePayloadLine;\n while ((responsePayloadLine = br.readLine()) != null) {\n responseBuilder.append(responsePayloadLine);\n }\n }\n\n String responseBody = responseBuilder.toString();\n\n // handle case where upstream server is running out of resources and throwing generic exception\n checkResponseMediaType(response, responseBody);\n\n HttpResponse output = new HttpResponse();\n output.setResponseCode(response.getStatusLine().getStatusCode());\n output.setBody(responseBody);\n if (output.getResponseCode() != 200) {\n log.info(String.format(\"method: %s | response code: %s | url: %s | error message: %s\", request.getMethod(), output.getResponseCode(), request.getURI().toString(), responseBody));\n }\n return output;\n }\n } catch (SocketTimeoutException e) {\n throw new AppException(RequestStatus.SOCKET_TIMEOUT, \"Socket time out\", \"Request cannot be completed in specified time\", e);\n } catch (IOException e) {\n throw new AppException(HttpStatus.SC_INTERNAL_SERVER_ERROR, \"Internal communication failure\", \"Internal communication failure\", e);\n } finally {\n Long latency = System.currentTimeMillis() - curTimeStamp;\n log.info(String.format(\"method: %s | latency: %s | url: %s | correlation id: %s\", request.getMethod(), latency, request.getURI().toString(), requestHeaders.getHeaders().get(DpsHeaders.CORRELATION_ID)));\n }\n }\n\n private ServiceUnavailableRetryStrategy getRetryStrategy() {\n return new ServiceUnavailableRetryStrategy() {\n @Override\n public boolean retryRequest(\n final org.apache.http.HttpResponse response, final int executionCount, final HttpContext context) {\n int 
statusCode = response.getStatusLine().getStatusCode();\n return statusCode >= 501 && executionCount <= RETRY_COUNT;\n }\n\n @Override\n public long getRetryInterval() {\n return 1000;\n }\n };\n }\n\n private boolean checkResponseMediaType(CloseableHttpResponse response, String responseBody) {\n try {\n String contentMimeType = ContentType.getOrDefault(response.getEntity()).getMimeType();\n if (ContentType.APPLICATION_JSON.getMimeType().equalsIgnoreCase(contentMimeType)) {\n return true;\n }\n throw new AppException(\n HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE,\n \"Unsupported media type\",\n String.format(\"upstream server responded with unsupported media type: %s\", contentMimeType),\n String.format(\"upstream server response: %s\", responseBody));\n } catch (ParseException | UnsupportedCharsetException e) {\n throw new AppException(\n HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE,\n \"Unsupported media type\",\n \"error parsing upstream server response entity content type\",\n String.format(\"upstream server response: %s\", responseBody), e);\n }\n }\n}\n" }, { "alpha_fraction": 0.7507011294364929, "alphanum_fraction": 0.7528825402259827, "avg_line_length": 36.3139533996582, "blob_id": "319b61bda626991429e77409ec4a8e1bc6110f33", "content_id": "8481c8564ec6f55728adfb1d955d03d1953932de", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6418, "license_type": "permissive", "max_line_length": 123, "num_lines": 172, "path": "/osdu-r2/os-workflow/provider/workflow-gcp/src/test/java/org/opengroup/osdu/workflow/provider/gcp/validation/GcpValidationServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.validation;\n\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.Assertions.tuple;\nimport static org.opengroup.osdu.workflow.provider.gcp.validation.GcpUpdateStatusRequestValidator.DATASTORE_MAX_VALUE_SIZE;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorFactory;\nimport javax.validation.ConstraintViolationException;\nimport javax.validation.Validation;\nimport javax.validation.Validator;\nimport javax.validation.ValidatorFactory;\nimport org.apache.commons.lang3.RandomStringUtils;\nimport org.hibernate.validator.HibernateValidatorConfiguration;\nimport org.hibernate.validator.internal.cfg.context.DefaultConstraintMapping;\nimport org.junit.jupiter.api.BeforeAll;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Nested;\nimport org.junit.jupiter.api.Test;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.opengroup.osdu.workflow.config.RequestConstraintMappingContributor;\nimport 
org.opengroup.osdu.workflow.model.UpdateStatusRequest;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.workflow.validation.CommonUpdateStatusRequestValidator;\nimport org.opengroup.osdu.workflow.validation.UpdateStatusRequestValidatorWrapper;\nimport org.opengroup.osdu.workflow.validation.ValidationServiceImpl;\n\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass GcpValidationServiceTest {\n\n private static final String WORKFLOW_ID_FIELD = \"WorkflowID\";\n private static final String NOT_BLANK_MESSAGE = \"must not be blank\";\n private static final String WORKFLOW_ID = \"workflow-id\";\n\n private static Validator validator;\n private IValidationService validationService;\n\n @BeforeAll\n static void initAll() {\n HibernateValidatorConfiguration configuration = (HibernateValidatorConfiguration) Validation\n .byDefaultProvider()\n .configure();\n\n RequestConstraintMappingContributor requestConstraintMappingContributor\n = new RequestConstraintMappingContributor();\n requestConstraintMappingContributor.createConstraintMappings(() -> {\n DefaultConstraintMapping mapping = new DefaultConstraintMapping();\n configuration.addMapping(mapping);\n return mapping;\n });\n\n ValidatorFactory factory = configuration\n .constraintValidatorFactory(new TestConstraintValidatorFactory())\n .buildValidatorFactory();\n validator = factory.getValidator();\n }\n\n @BeforeEach\n void setUp() {\n validationService = new ValidationServiceImpl(validator);\n }\n\n @Nested\n class ValidateUpdateStatusRequest {\n\n @Test\n void shouldSuccessfullyValidate() {\n // given\n UpdateStatusRequest request = UpdateStatusRequest.builder()\n .workflowId(WORKFLOW_ID)\n .workflowStatusType(WorkflowStatusType.RUNNING)\n .build();\n\n // when\n Throwable thrown = catchThrowable(\n () -> validationService.validateUpdateStatusRequest(request));\n\n // then\n assertThat(thrown).isNull();\n }\n\n @Test\n void shouldNotExecuteGcpSpecificValidationWhenCommonValidationIsFailed() {\n // given\n UpdateStatusRequest request = UpdateStatusRequest.builder()\n .workflowId(\"\")\n .workflowStatusType(WorkflowStatusType.RUNNING)\n .build();\n\n // when\n Throwable thrown = catchThrowable(\n () -> validationService.validateUpdateStatusRequest(request));\n\n // then\n assertThat(thrown)\n .isInstanceOf(ConstraintViolationException.class)\n .hasMessage(\"Invalid Update Workflow Status request\");\n\n ConstraintViolationException ex = (ConstraintViolationException) thrown;\n assertThat(ex.getConstraintViolations())\n .extracting(v -> tuple(v.getPropertyPath().toString(), v.getMessage()))\n .containsExactly(tuple(WORKFLOW_ID_FIELD, NOT_BLANK_MESSAGE));\n }\n\n @Test\n void shouldFailValidationWhenWorkflowIdIsTooLong() {\n // given\n UpdateStatusRequest request = UpdateStatusRequest.builder()\n .workflowId(RandomStringUtils.randomAlphanumeric(DATASTORE_MAX_VALUE_SIZE + 1))\n .workflowStatusType(WorkflowStatusType.RUNNING)\n .build();\n\n // when\n Throwable thrown = catchThrowable(\n () -> validationService.validateUpdateStatusRequest(request));\n\n // then\n assertThat(thrown)\n .isInstanceOf(ConstraintViolationException.class)\n .hasMessage(\"Invalid Update Workflow Status request\");\n\n ConstraintViolationException ex = (ConstraintViolationException) thrown;\n assertThat(ex.getConstraintViolations())\n .extracting(v -> tuple(v.getPropertyPath().toString(), v.getMessage()))\n .containsExactly(tuple(WORKFLOW_ID_FIELD, \"value 
length should be less than 1500\"));\n }\n\n }\n\n static class TestConstraintValidatorFactory implements ConstraintValidatorFactory {\n\n ConstraintValidatorFactory constraintValidatorFactory = Validation\n .buildDefaultValidatorFactory().getConstraintValidatorFactory();\n\n @Override\n public <T extends ConstraintValidator<?, ?>> T getInstance(Class<T> key) {\n\n if (UpdateStatusRequestValidatorWrapper.class.equals(key)) {\n CommonUpdateStatusRequestValidator updateStatusRequestValidator =\n new GcpUpdateStatusRequestValidator();\n return (T) new UpdateStatusRequestValidatorWrapper(updateStatusRequestValidator);\n }\n\n return constraintValidatorFactory.getInstance(key);\n }\n\n @Override\n public void releaseInstance(ConstraintValidator<?, ?> instance) {\n\n }\n }\n\n}\n" }, { "alpha_fraction": 0.7613227963447571, "alphanum_fraction": 0.7663421630859375, "avg_line_length": 37.842105865478516, "blob_id": "90ce5105fa2e1b64b6e0c05d0efa7328fd666085", "content_id": "1e2a3a6bfcd8268e1b8c92ec1972e892c3780e73", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1438, "license_type": "permissive", "max_line_length": 97, "num_lines": 38, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/provider/interfaces/FileListService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.interfaces;\n\nimport javax.validation.ConstraintViolationException;\nimport org.opengroup.osdu.core.common.model.file.FileListRequest;\nimport org.opengroup.osdu.core.common.model.file.FileListResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\n\npublic interface FileListService {\n\n /**\n * GetFileList is used for audit purposes. Since the system relies on the client to upload a file,\n * this method provides a way to identify whether the actual upload happened,\n * and potentially to clean up the database.\n *\n * @param request location request\n * @param headers request headers\n * @return a paginated file location result.\n * @throws ConstraintViolationException if request is invalid\n */\n FileListResponse getFileList(FileListRequest request, DpsHeaders headers);\n\n}\n" }, { "alpha_fraction": 0.6835278868675232, "alphanum_fraction": 0.7064418792724609, "avg_line_length": 34.58461380004883, "blob_id": "518a507c3a4f3ce5b6a1cec3a38d0c6bb9f7b6d5", "content_id": "5ba6d8b719c1d553245b3980cfcbce6dadf4fbcb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2313, "license_type": "permissive", "max_line_length": 107, "num_lines": 65, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/Coordinate.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, 
Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.SwaggerDoc;\n\nimport javax.validation.constraints.Max;\nimport javax.validation.constraints.Min;\n\n@Data\n@NoArgsConstructor\n@AllArgsConstructor\npublic class Coordinate {\n\n @Max(value = 90, message = SwaggerDoc.LATITUDE_VALIDATION_RANGE_MSG)\n @Min(value = -90, message = SwaggerDoc.LATITUDE_VALIDATION_RANGE_MSG)\n @ApiModelProperty(value = SwaggerDoc.LATITUDE, dataType = \"java.lang.Double\", example = \"37.450727\")\n private double latitude;\n\n @Max(value = 180, message = SwaggerDoc.LONGITUDE_VALIDATION_RANGE_MSG)\n @Min(value = -180, message = SwaggerDoc.LONGITUDE_VALIDATION_RANGE_MSG)\n @ApiModelProperty(value = SwaggerDoc.LONGITUDE, dataType = \"java.lang.Double\", example = \"-122.174762\")\n private double longitude;\n\n @Override\n public boolean equals(Object o) {\n if (this == o) return true;\n if (o == null || getClass() != o.getClass()) return false;\n\n Coordinate coordinate = (Coordinate) o;\n\n if (Double.compare(coordinate.latitude, latitude) != 0) return false;\n return Double.compare(coordinate.longitude, longitude) == 0;\n }\n\n @Override\n public int hashCode() {\n int result;\n long temp;\n temp = latitude != +0.0d ? Double.doubleToLongBits(latitude) : 0L;\n result = Long.hashCode(temp);\n temp = longitude != +0.0d ? 
Double.doubleToLongBits(longitude) : 0L;\n result = 31 * result + Long.hashCode(temp);\n return result;\n }\n}\n" }, { "alpha_fraction": 0.7364310026168823, "alphanum_fraction": 0.7392349243164062, "avg_line_length": 34.411346435546875, "blob_id": "573b6f11dfee7c62b7562c75d9c9f650b601b811", "content_id": "6677ec6c70cc04f87f9d40cdf21a25265ff90fe9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4993, "license_type": "permissive", "max_line_length": 96, "num_lines": 141, "path": "/compatibility-layer/service/delivery/src/test/java/com/osdu/service/processing/delfi/DelfiDataProcessingJobTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.processing.delfi;\n\nimport static com.osdu.service.processing.delfi.DelfiDataProcessingJob.BUCKET_URL;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertEquals;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.Mockito.when;\n\nimport com.osdu.model.Record;\nimport com.osdu.model.SrnToRecord;\nimport com.osdu.model.delfi.DelfiFile;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResult;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResultStatus;\nimport com.osdu.service.PortalService;\nimport com.osdu.service.SrnMappingService;\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.concurrent.CompletableFuture;\nimport org.junit.Before;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.Mock;\nimport org.mockito.junit.MockitoJUnitRunner;\n\n@RunWith(MockitoJUnitRunner.class)\npublic class DelfiDataProcessingJobTest {\n\n @Mock\n private SrnMappingService srnMappingService;\n @Mock\n private PortalService portalService;\n\n private static final String AUTHORIZATION_TOKEN = \"authToken\";\n private static final String PARTITION = \"partition\";\n private static final String SRN = \"srn\";\n private static final String SIGNED_URL = \"signedUrl\";\n private static final String RECORD_ID_1 = \"recordId1\";\n\n private DelfiDataProcessingJob dataProcessingJob;\n\n @Before\n public void init() {\n dataProcessingJob = new DelfiDataProcessingJob(srnMappingService, portalService);\n }\n\n @Test\n public void testNoLocation() throws Exception {\n // given\n SrnToRecord srnToRecord = SrnToRecord.builder().recordId(RECORD_ID_1).srn(SRN).build();\n when(srnMappingService.getSrnToRecord(eq(SRN))).thenReturn(srnToRecord);\n\n Record record = new Record();\n Map<String, Object> data = new HashMap<>();\n data.put(\"one\", \"test\");\n\n Map<String, Object> osduData = new HashMap<>();\n osduData.put(\"osdu\", data);\n\n record.setData(osduData);\n\n when(portalService.getRecord(eq(RECORD_ID_1), eq(AUTHORIZATION_TOKEN), eq(PARTITION)))\n .thenReturn(record);\n\n // when\n CompletableFuture<ProcessingResult> future = 
dataProcessingJob\n .process(SRN, AUTHORIZATION_TOKEN, PARTITION);\n ProcessingResult result = future.get();\n\n // then\n assertThat(result.getProcessingResultStatus()).isEqualTo(ProcessingResultStatus.DATA);\n assertThat(result.getFileLocation()).isNull();\n assertThat(result.getSrn()).isEqualTo(SRN);\n assertEquals(result.getData(), data);\n }\n\n @Test\n public void testWithFileLocation() throws Exception {\n // given\n SrnToRecord srnToRecord = SrnToRecord.builder().recordId(RECORD_ID_1).srn(SRN).build();\n when(srnMappingService.getSrnToRecord(eq(SRN))).thenReturn(srnToRecord);\n\n Record record = new Record();\n Map<String, Object> data = new HashMap<>();\n data.put(BUCKET_URL, \"test location\");\n Map<String, Object> osduData = new HashMap<>();\n osduData.put(\"two\", \"test\");\n data.put(\"osdu\", osduData);\n record.setData(data);\n when(portalService.getRecord(eq(RECORD_ID_1), eq(AUTHORIZATION_TOKEN), eq(PARTITION)))\n .thenReturn(record);\n\n DelfiFile delfiFile = new DelfiFile();\n delfiFile.setSignedUrl(SIGNED_URL);\n when(portalService.getFile(eq(\"test location\"), eq(AUTHORIZATION_TOKEN), eq(PARTITION)))\n .thenReturn(delfiFile);\n\n // when\n CompletableFuture<ProcessingResult> future = dataProcessingJob\n .process(SRN, AUTHORIZATION_TOKEN, PARTITION);\n ProcessingResult result = future.get();\n\n // then\n assertThat(result.getProcessingResultStatus()).isEqualTo(ProcessingResultStatus.FILE);\n assertThat(result.getFileLocation()).isEqualTo(SIGNED_URL);\n assertThat(result.getSrn()).isEqualTo(SRN);\n assertEquals(result.getData(), osduData);\n }\n\n @Test\n public void testNoMapping() throws Exception {\n // given\n when(srnMappingService.getSrnToRecord(eq(SRN))).thenReturn(null);\n\n // when\n CompletableFuture<ProcessingResult> future = dataProcessingJob\n .process(SRN, AUTHORIZATION_TOKEN, PARTITION);\n ProcessingResult result = future.get();\n\n // then\n assertThat(result.getProcessingResultStatus()).isEqualTo(ProcessingResultStatus.NO_MAPPING);\n assertThat(result.getFileLocation()).isNull();\n assertThat(result.getSrn()).isEqualTo(SRN);\n assertThat(result.getData()).isNull();\n }\n}\n" }, { "alpha_fraction": 0.729093611240387, "alphanum_fraction": 0.737105667591095, "avg_line_length": 28.367647171020508, "blob_id": "0268b75ff5340e37d965e2dc6d0ad50dc8ad3ec4", "content_id": "36272e9367ac9e6c64abd421e87990ae10518cc4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1997, "license_type": "permissive", "max_line_length": 75, "num_lines": 68, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/indexer/Records.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.indexer;\n\nimport org.opengroup.osdu.core.common.model.legal.Legal;\nimport 
org.opengroup.osdu.core.common.model.storage.ConversionStatus;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport lombok.*;\nimport org.opengroup.osdu.core.common.model.entitlements.Acl;\nimport org.opengroup.osdu.core.common.model.storage.RecordAncestry;\n\nimport java.util.List;\nimport java.util.Map;\n\n@Data\n@Builder\n@NoArgsConstructor\n@AllArgsConstructor\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\npublic class Records {\n\n @Singular\n private List<Entity> records;\n private List<String> notFound;\n @Singular\n private List<ConversionStatus> conversionStatuses;\n\n @Data\n @Builder\n @NoArgsConstructor\n @AllArgsConstructor\n @JsonInclude(JsonInclude.Include.NON_NULL)\n @JsonIgnoreProperties(ignoreUnknown = true)\n public static class Entity {\n private String id;\n private long version;\n private String kind;\n private Acl acl;\n private Legal legal;\n private RecordAncestry ancestry;\n private Map<String, Object> data;\n private List<Object> meta;\n }\n\n @Data\n @Builder\n public static class Type {\n private String type;\n }\n}\n" }, { "alpha_fraction": 0.7560843825340271, "alphanum_fraction": 0.7613227963447571, "avg_line_length": 34.657142639160156, "blob_id": "cb79ce65100e39607ed99a959d1cf101c05ac642", "content_id": "570cf3784e5d0b5ba09cd152038de307151704f8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1248, "license_type": "permissive", "max_line_length": 90, "num_lines": 35, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/provider/interfaces/StorageService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.interfaces;\n\nimport org.opengroup.osdu.delivery.model.SignedUrl;\n\npublic interface StorageService {\n\n /**\n * Creates the empty object blob in storage.\n * The bucket name is determined by the tenant using {@code partitionID}.\n * The object name is a concatenation of a filepath and a fileID.\n * The filepath is determined by the user.\n *\n * @param fileID file ID\n * @param authorizationToken authorization token\n * @param partitionID partition ID\n * @return info about the object URI, the signed URL, and who created the blob and when.\n */\n SignedUrl createSignedUrl(String fileID, String authorizationToken, String partitionID);\n\n}\n" }, { "alpha_fraction": 0.7379807829856873, "alphanum_fraction": 0.7443910241127014, "avg_line_length": 39.42361068725586, "blob_id": "277a9f2881461cd2af0876ffdb1f598522cf0d2a", "content_id": "a8efbd0b3a4453665d1258b8c87a8f59cd7d236e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4173, "license_type": "permissive", "max_line_length": 96, "num_lines": 115, "path": "/osdu-r2/os-workflow/provider/workflow-gcp-datastore/src/test/java/org/opengroup/osdu/workflow/provider/gcp/service/SubmitIngestServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.service;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.anyMap;\nimport static org.mockito.ArgumentMatchers.anyString;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\n\nimport com.google.api.client.http.HttpRequest;\nimport com.google.api.client.http.HttpResponse;\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.util.HashMap;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.opengroup.osdu.workflow.exception.RuntimeException;\nimport org.opengroup.osdu.workflow.provider.gcp.property.AirflowProperties;\nimport org.opengroup.osdu.workflow.provider.interfaces.ISubmitIngestService;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass SubmitIngestServiceTest {\n\n private static final String TEST_AIRFLOW_URL = \"http://test-airflow\";\n private static final String TEST_CLIENT_ID = \"client-id\";\n @Mock\n private GoogleIapHelper googleIapHelper;\n\n @Mock\n private AirflowProperties airflowProperties;\n\n @Mock\n private HttpRequest httpRequest;\n\n @Mock\n private HttpResponse httpResponse;\n\n ISubmitIngestService submitIngestService;\n\n @BeforeEach\n void setUp() {\n submitIngestService = new SubmitIngestServiceImpl(airflowProperties, googleIapHelper);\n }\n\n @Test\n void shouldStartWorkflow() throws IOException {\n\n // given\n HashMap<String, Object> data = new HashMap<>();\n data.put(\"key\", \"value\");\n 
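// Arrange: stub the Airflow URL, the IAP client id, and the HTTP round trip so no real request is sent.\n 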
given(airflowProperties.getUrl()).willReturn(TEST_AIRFLOW_URL);\n given(googleIapHelper.getIapClientId(eq(TEST_AIRFLOW_URL))).willReturn(TEST_CLIENT_ID);\n given(googleIapHelper.buildIapRequest(anyString(), eq(TEST_CLIENT_ID), anyMap()))\n .willReturn(httpRequest);\n given(httpRequest.execute()).willReturn(httpResponse);\n given(httpResponse.getContent()).willReturn(new ByteArrayInputStream(\"test\".getBytes()));\n\n // when\n submitIngestService.submitIngest(\"dag-name\", data);\n\n // then\n InOrder inOrder = Mockito.inOrder(airflowProperties, googleIapHelper);\n inOrder.verify(airflowProperties).getUrl();\n inOrder.verify(googleIapHelper).getIapClientId(eq(TEST_AIRFLOW_URL));\n inOrder.verify(googleIapHelper).buildIapRequest(anyString(), anyString(), anyMap());\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowExceptionIfRequestFails() throws IOException {\n\n // given\n HashMap<String, Object> data = new HashMap<>();\n data.put(\"key\", \"value\");\n given(airflowProperties.getUrl()).willReturn(TEST_AIRFLOW_URL);\n given(googleIapHelper.getIapClientId(eq(TEST_AIRFLOW_URL))).willReturn(TEST_CLIENT_ID);\n given(googleIapHelper.buildIapRequest(anyString(), eq(TEST_CLIENT_ID), anyMap()))\n .willReturn(httpRequest);\n given(httpRequest.execute()).willThrow(new IOException(\"test-exception\"));\n\n // when\n Throwable thrown = catchThrowable(() -> submitIngestService.submitIngest(\"dag-name\", data));\n\n // then\n then(thrown).satisfies(exception -> {\n then(exception).isInstanceOf(RuntimeException.class);\n then(exception).hasMessage(\"test-exception\");\n });\n }\n\n}\n" }, { "alpha_fraction": 0.7013945579528809, "alphanum_fraction": 0.7153404355049133, "avg_line_length": 28.0238094329834, "blob_id": "360ed07aca65e575cf832463ff1e8fa39471ce5a", "content_id": "c31c366ca11fea471709bdedcda4e8178256aae5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 111, "num_lines": 42, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/indexer/RecordStatus.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.indexer;\n\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.ToString;\n\n@Data\n@Builder\npublic class RecordStatus {\n\n private String id;\n private String kind;\n private String operationType;\n\n private IndexingStatus status;\n\n @ToString.Exclude private IndexProgress indexProgress;\n\n public String getLatestTrace() {\n if (indexProgress != null && indexProgress.getTrace() != null && indexProgress.getTrace().size() > 0) {\n return indexProgress.getTrace().peek();\n }\n return null;\n }\n}\n" }, { "alpha_fraction": 0.7655776143074036, "alphanum_fraction": 0.7765240669250488, "avg_line_length": 
37.81045913696289, "blob_id": "3fe74e176264ae72a6cc0ff61b1b09777bd95764", "content_id": "27fadb3b47b092e5b889867b9f9e06634df469d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5938, "license_type": "permissive", "max_line_length": 98, "num_lines": 153, "path": "/compatibility-layer/service/search/src/test/java/com/osdu/service/DelfiSearchServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service;\n\nimport static com.osdu.service.DelfiSearchService.KIND_HEADER_KEY;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.ArgumentMatchers.same;\nimport static org.mockito.Mockito.when;\n\nimport com.fasterxml.jackson.core.JsonProcessingException;\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.osdu.client.delfi.DelfiSearchClient;\nimport com.osdu.exception.SearchException;\nimport com.osdu.model.delfi.DelfiSearchObject;\nimport com.osdu.model.delfi.DelfiSearchResult;\nimport com.osdu.model.delfi.entitlement.UserGroups;\nimport com.osdu.model.delfi.geo.ByDistance;\nimport com.osdu.model.delfi.geo.GeoLocation;\nimport com.osdu.model.delfi.geo.SpatialFilter;\nimport com.osdu.model.osdu.OsduSearchObject;\nimport com.osdu.model.osdu.OsduSearchResult;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.request.OsduHeader;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.boot.test.context.SpringBootTest;\nimport org.springframework.boot.test.context.SpringBootTest.WebEnvironment;\nimport org.springframework.boot.test.mock.mockito.MockBean;\nimport org.springframework.messaging.MessageHeaders;\nimport org.springframework.test.context.junit4.SpringRunner;\n\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT)\npublic class DelfiSearchServiceTest {\n\n private static final String KIND = \"kind\";\n private static final String PARTITION = \"partition\";\n private static final String AUTHORIZATION = \"authorization\";\n private static final String RESULT_1 = \"result-1\";\n private static final String RESULT_2 = \"result-2\";\n private static final String APP_KEY = \"appKey\";\n\n @MockBean\n private DelfiPortalProperties portalProperties;\n @MockBean\n private DelfiSearchClient delfiSearchClient;\n @MockBean\n private AuthenticationService authenticationService;\n @Autowired\n private ObjectMapper objectMapper;\n\n @Autowired\n private DelfiSearchService searchService;\n\n @Test\n public void shouldSearchIndexByDistance() {\n\n //given\n Map<String, Object> 
headersMap = new HashMap<>();\n headersMap.put(KIND_HEADER_KEY, KIND);\n headersMap.put(OsduHeader.PARTITION, PARTITION);\n headersMap.put(OsduHeader.AUTHORIZATION, AUTHORIZATION);\n MessageHeaders headers = new MessageHeaders(headersMap);\n\n OsduSearchObject osduSearchObject = new OsduSearchObject();\n osduSearchObject.setStart(1);\n osduSearchObject.setCount(2);\n\n osduSearchObject.setGeoCentroid(Arrays.asList(Arrays.asList(36.742612, -99.074218)));\n\n DelfiSearchObject expectedDelfiSearchObject = new DelfiSearchObject();\n expectedDelfiSearchObject.setKind(KIND);\n expectedDelfiSearchObject.setLimit(2);\n expectedDelfiSearchObject.setOffset(1);\n SpatialFilter filter = new SpatialFilter();\n filter.setField(\"data.dlLatLongWGS84\");\n GeoLocation location = new ByDistance(Arrays.asList(Arrays.asList(36.742612, -99.074218)),\n 1000D);\n filter.setByDistance(location);\n expectedDelfiSearchObject.setSpatialFilter(filter);\n\n DelfiSearchResult delfiSearchResult = new DelfiSearchResult();\n delfiSearchResult.setTotalCount(2);\n delfiSearchResult.setResults(Arrays.asList(RESULT_1, RESULT_2));\n\n when(authenticationService.getUserGroups(eq(AUTHORIZATION), eq(PARTITION)))\n .thenReturn(new UserGroups());\n when(portalProperties.getAppKey()).thenReturn(APP_KEY);\n when(delfiSearchClient.searchIndex(eq(AUTHORIZATION), eq(APP_KEY), same(PARTITION),\n eq(expectedDelfiSearchObject))).thenReturn(delfiSearchResult);\n\n // when\n OsduSearchResult searchResult = (OsduSearchResult) searchService\n .searchIndex(osduSearchObject, headers);\n\n //then\n assertThat(searchResult.getCount()).isEqualTo(2);\n assertThat(searchResult.getTotalHits()).isEqualTo(2);\n assertThat(searchResult.getStart()).isEqualTo(1);\n assertThat(searchResult.getResults()).isEqualTo(Arrays.asList(RESULT_1, RESULT_2));\n }\n\n @Test\n public void shouldThrowExceptionIfParametersInvalid() throws JsonProcessingException {\n\n //given\n Map<String, Object> headersMap = new HashMap<>();\n headersMap.put(KIND_HEADER_KEY, KIND);\n headersMap.put(OsduHeader.PARTITION, PARTITION);\n headersMap.put(OsduHeader.AUTHORIZATION, AUTHORIZATION);\n MessageHeaders headers = new MessageHeaders(headersMap);\n\n OsduSearchObject osduSearchObject = new OsduSearchObject();\n osduSearchObject.setStart(1);\n osduSearchObject.setCount(2);\n osduSearchObject.setFulltext(null);\n osduSearchObject.setMetadata(null);\n osduSearchObject.setGeoCentroid(null);\n osduSearchObject.setGeoLocation(null);\n\n when(authenticationService.getUserGroups(eq(AUTHORIZATION), eq(PARTITION)))\n .thenReturn(new UserGroups());\n\n // when\n Throwable thrown = catchThrowable(() -> searchService.searchIndex(osduSearchObject, headers));\n\n // then\n assertThat(thrown)\n .isInstanceOf(SearchException.class)\n .hasMessageContaining(\"Input parameters validation fail - \");\n }\n}\n" }, { "alpha_fraction": 0.7560843825340271, "alphanum_fraction": 0.7582476735115051, "avg_line_length": 31.438596725463867, "blob_id": "6455369196ec9ea51de913deaa05b1351b6be0f9", "content_id": "549816c3aa2f199927c76dcd66ab5c078a5deaaa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3698, "license_type": "permissive", "max_line_length": 117, "num_lines": 114, "path": "/compatibility-layer/service/delfi-client/src/test/java/com/osdu/service/delfi/DelfiAuthenticationServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache 
License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.junit.Assert.assertEquals;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.Mockito.when;\n\nimport com.osdu.client.DelfiEntitlementsClient;\nimport com.osdu.exception.OsduException;\nimport com.osdu.exception.OsduUnauthorizedException;\nimport com.osdu.model.delfi.entitlement.Group;\nimport com.osdu.model.delfi.entitlement.UserGroups;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport java.util.Collections;\nimport org.assertj.core.api.Assertions;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.InjectMocks;\nimport org.mockito.Mock;\nimport org.mockito.junit.MockitoJUnitRunner;\n\n@RunWith(MockitoJUnitRunner.class)\npublic class DelfiAuthenticationServiceTest {\n\n private static final String APP_KEY = \"appKey\";\n private static final String PARTITION = \"partition\";\n private static final String AUTH = \"auth\";\n\n @Mock\n private DelfiEntitlementsClient delfiEntitlementsClient;\n @Mock\n private DelfiPortalProperties delfiPortalProperties;\n @InjectMocks\n private DelfiAuthenticationService delfiAuthenticationService;\n\n @Test\n public void shouldGetUserGroups() {\n\n // given\n UserGroups expectedUserGroups = new UserGroups();\n when(delfiPortalProperties.getAppKey()).thenReturn(APP_KEY);\n when(delfiEntitlementsClient.getUserGroups(eq(AUTH), eq(APP_KEY), eq(PARTITION))).thenReturn(expectedUserGroups);\n\n // when\n UserGroups userGroups = delfiAuthenticationService.getUserGroups(AUTH, PARTITION);\n\n // then\n assertEquals(expectedUserGroups, userGroups);\n }\n\n @Test\n public void shouldFailIfNoAuthToken() {\n\n // given\n when(delfiPortalProperties.getAppKey()).thenReturn(APP_KEY);\n String authToken = null;\n\n // when\n Throwable thrown = catchThrowable(\n () -> delfiAuthenticationService.getUserGroups(authToken, PARTITION));\n\n // then\n Assertions.assertThat(thrown)\n .isInstanceOf(OsduException.class)\n .hasMessageContaining(\"Missing authorization token\");\n }\n\n @Test\n public void shouldCheckAuthentication() {\n\n // given\n UserGroups userGroups = new UserGroups();\n userGroups.setGroups(Collections.singletonList(new Group()));\n\n when(delfiPortalProperties.getAppKey()).thenReturn(APP_KEY);\n when(delfiEntitlementsClient.getUserGroups(eq(AUTH), eq(APP_KEY), eq(PARTITION))).thenReturn(userGroups);\n\n // when\n delfiAuthenticationService.checkAuthentication(AUTH, PARTITION);\n }\n\n @Test\n public void shouldFailCheckAuthenticationIfThereIsNoUserGroups() {\n\n // given\n when(delfiPortalProperties.getAppKey()).thenReturn(APP_KEY);\n when(delfiEntitlementsClient.getUserGroups(eq(AUTH), eq(APP_KEY), eq(PARTITION))).thenReturn(null);\n\n // when\n Throwable thrown = catchThrowable(\n () -> delfiAuthenticationService.checkAuthentication(AUTH, PARTITION));\n\n // then\n Assertions.assertThat(thrown)\n .isInstanceOf(OsduUnauthorizedException.class)\n 
.hasMessageContaining(\"Missing user groups\");\n }\n}\n" }, { "alpha_fraction": 0.7278350591659546, "alphanum_fraction": 0.7329896688461304, "avg_line_length": 39.42361068725586, "blob_id": "065a44cadc79f140d11bd0e4f699626027b4806a", "content_id": "1d01b6318bebbc0be9d6faad73a66d9c7cfa624e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5820, "license_type": "permissive", "max_line_length": 101, "num_lines": 144, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/google/CustomMediaHttpUploader.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.google;\n\nimport static com.google.api.client.googleapis.media.MediaHttpUploader.UploadState.MEDIA_COMPLETE;\nimport static com.google.api.client.googleapis.media.MediaHttpUploader.UploadState.MEDIA_IN_PROGRESS;\n\nimport com.google.api.client.googleapis.media.MediaHttpUploader.UploadState;\nimport com.google.api.client.http.AbstractInputStreamContent;\nimport com.google.api.client.http.GenericUrl;\nimport com.google.api.client.http.HttpRequest;\nimport com.google.api.client.http.HttpRequestFactory;\nimport com.google.api.client.http.HttpResponse;\nimport com.google.api.client.http.HttpTransport;\nimport com.google.api.client.http.InputStreamContent;\nimport com.google.api.client.http.javanet.NetHttpTransport;\nimport com.google.api.client.util.ByteStreams;\nimport com.google.api.client.util.Preconditions;\nimport com.osdu.model.upload.ContentChunk;\nimport java.io.BufferedInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\npublic class CustomMediaHttpUploader {\n\n static final int MB = 0x100000;\n int chunkSize = 10 * MB;\n\n /**\n * Perform chunked resumable upload.\n *\n * @param mediaContent content to upload\n * @param uploadUrl url to upload to\n * @return HttpResponse of the upload request\n * @throws IOException if connection down\n */\n public HttpResponse resumableUpload(InputStreamContent mediaContent, GenericUrl uploadUrl)\n throws IOException {\n try (InputStream contentInputStream = new BufferedInputStream(mediaContent.getInputStream())) {\n UploadProgress progress = new UploadProgress(mediaContent.getLength(),\n UploadState.NOT_STARTED,\n uploadUrl);\n\n HttpResponse response;\n while (true) {\n ContentChunk contentChunk = buildContentChunk(mediaContent,\n progress.getTotalBytesServerReceived());\n HttpTransport transport = new NetHttpTransport();\n HttpRequestFactory requestFactory = transport.createRequestFactory();\n HttpRequest currentRequest = requestFactory.buildPutRequest(uploadUrl, null);\n currentRequest.setContent(contentChunk.getContent());\n currentRequest.getHeaders().setContentRange(contentChunk.getContentRange());\n 
currentRequest.getHeaders().setContentLength(mediaContent.getLength());\n\n response = executeCurrentRequestWithoutGZip(currentRequest);\n\n try {\n if (response.isSuccessStatusCode()) {\n progress.setTotalBytesServerReceived(mediaContent.getLength());\n if (mediaContent.getCloseInputStream()) {\n contentInputStream.close();\n }\n progress.updateProgressState(MEDIA_COMPLETE);\n return response;\n }\n\n if (response.getStatusCode() != 308) {\n return response;\n }\n\n long newBytesServerReceived = getNextByteIndex(response.getHeaders().getRange());\n long currentBytesServerReceived =\n newBytesServerReceived - progress.getTotalBytesServerReceived();\n Preconditions.checkState(currentBytesServerReceived >= 0\n && currentBytesServerReceived <= contentChunk.getLength());\n long notSendBytes = contentChunk.getLength() - currentBytesServerReceived;\n\n if (notSendBytes > 0) {\n contentInputStream.reset();\n long actualSkipValue = contentInputStream.skip(currentBytesServerReceived);\n Preconditions.checkState(currentBytesServerReceived == actualSkipValue);\n }\n\n progress.setTotalBytesServerReceived(newBytesServerReceived);\n progress.updateProgressState(MEDIA_IN_PROGRESS);\n } finally {\n response.disconnect();\n }\n }\n }\n }\n\n private ContentChunk buildContentChunk(InputStreamContent mediaContent,\n Long totalBytesServerReceived) {\n\n int blockSize = (int) Math.min(chunkSize, mediaContent.getLength() - totalBytesServerReceived);\n\n AbstractInputStreamContent contentChunk;\n InputStream contentInputStream = mediaContent.getInputStream();\n contentInputStream.mark(blockSize);\n\n InputStream limitInputStream = ByteStreams.limit(contentInputStream, blockSize);\n contentChunk = new InputStreamContent(mediaContent.getType(),\n limitInputStream).setRetrySupported(true).setLength(blockSize).setCloseInputStream(false);\n String mediaContentLengthStr = String.valueOf(mediaContent.getLength());\n\n String bytes = \"bytes\";\n String contentRange = blockSize == 0 ? 
String.format(\"%s %s\", bytes, mediaContentLengthStr)\n : String.format(\"%s %d-%d/%s\", bytes, totalBytesServerReceived,\n totalBytesServerReceived + blockSize - 1, mediaContentLengthStr);\n\n return new ContentChunk(contentChunk, contentRange, blockSize);\n }\n\n private HttpResponse executeCurrentRequestWithoutGZip(HttpRequest request) throws IOException {\n request.setThrowExceptionOnExecuteError(false);\n return request.execute();\n }\n\n private long getNextByteIndex(String rangeHeader) {\n if (rangeHeader == null) {\n return 0L;\n }\n return Long.parseLong(rangeHeader.substring(rangeHeader.indexOf('-') + 1)) + 1;\n }\n}" }, { "alpha_fraction": 0.7455024123191833, "alphanum_fraction": 0.753839373588562, "avg_line_length": 31.098590850830078, "blob_id": "ac0b15bc145962594d9fb182e5c492e7f19a8f92", "content_id": "5cce584b43a5774ea1939fd35430bb6befccc9f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2279, "license_type": "permissive", "max_line_length": 96, "num_lines": 71, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/validation/LegalValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.validation;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\n\nimport org.apache.commons.lang3.math.NumberUtils;\n\nimport org.opengroup.osdu.core.common.model.storage.Record;\n\nimport io.jsonwebtoken.lang.Collections;\nimport org.opengroup.osdu.core.common.model.storage.validation.ValidationDoc;\n\npublic class LegalValidator implements ConstraintValidator<ValidLegal, Record> {\n\n\t@Override\n\tpublic void initialize(ValidLegal constraintAnnotation) {\n\t\t// do nothing\n\t}\n\n\t@Override\n\tpublic boolean isValid(Record record, ConstraintValidatorContext context) {\n\n\t\tcontext.disableDefaultConstraintViolation();\n\n\t\tif (record.getAncestry() != null && !Collections.isEmpty(record.getAncestry().getParents())) {\n\t\t\tfor (String parent : record.getAncestry().getParents()) {\n\t\t\t\tString[] tokens = parent.split(\":\");\n\n\t\t\t\tif (tokens.length != 4) {\n\t\t\t\t\tString msg = String.format(ValidationDoc.INVALID_PARENT_RECORD_ID_FORMAT, parent);\n\n\t\t\t\t\tcontext.buildConstraintViolationWithTemplate(msg).addConstraintViolation();\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\n\t\t\t\tif (!NumberUtils.isCreatable(tokens[tokens.length - 1])) {\n\t\t\t\t\tString msg = String.format(ValidationDoc.INVALID_PARENT_RECORD_VERSION_FORMAT, parent);\n\n\t\t\t\t\tcontext.buildConstraintViolationWithTemplate(msg).addConstraintViolation();\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\n\t\tif (Collections.isEmpty(record.getLegal().getLegaltags())) 
{\n\t\t\tcontext.buildConstraintViolationWithTemplate(ValidationDoc.RECORD_LEGAL_TAGS_NOT_EMPTY)\n\t\t\t\t\t.addConstraintViolation();\n\t\t\treturn false;\n\t\t}\n\n\t\treturn true;\n\t}\n}\n" }, { "alpha_fraction": 0.7878270745277405, "alphanum_fraction": 0.7940841913223267, "avg_line_length": 36.40425491333008, "blob_id": "c33a4962cd7cf40d8dc49dd7727dfc6a68cad2eb", "content_id": "c46ca8d1670b023ed4b2efad6cca87136b1b7b80", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1758, "license_type": "permissive", "max_line_length": 84, "num_lines": 47, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/service/FileListServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.service;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.file.FileListRequest;\nimport org.opengroup.osdu.core.common.model.file.FileListResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileListService;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileLocationRepository;\nimport org.opengroup.osdu.delivery.provider.interfaces.ValidationService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class FileListServiceImpl implements FileListService {\n\n final ValidationService validationService;\n final FileLocationRepository fileLocationRepository;\n\n @Override\n public FileListResponse getFileList(FileListRequest request, DpsHeaders headers) {\n log.debug(\"Request file list with parameters : {}\", request);\n validationService.validateFileListRequest(request);\n\n FileListResponse response = fileLocationRepository.findAll(request);\n log.debug(\"File list result : {}\", response);\n return response;\n }\n\n}\n" }, { "alpha_fraction": 0.718219518661499, "alphanum_fraction": 0.7250094413757324, "avg_line_length": 24.990196228027344, "blob_id": "a29cbb894ef84ed3f375968a1bfeeeea034e2216", "content_id": "8c6c9dee7df80f51ffe331be7e4f1105e7311c45", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2651, "license_type": "permissive", "max_line_length": 93, "num_lines": 102, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/storage/RecordMetadata.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * 
Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.storage;\n\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.apache.http.HttpStatus;\n\nimport org.opengroup.osdu.core.common.model.entitlements.Acl;\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.opengroup.osdu.core.common.model.legal.Legal;\n\n@Data\n@NoArgsConstructor\npublic class RecordMetadata {\n\n\tprivate String id;\n\n\tprivate String kind;\n\n\tprivate Acl acl;\n\n\tprivate Legal legal;\n\n\tprivate RecordAncestry ancestry;\n\n\tprivate List<String> gcsVersionPaths = new ArrayList<>();\n\n\tprivate RecordState status;\n\n\tprivate String user;\n\n\t// epoch time\n\tprivate long createTime;\n\n\tprivate String modifyUser;\n\n\t// epoch time\n\tprivate long modifyTime;\n\n\tpublic RecordMetadata(Record record) {\n\t\tthis.id = record.getId();\n\t\tthis.kind = record.getKind();\n\t\tthis.acl = record.getAcl();\n\t\tthis.legal = record.getLegal();\n\t\tthis.ancestry = record.getAncestry();\n\t}\n\n    public Long getLatestVersion() {\n        String latestVersionPath = this.gcsVersionPaths.get(this.gcsVersionPaths.size() - 1);\n        String[] versionTokens = latestVersionPath.split(\"/\");\n        return Long.parseLong(versionTokens[versionTokens.length - 1]);\n    }\n\n\tpublic boolean hasVersion() {\n\t\treturn !this.getGcsVersionPaths().isEmpty();\n\t}\n\n\tpublic void addGcsPath(long version) {\n\t\tthis.gcsVersionPaths.add(String.format(\"%s/%s/%s\", this.kind, this.id, version));\n\t}\n\n\tpublic String getVersionPath(Long version) {\n\t\t// a version path has the form kind/id/version, so match the trailing version segment exactly\n\t\tfor (String path : this.gcsVersionPaths) {\n\t\t\tif (path.endsWith(\"/\" + version)) {\n\t\t\t\treturn path;\n\t\t\t}\n\t\t}\n\n\t\tthrow new AppException(HttpStatus.SC_NOT_FOUND, \"Record version not found\",\n\t\t\t\t\"The requested record version was not found\");\n\t}\n\n\tpublic void resetGcsPath(List<String> gcsVersionPathList) {\n\t    this.gcsVersionPaths.clear();\n\t    for (String path: gcsVersionPathList) {\n\t        this.gcsVersionPaths.add(path);\n        }\n    }\n}\n" }, { "alpha_fraction": 0.7878270745277405, "alphanum_fraction": 0.7940841913223267, "avg_line_length": 36.40425491333008, "blob_id": "c33a4962cd7cf40d8dc49dd7727dfc6a68cad2eb", "content_id": "c46ca8d1670b023ed4b2efad6cca87136b1b7b80", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1758, "license_type": "permissive", "max_line_length": 84, "num_lines": 47, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/service/FileListServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.service;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.file.FileListRequest;\nimport org.opengroup.osdu.core.common.model.file.FileListResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileListService;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileLocationRepository;\nimport org.opengroup.osdu.delivery.provider.interfaces.ValidationService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class FileListServiceImpl implements FileListService {\n\n final ValidationService validationService;\n final FileLocationRepository fileLocationRepository;\n\n @Override\n public FileListResponse getFileList(FileListRequest request, DpsHeaders headers) {\n log.debug(\"Request file list with parameters : {}\", request);\n validationService.validateFileListRequest(request);\n\n FileListResponse response = fileLocationRepository.findAll(request);\n log.debug(\"File list result : {}\", response);\n return response;\n }\n\n}\n" }, { "alpha_fraction": 0.6400688886642456, "alphanum_fraction": 0.6458094120025635, "avg_line_length": 33.84000015258789, "blob_id": "556f0928c70fbf66e9c4a0cc6d7fc6048e7aed56", "content_id": "212b036b7ec7919e227129857d405c608a41739a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1742, "license_type": "permissive", "max_line_length": 76, "num_lines": 50, "path": "/osdu-r2/terraform/datastore-import/datastore_import.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific 
language governing permissions and\n# limitations under the License.\n\n\"\"\"Imports JSON dumps into Cloud Datastore.\"\"\"\nimport os, sys\nimport json\nfrom google.cloud import datastore\n\n\nDATASTORE_CLIENT = datastore.Client(namespace=\"odes-namespace\")\n\n\ndef import_data(jsonfile):\n \"\"\"Load JSON, parse it into Datastore Entity object.\"\"\"\n with open(jsonfile, \"r\") as infile:\n result = json.load(infile)\n\n for item in result:\n entity = item[\"entity\"]\n kind = entity[\"key\"][\"path\"][0][\"kind\"]\n key = DATASTORE_CLIENT.key(kind)\n entry = datastore.Entity(key=key)\n props = entity[\"properties\"]\n exclude_list = []\n for key in props.keys():\n if \"stringValue\" in props[key].keys():\n entry[key] = props[key][\"stringValue\"]\n else:\n entry[key] = None\n if \"excludeFromIndexes\" in props[key].keys():\n exclude_list.append(key)\n entry.exclude_from_indexes = exclude_list\n DATASTORE_CLIENT.put(entry)\n infile.close()\n\n\nif __name__ == \"__main__\":\n path = os.path.dirname(sys.argv[0])\n import_data(path + \"/ingestion-strategy.json\")\n import_data(path + \"/schema-data.json\")\n" }, { "alpha_fraction": 0.506447434425354, "alphanum_fraction": 0.539329469203949, "avg_line_length": 33.276241302490234, "blob_id": "7b6c576c4dfb99c8633fd91e353bce2b7e6f9954", "content_id": "227adbe7db2bf60050006583ceb3e15383b094a9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6204, "license_type": "permissive", "max_line_length": 120, "num_lines": 181, "path": "/compatibility-layer/docs/API/Ingestion API.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Ingestion API overview\n\nThe current Ingestion service implementation supports uploading of Work Product Components and Files to the DELFI Data\nEcosystem.\n\nThe service requires the manifest to be added to each ingestion request. The manifest is a JSON description of the Work \nProducts, Work Product Components, or File to be ingested. The incoming manifest is compared to the \n`WorkProductLoadManifestStagedFiles` schema stored in the **service/ingest/src/main/resource/** directory.\n\n## Ingestion API\n\n### POST /submit\n\nSubmit OSDU Work Product Components or files for ingestion to the DELFI Data Ecosystem.\n\n| | Description |\n| ------------------- | -------------------------------------------------------------------------------------- |\n| Authorization | Authorization token must be included in the header: `Authorization: \"Bearer {token}\"`. 
| \n| URL parameters | None |\n| Request body | Must contain the manifest |\n| Content Type | `application/json` |\n| Return Content Type | `application/json` |\n\n#### Ingestion request body example\n\n```sh\ncurl -X POST \\\n https://{Apigee URI}/submit \\\n -H 'Accept: */*' \\\n -H 'Accept-Encoding: gzip, deflate' \\\n -H 'Authorization: Bearer <your token here>' \\\n -H 'Cache-Control: no-cache' \\\n -H 'Connection: keep-alive' \\\n -H 'Content-Length: 4276' \\\n -H 'Content-Type: application/json' \\\n -H 'Host: {Apigee URI}' \\\n -d '{\n \"WorkProduct\": {\n \"ResourceTypeID\": \"srn:type:work-product/WellLog:\",\n \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n \"Data\": {\n \"GroupTypeProperties\": {\n \"Components\": []\n },\n \"IndividualTypeProperties\": {\n \"Name\": \"AKM-11 LOG\",\n \"Description\": \"Well Log\"\n },\n \"ExtensionProperties\": {}\n },\n \"ComponentsAssociativeIDs\": [\n \"wpc-1\"\n ]\n },\n \"WorkProductComponents\": [\n {\n \"ResourceTypeID\": \"srn:type:work-product-component/WellLog:\",\n \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n \"Data\": {\n \"GroupTypeProperties\": {\n \"Files\": [],\n \"Artefacts\": []\n },\n \"IndividualTypeProperties\": {\n \"Name\": \"AKM-11 LOG\",\n \"Description\": \"Well Log\",\n \"WellboreID\": \"srn:master-data/Wellbore:1013:\",\n \"TopMeasuredDepth\": {\n \"Depth\": 2182.0004,\n \"UnitOfMeasure\": \"srn:reference-data/UnitOfMeasure:M:\"\n },\n \"BottomMeasuredDepth\": {\n \"Depth\": 2481.0,\n \"UnitOfMeasure\": \"srn:reference-data/UnitOfMeasure:M:\"\n },\n \"Curves\": [\n {\n \"Mnemonic\": \"DEPT\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:M:\"\n },\n {\n \"Mnemonic\": \"GR\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:GAPI:\"\n },\n {\n \"Mnemonic\": \"DT\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:US/F:\"\n },\n {\n \"Mnemonic\": \"RHOB\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:G/C3:\"\n },\n {\n \"Mnemonic\": \"DRHO\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:G/C3:\"\n },\n {\n \"Mnemonic\": \"NPHI\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:V/V:\"\n }\n ]\n },\n \"ExtensionProperties\": {}\n },\n \"AssociativeID\": \"wpc-1\",\n \"FileAssociativeIDs\": [\n \"f-1\"\n ]\n }\n ],\n \"Files\": [\n {\n \"ResourceTypeID\": \"srn:type:file/las2:\",\n \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n \"Data\": {\n \"GroupTypeProperties\": {\n \"FileSource\": \"\",\n \"PreLoadFilePath\": \"{Path to File}\"\n },\n \"IndividualTypeProperties\": {},\n \"ExtensionProperties\": {}\n },\n \"AssociativeID\": \"f-1\"\n }\n ]\n}\n'\n```\n\n> Note that the example request doesn't contain an actual authorization token, link to file, or Apigee URI.\n\n#### Ingestion response 
body example\n\nReturned is the ingestion job Universally Unique Identifier (UUID). The UUID can be used to get the current status of\nthe ingestion job.\n \n```json\n{\n \"jobId\":\"g83d3182-961a-4250-a73b-51b400cc54e2\"\n}\n```\n\n### GET /{jobId}/status \n\nGet the status of the ingestion job by ID. Possible statuses: FAILED, RUNNING, or COMPLETE.\n\nURI example: `/b90d7319-983q-4459-ao92-51b500cc54e2/status`\n\n#### Ingestion status response example\n\nReturned is the status of the requested ingestion job. The response body also contains the list of SRNs of Work Product \nComponents and Files that are being ingested.\n\n```json\n{\n \"id\":\"b90d7319-983q-4459-ao92-51b500cc54e2\",\n \"status\":\"COMPLETE\",\n \"srns\": [\n \"srn:type:work-product-component/WellLog0cd300a9a6ce483ea19e5f38ca97c199:\",\n \"srn:file/las2:ccc84e9603e7435392f690629acf5638:\"\n ]\n}\n```\n" }, { "alpha_fraction": 0.786171555519104, "alphanum_fraction": 0.791293203830719, "avg_line_length": 41.216217041015625, "blob_id": "9dc52a8b4ce225d61247c4879d37def65fd18843", "content_id": "b23085749da4ac52d2ed59c1ac1f448436805b7b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1562, "license_type": "permissive", "max_line_length": 85, "num_lines": 37, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/client/IWorkflowServiceClient.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.client;\n\nimport static org.opengroup.osdu.core.common.model.http.DpsHeaders.AUTHORIZATION;\nimport static org.opengroup.osdu.core.common.model.http.DpsHeaders.DATA_PARTITION_ID;\n\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowRequest;\nimport org.opengroup.osdu.ingest.aspect.CheckClientResponse;\nimport org.springframework.cloud.openfeign.FeignClient;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\nimport org.springframework.web.bind.annotation.RequestHeader;\n\n@FeignClient(url = \"${osdu.workflow-service.url}\", name = \"workflow-service\")\npublic interface IWorkflowServiceClient {\n\n @CheckClientResponse\n @PostMapping(\"/startWorkflow\")\n feign.Response startWorkflow(@RequestHeader(AUTHORIZATION) String authToken,\n @RequestHeader(DATA_PARTITION_ID) String partition,\n @RequestBody StartWorkflowRequest startWorkflowRequest);\n}\n" }, { "alpha_fraction": 0.7305747270584106, "alphanum_fraction": 0.7370114922523499, "avg_line_length": 32.46154022216797, "blob_id": "7f35b2758933aab604f38027b6fba834a3e9dd7c", "content_id": "ffa2f10ec47478ee9e9fc03310a0df34fc2ee301", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2175, "license_type": "permissive", "max_line_length": 98, "num_lines": 65, "path": 
"/compatibility-layer/service/ingest/src/main/java/com/osdu/service/validation/JsonValidationService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.validation;\n\nimport com.fasterxml.jackson.databind.JsonNode;\nimport com.networknt.schema.JsonMetaSchema;\nimport com.networknt.schema.JsonSchemaException;\nimport com.networknt.schema.JsonSchemaFactory;\nimport com.networknt.schema.ValidationMessage;\nimport com.osdu.exception.IngestException;\nimport com.osdu.service.processing.CustomSchemeFetcher;\nimport java.util.Set;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class JsonValidationService {\n\n final CustomSchemeFetcher schemeFetcher;\n\n /**\n * Validates given json against given schema.\n *\n * @param schema schema that will be used to validate json\n * @param toValidate json string to validate against given schema\n * @return {@link Set} of validation messages\n */\n public Set<ValidationMessage> validate(JsonNode schema, JsonNode toValidate) {\n try {\n return getFactory()\n .getSchema(schema)\n .validate(toValidate);\n } catch (JsonSchemaException e) {\n throw new IngestException(\n String.format(\"Error creating json validation schema from json object: %s\", schema), e);\n }\n }\n\n private JsonSchemaFactory getFactory() {\n return JsonSchemaFactory.builder(JsonSchemaFactory.getInstance())\n .addMetaSchema(JsonMetaSchema\n .builder(\"http://json-schema.org/draft-07/schema#\", JsonMetaSchema.getDraftV4())\n .build())\n .uriFetcher(schemeFetcher, \"https\")\n .build();\n }\n\n}\n" }, { "alpha_fraction": 0.6693426966667175, "alphanum_fraction": 0.6733567714691162, "avg_line_length": 30.15625, "blob_id": "68d0f533e0d6f79f47cad8d295bbc0e3d2e451dc", "content_id": "ba1808f70a585609f574c046293a0ff3fc81d698", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1993, "license_type": "permissive", "max_line_length": 97, "num_lines": 64, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/data/provider/DataProviderImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage 
com.osdu.core.data.provider;\n\nimport com.google.gson.Gson;\nimport com.google.gson.JsonElement;\nimport com.google.gson.JsonParser;\nimport com.google.gson.reflect.TypeToken;\nimport lombok.SneakyThrows;\n\nimport java.io.FileReader;\nimport java.util.*;\n\npublic class DataProviderImpl {\n static JsonElement element;\n static List<Map<String, String>> parseFromJson;\n\n /**\n * Fills in the list with the data from JsonElement\n */\n public static void fillInList(String filePath, String blockName) {\n parse(filePath, blockName);\n parseFromJson = new Gson().fromJson(element, new TypeToken<List<Map<String, String>>>() {\n }.getType());\n }\n\n /**\n * Convert List<Map> into List<Object> so it can be used for data provider\n *\n * @return iterator\n */\n public static Iterator<Object[]> mainIterator() {\n List<Object[]> data = new ArrayList<>();\n parseFromJson.forEach(item -> data.add(new Object[]{new HashMap<>(item)}));\n return data.iterator();\n }\n\n /**\n * Parse json data into JsonElement\n *\n * @param filepath for json\n * @param blockName in the json file\n */\n @SneakyThrows\n private static void parse(String filepath, String blockName) {\n element = new JsonParser().parse(new FileReader(filepath))\n .getAsJsonObject()\n .get(blockName);\n }\n}" }, { "alpha_fraction": 0.8041760921478271, "alphanum_fraction": 0.8132054209709167, "avg_line_length": 35.163265228271484, "blob_id": "b3c9ae4f46830ea287cd88b1690728640f5e9da3", "content_id": "875cfee5cc3aeb40ebf67289b613443a1bf6d29b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1772, "license_type": "permissive", "max_line_length": 114, "num_lines": 49, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/jobs/ComplianceMessagePushReceiver.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.jobs;\n\nimport com.google.gson.Gson;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.http.RequestBodyExtractor;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.context.annotation.RequestScope;\n\n@Component\n@RequestScope\npublic class ComplianceMessagePushReceiver {\n\n\t@Autowired\n\tprivate DpsHeaders dpsHeaders;\n\n\t@Autowired\n\tprivate RequestBodyExtractor requestBodyExtractor;\n\n\t@Autowired\n\tprivate LegalTagConsistencyValidator legalTagConsistencyValidator;\n\n\t@Autowired\n\tprivate ILegalComplianceChangeService legalComplianceChangeService;\n\n\tpublic void receiveMessageFromHttpRequest() {\n\t\tLegalTagChangedCollection dto = new 
Gson().fromJson(this.requestBodyExtractor.extractDataFromRequestBody(),\n\t\t\t\tLegalTagChangedCollection.class);\n\t\tLegalTagChangedCollection validDto = this.legalTagConsistencyValidator.checkLegalTagStatusWithLegalService(dto);\n\t\tthis.legalComplianceChangeService.updateComplianceOnRecords(validDto, this.dpsHeaders);\n\t}\n}\n" }, { "alpha_fraction": 0.7554092407226562, "alphanum_fraction": 0.7648165822029114, "avg_line_length": 39.88461685180664, "blob_id": "d02cc314743be84e2b676a3c9cde8d126fe9545b", "content_id": "1ce6a8f96e3863b51575e9a0bd6fa4066f4468eb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1063, "license_type": "permissive", "max_line_length": 131, "num_lines": 26, "path": "/compatibility-layer/Dockerfile", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Use the official maven/Java 8 image to create a build artifact.\n# https://hub.docker.com/_/maven\nFROM maven:3-jdk-8-alpine AS builder\n\n# Copy local code to the container image.\n#create all the needed folders\nWORKDIR /app\nCOPY pom.xml .\nCOPY common/ common/\nCOPY service/ service/\n\n# Build a release artifact for the child project\nRUN mvn -T2 package -DskipTests -B\n\n# Use AdoptOpenJDK for base image.\n# It's important to use OpenJDK 8u191 or above that has container support enabled.\n# https://hub.docker.com/r/adoptopenjdk/openjdk8\n# https://docs.docker.com/develop/develop-images/multistage-build/#use-multi-stage-builds\nFROM openjdk:8-slim\nWORKDIR /app\nARG SERVICE_NAME\nENV SERVICE_NAME $SERVICE_NAME\n# Copy the jar to the production image from the builder stage.\nCOPY --from=builder /app/service/${SERVICE_NAME}/target/osdu-gcp-service-${SERVICE_NAME}-*.jar osdu-gcp-service-${SERVICE_NAME}.jar\n# Run the web service on container startup.\nCMD java -Djava.security.egd=file:/dev/./urandom -Dserver.port=${PORT} -jar /app/osdu-gcp-service-${SERVICE_NAME}.jar\n" }, { "alpha_fraction": 0.7333948612213135, "alphanum_fraction": 0.7444649338722229, "avg_line_length": 29.11111068725586, "blob_id": "8e45b7e43eef786003ecc2cbeaae4f37f02ecbd9", "content_id": "e313b367088c38a5dd587d2af90a2825a7d35f92", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1084, "license_type": "permissive", "max_line_length": 99, "num_lines": 36, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/model/delfi/geo/SpatialFilter.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.model.delfi.geo;\n\nimport lombok.Data;\n\n/**\n * GeoLocation object descriptor used by Delfi API.\n */\n@Data\npublic class SpatialFilter {\n\n private static final String GEO_LOCATION_FIELD_ID = \"data.dlLatLongWGS84\";\n\n String field = GEO_LOCATION_FIELD_ID;\n //Delfi spec assumes that there can be 1 of 3 different types of 
objects and in order to maintain\n // that we need this property\n GeoLocation byBoundingBox;\n GeoLocation byDistance;\n GeoLocation byGeoPolygon;\n\n}\n" }, { "alpha_fraction": 0.6784786581993103, "alphanum_fraction": 0.681018054485321, "avg_line_length": 58.342464447021484, "blob_id": "47c298b7f97ac85a330f292fba0f4142b6b8247f", "content_id": "983ee08c37026de39ac0ba0de79d40ea634ec384", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17327, "license_type": "permissive", "max_line_length": 241, "num_lines": 292, "path": "/compatibility-layer/docs/OSDU Compatibility Layer Services.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU Compatibility Layer - Services Overview\n\nThe OSDU Compatibility Layer consists of three core OSDU services &mdash; Ingestion, Delivery, and Search &mdash; and \ntwo supplementary services that handle common functionality such as querying the DELFI Data Ecosystem and Cloud \nFirestore.\n\n## Contents\n\n* [Ingestion service overview](#ingestion-service-overview)\n* [Delivery service overview](#delivery-service-overview)\n* [Search service overview](#search-service-overview)\n* [Delfi-client](#delfi-client)\n* [Srn-mapper](#srn-mapper)\n\n## Ingestion service overview\n\nThe Ingestion service is designed to upload new data to DELFI. The current Ingestion service implementation supports the \nfollowing OSDU types:\n\n* Work Product Components\n* Files with the following extensions: .las, .csv, .pdf, and .txt\n\nIngestion of Work Product isn't currently supported.\n\nThe service requires a Work Product load manifest to be added to each ingestion request. The manifest is a JSON \ndescription of what the client needs OSDU GCP to ingest &mdash; metadata of the WPC or File to be ingested. The manifest \nmust be created according to the OSDU standard.\n\n### Ingestion API\n\nFor information on the Ingestion service endpoints, consult the following document inside the project's docs folder:\n\n* [Ingestion API]\n\n### Ingestion service interactions\n\nThe Ingestion service uses the **delfi-client** service to authenticate the incoming ingestion requests and to send OSDU\ntypes to DELFI for ingestion.\n\nAdditionally, the service uses the **srn-mapper** service to create new SRN to DELFI record mappings in Cloud Firestore \nwhen the incoming data or file is successfully ingested. The SRN to DELFI mappings are used by the [Delivery \nservice](#delivery-service-overview) to retrieve Work Product Components and Files upon delivery requests.\n\nThe Ingestion service uses Google Cloud Storage (GCS) as a buffer for uploaded files.\n\n### Ingestion service workflow\n\nThe basic workflow for the Ingestion service is to validate the incoming manifest and start the ingestion process. If \nfiles were added to the request, the service loads them to Google Cloud Storage before uploading them to DELFI. The file \nlocations must be added as fields in the loaded manifest. The service stores files by the signed URLs received from \nDELFI. \n\nAfter ingestion is completed, the client receives a Universally Unique Identifier (UUID) of the ingestion job. The \nclient can query the Ingestion service with the received UUID to learn the status of the current ingestion job.\n\nDetailed ingestion flow:\n\n1. 
Verify that the request contains a valid authorization token.\n    * If the authorization token is valid, start the ingestion process.\n    * If the authorization token is invalid, stop the workflow. Respond to the client with the **401 Unauthorized**\n    message.\n2. Validate the manifest.\n    * Receive the manifest and compare it to the schema stored inside the OSDU Compatibility Layer.\n    * If the manifest is not valid, fail ingestion and respond to the client with the FAILED status.\n    * If the manifest is valid, continue the ingestion process.\n3. Get the Work Product.\n    * Get Work Product Components from the received Work Product.\n    * Get links to files.\n4. Get the signed URLs from DELFI. The received files are stored at the signed URLs.\n5. Submit files to DELFI for ingestion.\n    * If file submission fails, create a failed file result.\n        * Set the `record` property to `NULL`\n        * Set the `srn` property to `NULL`\n        * Set the `success` property to `FALSE`\n        * Add the exception message to the `summaries` list\n    * If submission succeeds, fetch the ID of the new DELFI record.\n6. Generate an SRN for the file.\n7. Get the file record from DELFI and enrich it with data from the manifest.\n8. Validate the enriched record against the JSON schema stored in the project.\n9. Create the SRN to DELFI record ID mapping entry in Cloud Firestore.\n10. Create the result with the record, SRN, result of processing, and summary.\n    * Check how the files were processed.\n    * Collect the summaries of file processing.\n    * Generate an SRN for the WPC.\n    * Create a DELFI record for the WPC. Get the record ID and create an SRN to record ID mapping entry in Firestore.\n    * Validate the DELFI record against the JSON schema.\n    * Create the resulting WPC with Files, SRN, WPC record, the result of processing (Files processed && WPC valid), and \n    summary.\n    * Check how the WPCs were processed.\n    * Collect the summaries of WPC processing.\n    * Generate the SRN for the WP.\n    * Create a record for the WP in DELFI.\n    * Validate the record in DELFI using the JSON schema.\n    * Create the resulting WP with WPCs, SRN, WP record, the result of processing (WPCs processed and WP valid), and \n    summary.\n11. Check the result of processing the WP.\n    * If the result is successful, the resulting ingestion job is created with the generated SRNs and the `COMPLETED` \n    status.\n    * If the result is unsuccessful, all the newly created DELFI records are rescinded. \n        * Set the lifecycle status of the DELFI records to `RESCINDED`.\n        * Create the resulting ingestion job with SRNs, the `FAILED` status, and a summary.\n12. Save the resulting ingestion job to Cloud Firestore.\n13. Return the internal ingestion job status to the client.\n\n### Ingestion file types\n\nThe Ingestion service supports the following file types, as defined by the OSDU standard:\n\n* .las\n* .csv, .pdf, .txt as opaque files\n\nOnly the .las files are actually processed by DELFI. 
Other supported file types are only stored in DELFI without any \nprior processing.\n\nFor any file types not in the list, the ingestion process completes with an exception and\nthe result of ingestion is marked as `FAILED`.\n\n### Ingestion processes\n\nThe Ingestion service consists of the following internal services:\n\n| Service         | Description                                                                   |\n| --------------- | ---------------------------------------------------------------------------- |\n| Enrich          | Enriches Work Product Components records retrieved from DELFI                |\n| Ingest          | Formats the incoming ingestion request and sends data for ingestion to DELFI |\n| JSON validation | Validates the load manifest against the JSON schema stored in the project    |\n| Storage         | Uploads the files sent for ingestion to the Google Cloud Storage bucket      |\n| Submit          | Submits files to DELFI for processing                                        |\n\n#### Enrich service workflow\n\nThe Enrich service is designed to eliminate the differences in record formats between DELFI and OSDU. \nFor example, a DELFI record does not contain the `ResourceCurationStatus` field, which is required by OSDU.\n \nThe Enrich service's algorithm to update data:\n\n1. Fetch a DELFI record by ID.\n2. Put Work Product Component data from the load manifest into the DELFI record data.\n3. Collect the data that corresponds to the OSDU record format and put it into the DELFI record.\n\nThe following OSDU fields are added by the enrichment service to the DELFI records for each OSDU Work Product \nComponent:\n\n| Additional fields               | Description                                                                           |\n| ------------------------------ | ------------------------------------------------------------------------------------ |\n| ResourceHomeRegionID            | The name of the home GCP region for the OSDU resource object                          |\n| ResourceHostRegionIDs           | The name of the host GCP region(s) for the OSDU resource object                       |\n| ResourceObjectCreationDateTime  | Timestamp of the time at which Version 1 of this OSDU resource object was originated  |\n| ResourceVersionCreationDateTime | Timestamp of the time when the current version of this resource entered the OSDU      |\n| ResourceCurationStatus          | Describes the current Curation status. Possible values: CREATED, CURATING, CURATED    |\n| ResourceLifecycleStatus         | Describes the current Resource Lifecycle status. Possible values: LOADING, RECEIVED, ACCEPTED, RESCINDED, DELETED |\n| ResourceSecurityClassification  | Classifies the security level of the resources. Possible values: RESTRICTED, CLASSIFIED, CONFIDENTIAL, MOST-CONFIDENTIAL <br> **Always set to RESTRICTED in the current Ingestion service implementation** |\n\nAfter the enrichment service adds OSDU fields to the DELFI record, this record is validated using the \n`WorkProductComponent` schema, which is stored in Cloud Firestore and retrieved by the `ResourceTypeID` field. \n\nIf validation fails for at least one Work Product Component, ingestion for all loaded resources is canceled. More \nspecifically, in DELFI each new record's `ResourceLifecycleStatus` property is set to `RESCINDED`.\n\n## Delivery service overview\n\nThe Delivery service fetches records with well data or records with links to files from DELFI. For input, the Delivery \nservice expects a list of SRNs. \n\nThe Delivery service in the OSDU Compatibility Layer delivers Work Products, Work Product Components, and File types to the \nclient. 
In the case of files, the service responds with direct links to the files and the file records' metadata.\n\n### Delivery API\n\nFor information on the Delivery service endpoints, consult the following document inside the project's docs folder:\n\n* [Delivery API]\n\n### Delivery service interactions\n\nThe Delivery service uses the **delfi-client** service to authenticate the incoming delivery requests and send requests \nto DELFI.\n\nThe service also uses the **srn-mapper** service to search for SRN to DELFI record mappings, which are used to build \nrequests to DELFI.\n\n### Delivery service workflow\n\n1. Verify that the request contains a valid authorization token.\n    * If the authorization token is valid, start the delivery process.\n    * If the authorization token is invalid, stop the workflow. Respond to the client with the **401 Unauthorized**\n    message.\n2. For each SRN:\n    * Find the SRN to DELFI record ID mapping in Cloud Firestore. \n        * If there's no record for the current SRN in Cloud Firestore, set the processing result status to `NO_MAPPING`\n        for this SRN, and then return the result.\n    * From the found mapping, get the DELFI record ID for the current SRN, and then query DELFI with this ID.\n    * Determine whether the record returned by DELFI contains _file_ or _data_. Perform one of the following processes \n    on the record:\n        * If the record contains a URL to a GCS bucket, this record contains a file. Query DELFI to get the signed URL\n        for the file. Extend the result object:\n            * Set the processing result status to `FILE`.\n            * Set the data retrieved from the DELFI record.\n            * Set the `FileLocation` property to the signed URL generated by DELFI.\n            * Return the result.\n        * If the record doesn't contain a URL to a GCS bucket, this record only contains data. Extend the result object:\n            * Set the data retrieved from the DELFI record.\n            * Set the processing result status to `DATA`.\n            * Return the result.\n    * Add the record data that came from the DELFI record's `osdu` property to the response object.\n    * Add the file URL from the record data to the `FileLocation` property of the response object.\n    * Return the obtained data to the requester. The unprocessed SRNs are added to the `unprocessedSrns` field in the \n    response object.\n \n## Search service overview\n\nThe Search service provides the functionality to find subsurface records in DELFI. The service accepts search terms such\nas `fulltext`, `geospatial`, `metadata`, and `lineage`, and can return detailed data about the found item.\n\nThe search request to the service must come in the OSDU format. The service transforms the search object to the format\ncompatible with DELFI. The returned object from DELFI is mapped to the search result in the OSDU format and is then \nreturned to the client.\n\n### Search service API\n\nFor information on the Search service endpoints, consult the following document inside the project's docs folder:\n\n* [Search API]\n\n### Search service interactions\n\nThe Search service uses **delfi-client** to submit search requests to DELFI.\n\n### Search service workflow\n\n1. Verify that the request contains a valid authorization token.\n    * If the authorization token is valid, start the search process.\n    * If the authorization token is invalid, stop the workflow. Respond to the client with the **401 Unauthorized** message.\n2. Verify that the search request body includes at least one of the following search parameters: `fulltext`, `metadata`,\n`geo_location`, or `geo_centroid`. 
Perform one of the following sub-steps:\n * If no parameters from the list `fulltext`, `metadata`, `geo_location`, and `geo_centroid` are present, respond to \n the client with a successful message and an empty search result object. Stop the workflow.\n * If at least one parameter from `fulltext`, `metadata`, `geo_location`, or `geo_centroid` is present, map the \n incoming search object to the DELFI search object. Continue to step 3.\n3. Query DELFI for the given search request.\n4. Receive the search result from DELFI and map it to the OSDU-compliant search result.\n5. Return the result to the client.\n\n### Mapping of OSDU and DELFI search terms\n\nThere's a divergence between the DELFI format for data and the OSDU standard in terms of how search queries are \nformatted, which is why certain OSDU search terms can't be fully mapped to a DELFI search query. The Search service in\nthe OSDU Compatibility Layer ignores such terms.\n\nConsult the following table for more information on supported and ignored search terms.\n\n| OSDU search term | Description | Supported in DELFI |\n| ---------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------ |\n| fulltext | Single search expression | Yes |\n| geo_centroid | A list of numbers | Yes |\n| geo_location | Object with distance, type, and coordinates properties | Yes |\n| metadata | A list of string values | Yes |\n| facets | An array of facet names | Yes |\n| full_results | A boolean value that defines whether only indexed values should be returned <br> **Always `true` for queries to DELFI** | No |\n| sort | Object value to control sorting of search results | Yes |\n| start | The index of the first search result to be returned | Yes |\n| count | The number of search results to return for the current request | Yes |\n| map_aggregates | Boolean value <br> **Ignored by the Search service** | No |\n| zoom_level | Integer that represents the zoom level applied to geo queries <br> **Ignored by the Search service** | No |\n| aggregates_count | Integer used for the size of facet queries <br> **Ignored by the Search service** | No |\n\n## OSDU Compatibility Layer helper services\n\nThe OSDU Compatibility Layer comes with two helper services that implement the common features required by the three\ncore services &mdash; Ingestion, Delivery, and Search.\n\n### Delfi-client\n\nDelfi-client is a service that performs two key functions:\n\n* Validate the incoming requests. The service checks whether the authorization token and DELFI partition are correct.\n\n* Send ingestion, delivery, and search queries to DELFI. 
The delfi-client service queries DELFI for data such as stored \nrecords and files and saves data and files to DELFI.\n\n### Srn-mapper\n\nThe OSDU Compatibility Layer uses Cloud Firestore to store the mappings of SRNs to DELFI record IDs, and has a dedicated\nservice to communicate with Firestore.\n\nThe srn-mapper service is designed to get the stored SRN to DELFI record mappings and to store new mappings in case of \ningestion.\n\n[Ingestion API]: ./API/Ingestion%20API.md\n[Delivery API]: ./API/Delivery%20API.md\n[Search API]: ./API/Search%20API.md\n[OpenDES Contribution Wiki]: https://gitlab.opengroup.org/osdu/opendes-contribution-wiki/wikis/OSDU-(C)/Design-and-Implementation/Entity-and-Schemas/Comparing-OSDU-&-OpenDES-Schema-Semantics" }, { "alpha_fraction": 0.7338674068450928, "alphanum_fraction": 0.7416555285453796, "avg_line_length": 34.95199966430664, "blob_id": "be07c7bfcd27c7844565d294437b7389a69bc496", "content_id": "39df989a83535dc62d347ecc2e7892780dfa2139", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4494, "license_type": "permissive", "max_line_length": 97, "num_lines": 125, "path": "/compatibility-layer/service/delivery/src/test/java/com/osdu/service/processing/delfi/DelfiResultDataConverterTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.processing.delfi;\n\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertEquals;\n\nimport com.osdu.model.Record;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResult;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResultStatus;\nimport com.osdu.model.osdu.delivery.dto.DeliveryResponse;\nimport com.osdu.model.osdu.delivery.dto.ResponseFileLocation;\nimport java.util.Arrays;\nimport java.util.Collections;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.InjectMocks;\nimport org.mockito.junit.MockitoJUnitRunner;\n\n@RunWith(MockitoJUnitRunner.class)\npublic class DelfiResultDataConverterTest {\n\n private static final String SRN_1 = \"srn_1\";\n private static final String ONE = \"one\";\n private static final String TWO = \"two\";\n private static final String TEST = \"test\";\n private static final String SRN_2 = \"srn_2\";\n private static final String SRN_3 = \"srn_3\";\n\n @InjectMocks\n private DelfiResultDataConverter resultDataConverter;\n\n @Test\n public void shouldConvertDataRecordResult() {\n\n // given\n Map<String, Object> data = new HashMap<>();\n data.put(ONE, TEST);\n data.put(TWO, TEST);\n Record record = new Record();\n record.setData(data);\n\n ProcessingResult dataResult = createProcessingResult(ProcessingResultStatus.DATA, null, data,\n SRN_1);\n\n // when\n DeliveryResponse response = resultDataConverter\n 
.convertProcessingResults(Collections.singletonList(dataResult));\n\n // then\n assertThat(response.getUnprocessedSrns()).isEmpty();\n assertThat(response.getResult()).hasSize(1);\n assertEquals(response.getResult().get(0).getData(), data);\n assertThat(response.getResult().get(0).getFileLocation()).isNull();\n assertThat(response.getResult().get(0).getSrn()).isEqualTo(SRN_1);\n }\n\n @Test\n public void shouldConvertDataRecordFileRecordAndNoMappingResults() {\n\n // given\n Map<String, Object> data = new HashMap<>();\n data.put(ONE, TEST);\n data.put(TWO, TEST);\n Record record = new Record();\n record.setData(data);\n ProcessingResult dataResult = createProcessingResult(ProcessingResultStatus.DATA, null, data,\n SRN_1);\n\n Record fileRecord = new Record();\n fileRecord.setAdditionalProperties(data);\n ProcessingResult fileResult = createProcessingResult(ProcessingResultStatus.FILE,\n \"http://url.com\", data, SRN_2);\n\n ProcessingResult noMappingResult = createProcessingResult(ProcessingResultStatus.NO_MAPPING,\n null, null, SRN_3);\n\n List<ProcessingResult> results = Arrays.asList(dataResult, fileResult, noMappingResult);\n\n // when\n DeliveryResponse response = resultDataConverter.convertProcessingResults(results);\n\n // then\n assertThat(response.getResult()).hasSize(2);\n assertEquals(response.getResult().get(0).getData(), data);\n assertThat(response.getResult().get(0).getFileLocation()).isNull();\n assertThat(response.getResult().get(0).getSrn()).isEqualTo(SRN_1);\n\n assertEquals(response.getResult().get(1).getData(), data);\n assertEquals(response.getResult().get(1).getFileLocation(),\n new ResponseFileLocation(\"http://url.com\"));\n assertThat(response.getResult().get(1).getSrn()).isEqualTo(SRN_2);\n\n assertThat(response.getUnprocessedSrns()).hasSize(1);\n assertThat(response.getUnprocessedSrns().get(0)).isEqualTo(SRN_3);\n }\n\n private ProcessingResult createProcessingResult(ProcessingResultStatus status,\n String fileLocation, Map<String, Object> data, String srn) {\n ProcessingResult processingResult = new ProcessingResult();\n processingResult.setProcessingResultStatus(status);\n processingResult.setFileLocation(fileLocation);\n processingResult.setData(data);\n processingResult.setSrn(srn);\n\n return processingResult;\n }\n}\n" }, { "alpha_fraction": 0.6995768547058105, "alphanum_fraction": 0.7094499468803406, "avg_line_length": 30.27941131591797, "blob_id": "e5d506afbfcf78bd92c60c207f15aaa5c183b67b", "content_id": "4395736f127c010a7402c7e1a9cce42805e854bd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2127, "license_type": "permissive", "max_line_length": 228, "num_lines": 68, "path": "/osdu-r2/os-workflow/scripts/deploy.sh", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# !/bin/bash\nWORKDIR=$(cd \"$(dirname \"$0\")\"/..; pwd)\ncd \"$WORKDIR\" || exit 0\n\nif [[ -z $1 ]]; then\n cat 
<< EOF\nUsage: $0 provider mvn-user mvn-password cache-bucket [gcp-region]\nBuild and deploy a container to Cloud Run\n\n  provider      provider name\n  mvn-user      Maven repository user\n  mvn-password  Maven repository password\n  cache-bucket  GCS bucket for caching Cloud Build results\n  region        Google Cloud region (default: us-central1)\n\n\nEOF\n  exit 1\nfi\n\nPROVIDER=$1\nMAVEN_REPO_USER=$2\nMAVEN_REPO_PASS=$3\nCACHE_BUCKET=$4\nREGION=$5\n[[ -z $REGION ]] && REGION=us-central1\n\nif [[ -z $GOOGLE_CLOUD_PROJECT ]]; then\n  echo \"Enter your GCP project ID:\"\n  read -r GOOGLE_CLOUD_PROJECT\nfi\n\ngcloud config set project \"$GOOGLE_CLOUD_PROJECT\"\n\nif [[ -z $MAVEN_REPO_USER ]]; then\n  echo \"Enter Maven user name:\"\n  read -r MAVEN_REPO_USER\nfi\n\nif [[ -z $MAVEN_REPO_PASS ]]; then\n  echo \"Enter Maven user password:\"\n  read -r MAVEN_REPO_PASS\nfi\n\nif [[ -z $CACHE_BUCKET ]]; then\n  echo \"Enter the GCS bucket for caching Cloud Build results\"\n  read -r CACHE_BUCKET\nfi\n\nCOMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null)\n[[ -z $COMMIT_SHA ]] && COMMIT_SHA=latest\ngcloud builds submit --config \"${WORKDIR}\"/cloudbuild.yaml --substitutions=_PROVIDER_NAME=\"$PROVIDER\",_SHORT_SHA=\"$COMMIT_SHA\",_CACHE_BUCKET=\"$CACHE_BUCKET\",_MAVEN_REPO_USER=\"$MAVEN_REPO_USER\",_MAVEN_REPO_PASS=\"$MAVEN_REPO_PASS\"\n\ngcloud run deploy os-workflow --image gcr.io/\"${GOOGLE_CLOUD_PROJECT}\"/os-workflow/workflow-\"${PROVIDER}\":\"${COMMIT_SHA}\" --platform managed --region \"$REGION\"\n" }, { "alpha_fraction": 0.743658721446991, "alphanum_fraction": 0.7486549019813538, "avg_line_length": 33.46357727050781, "blob_id": "5ea4dacfb1dbb01f1db45e27723988f818308e90", "content_id": "1d6c34811f6d826eea1209077087fca35c163971", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5204, "license_type": "permissive", "max_line_length": 91, "num_lines": 151, "path": "/osdu-r2/os-delivery/delivery-core/src/test/java/org/opengroup/osdu/delivery/service/FileListServiceImplTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.service;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.BDDMockito.willThrow;\nimport static org.opengroup.osdu.delivery.TestUtils.AUTHORIZATION_TOKEN;\nimport static org.opengroup.osdu.delivery.TestUtils.PARTITION;\n\nimport java.time.LocalDateTime;\nimport java.time.ZoneOffset;\nimport java.util.Arrays;\nimport java.util.Date;\nimport java.util.HashMap;\nimport java.util.Map;\nimport javax.validation.ConstraintViolationException;\nimport org.apache.commons.lang3.RandomStringUtils;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport 
org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.model.file.DriverType;\nimport org.opengroup.osdu.core.common.model.file.FileListRequest;\nimport org.opengroup.osdu.core.common.model.file.FileListResponse;\nimport org.opengroup.osdu.core.common.model.file.FileLocation;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileListService;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileLocationRepository;\nimport org.opengroup.osdu.delivery.provider.interfaces.ValidationService;\n\n@ExtendWith(MockitoExtension.class)\nclass FileListServiceImplTest {\n\n private static final String GCS_FOLDER = \"gs://bucket/folder/\";\n private static final String TEMP_USER = \"temp-user\";\n\n @Mock\n private ValidationService validationService;\n @Mock\n private FileLocationRepository fileLocationRepository;\n\n private FileListService fileListService;\n\n @BeforeEach\n void setUp() {\n fileListService = new FileListServiceImpl(validationService, fileLocationRepository);\n }\n\n @Test\n void shouldReturnFileListByRequest() {\n // given\n LocalDateTime now = LocalDateTime.now();\n FileListRequest request = FileListRequest.builder()\n .timeFrom(now.minusHours(1))\n .timeTo(now)\n .pageNum(0)\n .items((short) 5)\n .userID(TEMP_USER)\n .build();\n DpsHeaders headers = getHeaders();\n\n given(fileLocationRepository.findAll(request)).willReturn(FileListResponse.builder()\n .content(Arrays.asList(\n getFileLocation(toDate(now.minusMinutes(10))),\n getFileLocation(toDate(now.minusMinutes(20)))))\n .number(0)\n .numberOfElements(2)\n .size(5)\n .build());\n\n // when\n FileListResponse response = fileListService.getFileList(request, headers);\n\n // then\n then(response).isEqualToIgnoringGivenFields(FileListResponse.builder()\n .number(0)\n .numberOfElements(2)\n .size(5)\n .build(), \"content\");\n then(response.getContent()).hasSize(2);\n\n InOrder inOrder = Mockito.inOrder(validationService, fileLocationRepository);\n inOrder.verify(validationService).validateFileListRequest(request);\n inOrder.verify(fileLocationRepository).findAll(request);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n void shouldThrowExceptionWhenGetFileListRequestIsInvalid() {\n // given\n FileListRequest request = FileListRequest.builder()\n .build();\n DpsHeaders headers = getHeaders();\n\n willThrow(ConstraintViolationException.class).given(validationService)\n .validateFileListRequest(request);\n\n // when\n Throwable thrown = catchThrowable(() -> fileListService.getFileList(request, headers));\n\n // then\n then(thrown).isInstanceOf(ConstraintViolationException.class);\n\n InOrder inOrder = Mockito.inOrder(validationService, fileLocationRepository);\n inOrder.verify(validationService).validateFileListRequest(request);\n inOrder.verifyNoMoreInteractions();\n }\n\n private DpsHeaders getHeaders() {\n Map<String, String> headers = new HashMap<>();\n headers.put(DpsHeaders.AUTHORIZATION, AUTHORIZATION_TOKEN);\n headers.put(DpsHeaders.DATA_PARTITION_ID, PARTITION);\n\n return DpsHeaders.createFromMap(headers);\n }\n\n private Date toDate(LocalDateTime dateTime) {\n return Date.from(dateTime.toInstant(ZoneOffset.UTC));\n }\n\n private FileLocation getFileLocation(Date createdDate) {\n String fileID = RandomStringUtils.randomAlphanumeric(3, 32);\n return FileLocation.builder()\n .fileID(fileID)\n .driver(DriverType.GCS)\n .location(GCS_FOLDER 
+ fileID)\n .createdAt(createdDate)\n .createdBy(TEMP_USER)\n .build();\n }\n\n}\n" }, { "alpha_fraction": 0.7405434250831604, "alphanum_fraction": 0.7480021119117737, "avg_line_length": 33.12727355957031, "blob_id": "0b855d03b8c1a87573706878ebb815baf9eb1857", "content_id": "68294891d1bb0060e0258266fb2d23f38cc19b17", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1877, "license_type": "permissive", "max_line_length": 98, "num_lines": 55, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/validation/schema/JsonValidationServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.validation.schema;\n\nimport com.fasterxml.jackson.databind.JsonNode;\nimport com.networknt.schema.JsonMetaSchema;\nimport com.networknt.schema.JsonSchemaException;\nimport com.networknt.schema.JsonSchemaFactory;\nimport com.networknt.schema.ValidationMessage;\nimport java.util.Set;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.ingest.exception.ServerErrorException;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class JsonValidationServiceImpl implements IJsonValidationService {\n\n @Override\n public Set<ValidationMessage> validate(JsonNode schema, JsonNode toValidate) {\n try {\n return getFactory()\n .getSchema(schema)\n .validate(toValidate);\n } catch (JsonSchemaException e) {\n throw new ServerErrorException(\n String.format(\"Error creating json validation schema from json object: %s\", schema), e);\n }\n }\n\n private JsonSchemaFactory getFactory() {\n return JsonSchemaFactory.builder(JsonSchemaFactory.getInstance())\n .addMetaSchema(JsonMetaSchema\n .builder(\"http://json-schema.org/draft-07/schema#\", JsonMetaSchema.getDraftV4())\n .build())\n .build();\n }\n\n}\n" }, { "alpha_fraction": 0.5372741222381592, "alphanum_fraction": 0.5510165691375732, "avg_line_length": 36.277191162109375, "blob_id": "083c9ed1baffe83795ddc5950442776985818521", "content_id": "355d53c0e6c9283a515afe4fa10ef5b75dbcde29", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 10624, "license_type": "permissive", "max_line_length": 146, "num_lines": 285, "path": "/osdu-r2/os-qa/pom.xml", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n ~ Copyright 2019 Google LLC\n ~\n ~ Licensed under the Apache License, Version 2.0 (the \"License\");\n ~ you may not use this file except in compliance with the License.\n ~ You may obtain a copy of the License at\n ~\n ~ https://www.apache.org/licenses/LICENSE-2.0\n ~\n ~ Unless required by applicable law or agreed to in writing, software\n ~ distributed under the License is 
distributed on an \"AS IS\" BASIS,\n ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n ~ See the License for the specific language governing permissions and\n ~ limitations under the License.\n -->\n\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n\n <groupId>com.osdu</groupId>\n <artifactId>osdu-qa</artifactId>\n <version>1.0-SNAPSHOT</version>\n\n <properties>\n <lombok.version>1.18.0</lombok.version>\n <owner.version>1.0.9</owner.version>\n <apache.version>3.7</apache.version>\n <logger.version>2.16</logger.version>\n <java.version>1.8</java.version>\n <testNg.version>6.14.3</testNg.version>\n <mockito.version>1.10.19</mockito.version>\n <jdbc.version>8.0.15</jdbc.version>\n <slf4j.simple.version>1.7.25</slf4j.simple.version>\n <rest.assured.versin>3.3.0</rest.assured.versin>\n <gson.version>2.8.5</gson.version>\n <json-simple.version>LATEST</json-simple.version>\n <awaitility.version>4.0.1</awaitility.version>\n <mock.version>3.10.8</mock.version>\n\n <!--configs for allure -->\n <plugin.allure.version>2.9</plugin.allure.version>\n <allure-testng.version>2.8.1</allure-testng.version>\n <aspectj.version>1.8.10</aspectj.version>\n <surefire.plugin.version>2.20</surefire.plugin.version>\n <maven.site.plugin.version>3.7.1</maven.site.plugin.version>\n <maven-project-info-reports.version>3.0.0</maven-project-info-reports.version>\n </properties>\n\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-compiler-plugin</artifactId>\n <configuration>\n <source>${java.version}</source>\n <target>${java.version}</target>\n </configuration>\n </plugin>\n\n <!--- ALLURE -->\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-site-plugin</artifactId>\n <version>${maven.site.plugin.version}</version>\n </plugin>\n\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-project-info-reports-plugin</artifactId>\n <version>${maven-project-info-reports.version}</version>\n </plugin>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-surefire-plugin</artifactId>\n <version>${surefire.plugin.version}</version>\n <configuration>\n <forkCount>0</forkCount>\n <argLine>\n -javaagent:\"${settings.localRepository}\\org\\aspectj\\aspectjweaver\\${aspectj.version}\\aspectjweaver-${aspectj.version}.jar\"\n </argLine>\n <systemPropertyVariables>\n <allure.results.directory>${project.build.directory}/allure-results\n </allure.results.directory>\n <property>\n <name>allure.results.directory</name>\n <value>${project.build.directory}/allure-results</value>\n </property>\n </systemPropertyVariables>\n <suiteXmlFiles>\n <suiteXmlFile>${suiteXmlFile}</suiteXmlFile>\n </suiteXmlFiles>\n </configuration>\n <dependencies>\n <dependency>\n <groupId>org.aspectj</groupId>\n <artifactId>aspectjweaver</artifactId>\n <version>${aspectj.version}</version>\n </dependency>\n </dependencies>\n </plugin>\n </plugins>\n </build>\n\n <reporting>\n <excludeDefaults>true</excludeDefaults>\n <plugins>\n <plugin>\n <groupId>io.qameta.allure</groupId>\n <artifactId>allure-maven</artifactId>\n <version>${plugin.allure.version}</version>\n <configuration>\n <reportVersion>2.3.3</reportVersion>\n </configuration>\n </plugin>\n </plugins>\n </reporting>\n\n <dependencies>\n <!--FRAMEWORKS-->\n 
<dependency>\n <groupId>org.projectlombok</groupId>\n <artifactId>lombok</artifactId>\n <version>${lombok.version}</version>\n </dependency>\n <dependency>\n <groupId>org.aeonbits.owner</groupId>\n <artifactId>owner</artifactId>\n <version>${owner.version}</version>\n </dependency>\n\n <!--UTIL LIBRARIES-->\n <dependency>\n <groupId>org.apache.commons</groupId>\n <artifactId>commons-lang3</artifactId>\n <version>${apache.version}</version>\n </dependency>\n\n <!--REPORTING-->\n <dependency>\n <groupId>io.qameta.allure</groupId>\n <artifactId>allure-testng</artifactId>\n <version>${allure-testng.version}</version>\n </dependency>\n <dependency>\n <groupId>io.qameta.allure</groupId>\n <artifactId>allure-rest-assured</artifactId>\n <version>LATEST</version>\n </dependency>\n\n <!--LOGGERS-->\n <dependency>\n <groupId>org.apache.logging.log4j</groupId>\n <artifactId>log4j-core</artifactId>\n <version>${logger.version}</version>\n </dependency>\n <dependency>\n <groupId>org.apache.logging.log4j</groupId>\n <artifactId>log4j-api</artifactId>\n <version>${logger.version}</version>\n </dependency>\n <dependency>\n <groupId>org.slf4j</groupId>\n <artifactId>slf4j-simple</artifactId>\n <version>${slf4j.simple.version}</version>\n </dependency>\n\n <!-- TESTING -->\n <dependency>\n <groupId>org.testng</groupId>\n <artifactId>testng</artifactId>\n <version>${testNg.version}</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>io.rest-assured</groupId>\n <artifactId>rest-assured</artifactId>\n <version>${rest.assured.versin}</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.testng</groupId>\n <artifactId>testng</artifactId>\n <version>${testNg.version}</version>\n <scope>compile</scope>\n </dependency>\n <dependency>\n <groupId>com.codeborne</groupId>\n <artifactId>selenide</artifactId>\n <version>LATEST</version>\n </dependency>\n\n <!-- Wait expected response -->\n <dependency>\n <groupId>org.awaitility</groupId>\n <artifactId>awaitility</artifactId>\n <version>${awaitility.version}</version>\n <scope>test</scope>\n </dependency>\n\n <!-- Data transfer -->\n <dependency>\n <groupId>com.google.code.gson</groupId>\n <artifactId>gson</artifactId>\n <version>${gson.version}</version>\n </dependency>\n <dependency>\n <groupId>com.googlecode.json-simple</groupId>\n <artifactId>json-simple</artifactId>\n <version>${json-simple.version}</version>\n </dependency>\n\n <!-- Mock -->\n <dependency>\n <groupId>org.mock-server</groupId>\n <artifactId>mockserver-netty</artifactId>\n <version>${mock.version}</version>\n </dependency>\n <dependency>\n <groupId>org.mock-server</groupId>\n <artifactId>mockserver-client-java</artifactId>\n <version>${mock.version}</version>\n </dependency>\n\n <!--GCP-->\n <dependency>\n <groupId>com.google.cloud</groupId>\n <artifactId>google-cloud-firestore</artifactId>\n <version>1.31.0</version>\n </dependency>\n <dependency>\n <groupId>com.google.cloud</groupId>\n <artifactId>google-cloud-pubsub</artifactId>\n <version>1.102.0</version>\n </dependency>\n <dependency>\n <groupId>com.google.cloud</groupId>\n <artifactId>google-cloud-storage</artifactId>\n <version>1.100.0</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>com.google.cloud</groupId>\n <artifactId>google-cloud-nio</artifactId>\n <version>0.120.0-alpha</version>\n </dependency>\n <dependency>\n <groupId>com.google.cloud</groupId>\n <artifactId>libraries-bom</artifactId>\n <version>3.0.0</version>\n <type>pom</type>\n <scope>import</scope>\n 
</dependency>\n <dependency>\n <groupId>com.google.http-client</groupId>\n <artifactId>google-http-client</artifactId>\n <version>1.33.0</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>com.google.guava</groupId>\n <artifactId>guava</artifactId>\n <version>28.1-jre</version>\n </dependency>\n <dependency>\n <groupId>com.google.http-client</groupId>\n <artifactId>google-http-client</artifactId>\n <version>1.33.0</version>\n </dependency>\n\n <!--Spring -->\n <dependency>\n <groupId>org.springframework</groupId>\n <artifactId>spring-context</artifactId>\n <version>5.2.2.RELEASE</version>\n </dependency>\n <dependency>\n <groupId>org.jsoup</groupId>\n <artifactId>jsoup</artifactId>\n <version>1.12.1</version>\n </dependency>\n </dependencies>\n</project>\n" }, { "alpha_fraction": 0.6448158025741577, "alphanum_fraction": 0.6670985221862793, "avg_line_length": 42.446529388427734, "blob_id": "2c665914b177c01fe8ac4a92e6ff6b8f82b3a38c", "content_id": "f96d3fb706e3b9f3a4f36d90b82ffa0d595dad58", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23157, "license_type": "permissive", "max_line_length": 146, "num_lines": 533, "path": "/osdu-r2/os-delivery/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU R2 Prototype Delivery service\n\n## Contents\n\n* [Introduction](#introduction)\n* [System interactions](#system-interactions)\n* [Validations](#validations)\n* [Delivery API](#delivery-api)\n * [POST /delivery](#post-delivery)\n * [POST /getLocation](#post-getlocation)\n * [POST /getFileLocation](#post-getfilelocation)\n * [POST /getFileList](#post-getfilelist)\n* [Service Provider Interfaces](#service-provider-interfaces)\n* [GCP implementation](#gcp-implementation)\n* [Datastore](#datastore)\n* [Firestore](#firestore-collections)\n\n## Introduction\n\nThe OSDU R2 Delivery service provides internal and external API endpoints to let the application or\nuser fetch any records from the system or request file location data. For example, users can\nrequest generation of an individual signed URL per file. Using a signed URL, OSDU R2 users will be\nable to upload their files to the system.\n\nThe current implementation of the Delivery service supports only cloud platform-specific locations.\nThe future implementations might allow the use of on-premises locations.\n\n## System interactions\n\nThe Delivery service defines the following workflows:\n\n* Delivery\n* File upload\n* File location delivery\n* File list delivery\n\n### Delivery\n\nThe delivery workflow is defined for the `/delivery` API endpoint. The user or application needs to\nstart this workflow when they need to obtain OSDU data from the system. The OSDU data can include\nMaster Data, Reference Data, Work Product, Work Product Component, or File.\n\nUpon a `/delivery` request:\n\n1. Verify the incoming request.\n * Verify the authentication token. Fail delivery if the token is missing or invalid, and then\n respond with the `401 Unauthorized` status.\n * Verify the partition ID. Fail delivery if the partition ID is missing or invalid or doesn't\n have assigned user groups, and then respond with the `400 Bad Request` status.\n2. 
For each SRN in the request:\n   * Find the _SRN to DELFI record ID_ mapping in the database.\n   * If there's no record for the current SRN in the database, set the processing result status\n   to `NO_MAPPING` for this SRN, and then return the result.\n   * From the found mapping, get the DELFI record ID for the current SRN, and then query DELFI with\n   this ID.\n   * Determine whether the record returned by DELFI contains _file_ or _data_. Perform one of the\n   following processes on the record:\n     * If the record contains a URL to a file, query the system to get the signed URL for the\n     file, and then:\n       * Set the processing result status to `FILE` for this SRN.\n       * Set the data retrieved from the record.\n       * Set the `FileLocation` property to the signed URL generated by DELFI.\n     * If the record doesn't contain a URL, this record only contains data. Carry out the\n     following actions:\n       * Set the data retrieved from the record.\n       * Set the processing result status to `DATA`.\n   * Add the record data that came from the record's `osdu` property to the response object.\n   * Add the file URL from the record data to the `FileLocation` property of the response object.\n3. Return the obtained data to the requester. The unprocessed SRNs are added to the\n`unprocessedSrns` property in the response body.\n\n### File upload\n\nThe file upload workflow is defined for the `/getLocation` API endpoint. The following diagram\nillustrates the workflow.\n\n![OSDU R2 Delivery Service getLocation](https://user-images.githubusercontent.com/21691607/76421952-233e5100-63ad-11ea-8893-3ad5b6950b4c.png)\n\nUpon a request to get a location for a file:\n\n1. Verify the incoming request.\n   * Verify the authentication token. Fail signed URL generation if the token is missing or\n   invalid, and then respond with the `401 Unauthorized` status.\n   * Verify the partition ID. Fail signed URL generation if the partition ID is missing, invalid or\n   doesn't have assigned user groups, and then respond with the `400 Bad Request` status.\n   * Verify the file ID if it's passed in the request body. Fail signed URL generation if the file\n   ID is invalid or if this ID has already been created. Respond with the `400 Bad Request` status\n   and the `Location for fileID {ID} already exists` message.\n2. Generate a new Universally Unique Identifier (UUID) for the file if a file ID wasn't provided.\n3. Create an empty object in storage, and then generate a signed URL with write access for that\nobject.\n   * By the signed URL, the user or application will upload their file for ingestion.\n   * The generated signed URL has a maximum duration of 7 days.\n   > How signed URLs are generated depends on the cloud platform.\n4. Create a record with file data in the database. The record will contain a key-value pair with the\nfile ID as the key and the object as the value. For more information on the record, consult the\n[Firestore](#firestore-collections) section.\n5. Return the signed URL and file ID to the application or user.\n\n### File location delivery\n\nThe file location delivery workflow is defined for the `/getFileLocation` API endpoint. The\nfollowing diagram demonstrates the workflow.\n\n![OSDU R2 Delivery Service getFileLocation](https://user-images.githubusercontent.com/21691607/76414998-11ef4780-63a1-11ea-8a38-cb4dc4522d83.png)\n\nUpon request from an OSDU R2 service:\n\n1. 
Verify the incoming request.\n   * Verify the authentication token. Fail file location delivery if the token is missing or\n   invalid, and then respond with the `401 Unauthorized` status.\n   * Verify the partition ID. Fail file location delivery if the partition ID is missing, invalid\n   or doesn't have assigned user groups, and then respond with the `400 Bad Request` status.\n2. Query the database with the `FileID` to get the file record.\n3. Return the `Location` and `Driver` from the file record to the calling service.\n\n### File list delivery\n\nThe file list delivery workflow is defined for the `/getFileList` API endpoint.\n\nUpon request from another OSDU R2 service:\n\n1. Verify the incoming request.\n   * Verify the authentication token. Fail file list delivery if the token is missing or invalid,\n   and then respond with the `401 Unauthorized` status.\n   * Verify the partition ID. Fail file list delivery if the partition ID is missing, invalid or\n   doesn't have assigned user groups, and then respond with the `400 Bad Request` status.\n   * Validate the file list request.\n2. Obtain the requested files from the database.\n3. Return the result to the caller.\n\n## Database interactions\n\nDuring each workflow, the Delivery service queries the database. For more information about the file\nrecords in the database, consult the [file-locations collection](#firestore-collections) section.\n\n## Validations\n\nThe Delivery service's current implementation performs a general check of the validity of the\nauthorization token and the DELFI partition ID before the service starts generation of a location.\n\nHowever, the Delivery service in the OSDU R2 Prototype doesn't perform any verification of whether a\nfile upload happened or whether the user started ingestion after uploading a file. In future OSDU\nimplementations, the Delivery service will be able to check whether file uploads did happen.\n\n## Delivery API\n\nThe Delivery service's API includes the following four methods in the OSDU R2 Prototype:\n\n* `/delivery`, external\n* `/getLocation`, external\n* `/getFileLocation`, internal\n* `/getFileList`, internal\n\nGeneral considerations related to querying the Delivery API:\n\n* Each endpoint must receive the authentication bearer token in the \"Authorization\" header. Example:\n`\"Authorization\": \"Bearer {token}\"`\n* Each endpoint must receive a DELFI partition ID in the header. Example:\n`\"Partition-Id\": \"default-partition\"`\n* The request and response Content Type is **application/json**.\n\n### POST /delivery\n\nThe `/delivery` API endpoint delivers a list of records with OSDU data retrieved from the system\nand, if no records were found for some SRNs, a list of unprocessed SRNs.\n\n#### Request body\n\nA delivery request contains a list of SRNs of the OSDU Master Data, Reference Data, Work Product,\nWork Product Component, or File that the user wants to obtain from the system. 
If the client wants\nto deliver a Work Product with full data for all related Work Product Components and Files, all\nrelated SRNs for WPCs and Files must also be included in the list.\n\n| Property | Type | Description |\n| -------------- | -------- | -------------------- |\n| SRNS | `List` | A list of SRNs |\n| TargetRegionID | `String` | The target region ID |\n\nExample request for master data:\n\n```sh\ncurl --location --request POST 'https://{path}/delivery' \\\n --header 'Authorization: Bearer {token}' \\\n --header 'Content-Type: application/json' \\\n --header 'Partition-Id: {assigned DELFI partition ID}' \\\n --data-raw '{\n \"SRNS\": [\"srn:master-data/Wellbore:2221:\"],\n \"TargetRegionID\": \"test\"\n }'\n```\n\nExample request for a file:\n\n```sh\ncurl --location --request POST 'https://{path}/delivery' \\\n --header 'Authorization: Bearer {token}' \\\n --header 'Content-Type: application/json' \\\n --header 'Partition-Id: {assigned DELFI partition ID}' \\\n --data-raw '{\n \"SRNS\": [\"srn:file/las2:c589ade1b53111e99047c99b225c8828:1\"],\n \"TargetRegionID\": \"test\"\n }'\n```\n\n#### Response body\n\nThe output from the `/delivery` endpoint is a list of results for the given list of SRNs. This list\nmay also contain links to download locations such as Google Cloud Storage if any of the given SRNs\nis associated with a file.\n\nResponse body for a master data request:\n\n```json\n{\n \"UnprocessedSRNs\": [],\n \"Result\": [\n {\n \"Data\": {\n \"GroupTypeProperties\": {},\n \"IndividualTypeProperties\": {\n \"BottomHoleLocation\": {\n \"MeasuredDepthUOMID\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"GeographicCoordinates\": [{\n \"X\": 53.1488564,\n \"Y\": 5.77885285\n }],\n \"VerticalCRSID\": \"srn:reference-data/VerticalCRS:NAP:\"\n },\n \"FacilityID\": \"4be5df1b-7640-11e9-861e-b4d5bde9ee44\",\n \"WellboreDepth\": [{\n \"WellboreDepth\": 1916,\n \"WellboreDepthUnitOfMeasureID\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"WellboreDepthTypeID\": \"srn:reference-data/WellboreDepthType:Total Depth:\"\n }, {\n \"WellboreDepth\": 1916,\n \"WellboreDepthUnitOfMeasureID\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"WellboreDepthTypeID\": \"srn:reference-data/WellboreDepth:TVD:\"\n }],\n \"PreferredZeroDepthPoint\": {\n \"OffsetHeight\": 4.5\n },\n \"ShellStandardLegendTypeID\": \"srn:reference-data/ShellStandardLegendType:P and A oil and gas shows:\",\n \"SequenceNumber\": 1,\n \"IntialDrillingReasonTypeID\": \"srn:reference-data/DrillingReasonType:Exploratory:\",\n \"FacilityName\": [{\n \"FacilityNameTypeID\": \"srn:reference-data/FacilityNameType:Name:\",\n \"FacilityName\": \"WRM-01\"\n }],\n \"WellID\": \"srn:master-data/Well:2221:\",\n \"FacilityTypeID\": \"srn:reference-data/FacilityType:Wellbore:\"\n }\n },\n \"SRN\": \"srn:master-data/Wellbore:2221:\"\n }\n ]\n}\n```\n\nThe following response body is given for a file request.\n\n```json\n{\n \"UnprocessedSRNs\": [],\n \"Result\": [{\n \"FileLocation\": {\n \"SignedURL\": \"https://storage.googleapis.com/bucket/object?X-Goog-Algorithm=...\"\n },\n \"Data\": {\n \"GroupTypeProperties\": {\n \"OriginalFilePath\": \"http://gitlab.opengroup.org/osdu/open-test-data/tree/master/1-data/3-provided/tno/well-logs/8267_a0801_1996_comp.las\"\n },\n \"IndividualTypeProperties\": {},\n \"ExtensionProperties\": {}\n },\n \"SRN\": \"srn:file/las2:c589ade1b53111e99047c99b225c8828:1\"\n }\n ]\n}\n```\n\n### POST /getLocation\n\nThe `/getLocation` API endpoint creates a new location in the landing zone, such as a GCS bucket,\nand 
returns it to the user so that they can upload a file for ingestion to that location.\n\n#### Request body\n\n| Property | Type | Description |\n| ------------- | -------- | --------------------- |\n| FileID | `String` | Unique ID of the file |\n\n> **Note**: If a `FileID` isn't provided in the request, the Delivery service generates a\n> Universally Unique Identifier (UUID) to be stored in `FileID`. If a `FileID` is provided and is\n> already registered in the system, an error is returned.\n\n> **Note**: The `FileID` must correspond to the regular expression: `^[\\\w,\\\s-]+(\\\.\\\w+)?$`.\n\nRequest example:\n\n```sh\ncurl --location --request POST 'https://{path}/getLocation' \\\\n  --header 'Authorization: Bearer {token}' \\\\n  --header 'Content-Type: application/json' \\\\n  --header 'Partition-Id: {DELFI partition ID}' \\\\n  --data-raw '{\n    \"FileID\": \"8900a83f-18c6-4b1d-8f38-309a208779cc\"\n  }'\n```\n\n#### Response\n\nThe Delivery service returns the following data.\n\n| Property | Type | Description |\n| --------- | -------- | ----------------------------------------------------- |\n| FileID | `String` | ID of the file to be ingested |\n| Location | `List` | List of key-value pairs with cloud provider details to access the landing zone* |\n| SignedURL | `String` | Signed URL by which the file to be ingested is stored |\n\n> **Note**: The landing zone is a location in a cloud provider's platform where the user uploaded\n> files for OSDU ingestion. The landing zone consists of the `Driver` and `Location` properties,\n> which are stored in the database for each file upload request.\n\nResponse example:\n\n```json\n{\n  \"FileID\": \"file ID\",\n  \"Location\": {\n    \"SignedURL\": \"GCS signed URL\"\n  }\n}\n```\n\n### POST /getFileLocation\n\nThe `/getFileLocation` API endpoint works similarly to `/getLocation`, but is internal and returns\nthe landing zone &mdash; `Location` and `Driver` &mdash; of a particular file instead of a signed\nURL.\n\nOnce the OSDU security model is formulated and approved, the `/getFileLocation` API endpoint will\nnot return files that belong to other users.\n\n#### Request body\n\n| Property | Type | Description |\n| -------- | -------- | ----------------------------- |\n| FileID | `String` | ID of the file to be ingested |\n\nRequest example:\n\n```sh\ncurl --location --request POST 'https://{path}/getFileLocation' \\\\n  --header 'Authorization: Bearer {token}' \\\\n  --header 'Content-Type: application/json' \\\\n  --header 'Partition-Id: {assigned DELFI partition ID}' \\\\n  --data-raw '{\n    \"FileID\": \"8900a83f-18c6-4b1d-8f38-309a208779cc\"\n  }'\n```\n\n#### Response\n\n| Property | Type | Description |\n| -------- | -------- | --------------------------------------------------- |\n| Driver | `String` | Description of the storage where the file is stored |\n| Location | `String` | Direct URI to the file in storage |\n\n### POST /getFileList\n\nThe `/getFileList` API endpoint allows auditing the attempted file uploads. The method is\nunavailable for third-party applications.\n\nThe ingestion process depends on whether the client application uploaded a file or not. 
The\n`/getFileList` endpoint is designed to let other OSDU services inspect which user uploaded a\nfile, whether the file was uploaded to the landing zone, and whether the user started ingestion\nafter the file upload.\n\n#### Request\n\n| Property | Type       | Description                                                |\n| -------- | ---------- | ---------------------------------------------------------- |\n| TimeFrom | `datetime` | The start of the time interval for the `CreatedAt` filter  |\n| TimeTo   | `datetime` | The end of the time interval for the `CreatedAt` filter    |\n| PageNum  | `integer`  | The page number to return paginated results                |\n| Items    | `short`    | The number of file records to return per page              |\n| UserID   | `String`   | The ID of the user role or group                           |\n\n> **Note**: The `UserID` property isn't supported in the OSDU R2 Prototype.\n\nRequest example:\n\n```sh\ncurl --location --request POST 'https://{path}/getFileList' \\\\n  --header 'Authorization: Bearer {token}' \\\\n  --header 'Partition-Id: {assigned DELFI partition ID}' \\\\n  --header 'Content-Type: application/json' \\\\n  --data-raw '{\n    \"PageNum\": 0,\n    \"TimeFrom\": \"2020-01-01T16:21:00.552Z\",\n    \"UserID\": \"common-user\",\n    \"TimeTo\": \"2020-02-15T16:28:44.220Z\",\n    \"Items\": 2\n  }'\n```\n\n### Response\n\nA paginated result of the records stored in the database.\n\n| Property | Type | Description |\n| ---------------- | --------- | ------------------------------------------------ |\n| Content | `List` | List of file records retrieved from the database |\n| Number | `integer` | The number of the returned page |\n| NumberOfElements | `integer` | The number of returned records |\n| Size | `short` | The size of the `Content` list |\n\nEach file record contains the following properties: `FileID`, `Driver`, `Location`, `CreatedAt`,\n`CreatedBy`. For more information on the returned properties, consult the [Firestore\ncollections](#firestore-collections) section.\n\nResponse example:\n\n```json\n{\n  \"Content\": [\n    {\n      \"FileID\": \"30a1ace6-1c8f-4f08-9982-2e9c5df8e878\",\n      \"Driver\": \"GCS\",\n      \"Location\": \"gs://osdu-temporary-files/common-user/1580461525198-2020-02-12-05-23-25-198/30a1ace6-1c8f-4f08-9982-2e9c5df8e878\",\n      \"CreatedAt\": \"2020-02-12T05:24:25.198+0000\",\n      \"CreatedBy\": \"common-user\"\n    },\n    {\n      \"FileID\": \"da057da3-0fdb-41e4-afdc-3b63b812d484\",\n      \"Driver\": \"GCS\",\n      \"Location\": \"gs://osdu-temporary-files/common-user/1580461525198-2020-02-13-12-19-14-205/da057da3-0fdb-41e4-afdc-3b63b812d484\",\n      \"CreatedAt\": \"2020-02-13T12:19:14.205+0000\",\n      \"CreatedBy\": \"common-user\"\n    }\n  ],\n  \"Number\": 0,\n  \"NumberOfElements\": 2,\n  \"Size\": 2\n}\n```\n\n## Service Provider Interfaces\n\nThe Delivery service has several Service Provider Interfaces that the classes need to implement.\n\n| Interface | Required/Optional | Path |\n| ---------------------- | ----------------------- | ------------------------------------------------------------------------ |\n| AuthenticationService | Optional to implement | `delivery-core/src/main/java/.../provider/interfaces/AuthenticationService` |\n| FileListService | Optional to implement | `delivery-core/src/main/java/.../provider/interfaces/FileListService` |\n| FileLocationRepository | Optional to implement | `delivery-core/src/main/java/.../provider/interfaces/FileLocationRepository` |\n| FileService | Optional to implement | `delivery-core/src/main/java/.../provider/interfaces/FileService` |\n| LocationMapper | Obligatory to implement | `delivery-core/src/main/java/.../provider/interfaces/LocationMapper` |\n| LocationService | Optional to implement | 
`delivery-core/src/main/java/.../provider/interfaces/LocationService` |\n| StorageRepository | Obligatory to implement | `delivery-core/src/main/java/.../provider/interfaces/StorageRepository` |\n| StorageService | Obligatory to implement | `delivery-core/src/main/java/.../provider/interfaces/StorageService` |\n| ValidationService | Optional to implement | `delivery-core/src/main/java/.../provider/interfaces/ValidationService` |\n\n## GCP implementation\n\nThe GCP Identity and Access Management service account for the Delivery service must have the\n`iam.serviceAccounts.signBlob` permission.\n\nThe predefined **Cloud Functions Service Agent**, **Cloud Run Service Agent**, and **Service Account\nToken Creator** roles include the required permission.\n\nFor development purposes, it's recommended to create a separate service account.\nIt's enough to grant the **Service Account Token Creator** role to the development service account.\n\nObtaining user credentials for Application Default Credentials isn't suitable in this case because\nsigning a blob is only available with the service account credentials. Remember to set the\n`GOOGLE_APPLICATION_CREDENTIALS` environment variable. Follow the [instructions on the Google\ndeveloper's portal][application-default-credentials].\n\n### Persistence layer\n\nThe GCP implementation contains two mutually exclusive modules to work with the persistence layer.\nPresently, OSDU R2 connects to legacy Cloud Datastore for compatibility with the current OpenDES\nimplementation. In future OSDU releases, Cloud Datastore will be replaced by a Cloud Firestore\nimplementation that's already available in the project.\n\n* The Cloud Datastore implementation is located in the **provider/delivery-gcp-datastore** folder.\n* The Cloud Firestore implementation is located in the **provider/delivery-gcp** folder.\n\nTo learn more about the available collections, see the [Firestore collections](#firestore-collections)\nsection.\n\n## Datastore\n\nThe service account for the Delivery service must have the `datastore.indexes.*` permissions. 
The\npredefined **roles/datastore.indexAdmin** and **roles/datastore.owner** roles include the required\npermission.\n\n## Firestore collections\n\nThe GCP implementation of the Delivery service uses Cloud Firestore with the following collections\nand indexes.\n\n### `file-locations` collection\n\n| Field | Type | Description |\n| --------- | -------- | ------------------------------------------------------------------------- |\n| FileID | `Object` | Unique file ID that references a file data object with Driver, Location, CreatedAt, and CreatedBy |\n| Driver | `String` | Description of the storage where files were loaded |\n| Location | `String` | Direct URI to the file in storage |\n| CreatedAt | `String` | Time when the record was created |\n| CreatedBy | `String` | ID of the user that requested the file location |\n\n> **Note**: The `Location` value might be different from the signed URL returned to the user.\n\n> **Note**: The `CreatedBy` property isn't supported in the OSDU R2 Prototype.\n\n### Indexes\n\n#### Single Field\n\n| Collection ID | Field path | Collection scope | Collection group scope |\n| -------------- | ---------- | ---------------- | ---------------------- |\n| file-locations | FileID | _no changes_ | _no changes_ |\n\n#### Composite\n\n| Collection ID | Fields | Query scope |\n| -------------- | ---------------------------------- | ----------- |\n| file-locations | `CreatedBy: ASC`, `CreatedAt: ASC` | Collection |\n\n[application-default-credentials]: https://developers.google.com/identity/protocols/application-default-credentials#calling\n" }, { "alpha_fraction": 0.8545454740524292, "alphanum_fraction": 0.8545454740524292, "avg_line_length": 54, "blob_id": "6b042f90ea45bcdc8ca9f9ea371d97a3516d0ed9", "content_id": "b0ac9d4e8070c914d04239fa2c415ea58c463d63", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "permissive", "max_line_length": 54, "num_lines": 1, "path": "/osdu-r2/os-delivery/CONTRIBUTING.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "This project is currently not accepting contributions.\n" }, { "alpha_fraction": 0.719584584236145, "alphanum_fraction": 0.7255192995071411, "avg_line_length": 41.15625, "blob_id": "ccdf5a4d8eaeac78214eb594e2c4f9c833206dbf", "content_id": "1d828ef269405ba76658345ede4bdf6e12387161", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1348, "license_type": "permissive", "max_line_length": 75, "num_lines": 32, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/utils/constants/RequestConstantHolder.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.core.utils.constants;\n\npublic class RequestConstantHolder {\n    public static final String AUTHORIZATION = \"Authorization\";\n    public static final String BEARER = \"Bearer \";\n    public static final String PARTITION_ID = \"Data-Partition-Id\";\n    public static final String LEGAL_TAGS = \"Legal-Tags\";\n    public static final String KIND = \"kind\";\n    public static final String APP_KEY = \"AppKey\";\n    public static final String CONTENT_TYPE = \"Content-Type\";\n    public static final String ACCEPT = \"Accept\";\n    public static final String ACCEPT_VALUE = \"*/*\";\n    public static final String CONTENT_TYPE_JSON = \"application/json\";\n    public static final String HTTP_GET = \"GET\";\n    public static final String HTTP_POST = \"POST\";\n}" }, { 
"alpha_fraction": 0.7180548310279846, "alphanum_fraction": 0.7263321280479431, "avg_line_length": 35.47169876098633, "blob_id": "a9c232055bf41361ed4f9204bfa9780b880b3f19", "content_id": "5f3f14c7950f1b1773fd8a07a78b6b2843d65636", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1933, "license_type": "permissive", "max_line_length": 133, "num_lines": 53, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/validation/PropertiesValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.validation;\n\nimport org.opengroup.osdu.core.common.model.legal.Properties;\nimport org.opengroup.osdu.core.common.model.legal.validation.rules.Rule;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\nimport java.util.List;\n\n//This will hold validation for the Properties model for the properties that rely on other property values for their validation rules\n//any properties in this model that do not rely on other properties will have their own validator\npublic class PropertiesValidator implements ConstraintValidator<ValidLegalTagProperties, Properties> {\n\n private final List<Rule> ruleSet;\n\n public PropertiesValidator(List<Rule> ruleSet){\n this.ruleSet = ruleSet;\n }\n\n @Override\n public void initialize(ValidLegalTagProperties constraintAnnotation) {\n //needed by interface - we don't use\n }\n\n @Override\n public boolean isValid(Properties legalTagProperties, ConstraintValidatorContext context) {\n for (Rule rule : ruleSet) {\n if(rule.shouldCheck(legalTagProperties)) {\n if (!rule.isValid(legalTagProperties, context)) {\n return false;\n }\n }\n }\n return true;\n }\n}\n" }, { "alpha_fraction": 0.7761194109916687, "alphanum_fraction": 0.7843540906906128, "avg_line_length": 31.383333206176758, "blob_id": "2a73fc55cd2c3466b1acd58619225065f123c04f", "content_id": "231a8bf5119b521877866cd0f4051f324fcba89d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1943, "license_type": "permissive", "max_line_length": 120, "num_lines": 60, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/multitenancy/TenantInfoFactory.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS 
IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.multitenancy;\n\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.springframework.beans.factory.FactoryBean;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.context.annotation.Primary;\nimport org.springframework.stereotype.Component;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.provider.interfaces.ITenantFactory;\nimport org.opengroup.osdu.core.common.model.tenant.TenantInfo;\nimport org.springframework.web.context.annotation.RequestScope;\n\n@Component\n@RequestScope\n@Primary\npublic class TenantInfoFactory implements FactoryBean<TenantInfo> {\n\n\t@Autowired\n\tprivate ITenantFactory tenantFactory;\n\n\t@Autowired\n\tprivate DpsHeaders headers;\n\n\t@Override\n\tpublic TenantInfo getObject() throws Exception {\n\t\tString id = this.headers.getPartitionIdWithFallbackToAccountId();\n\t\tTenantInfo tenantInfo = this.tenantFactory.getTenantInfo(id);\n\t\tif (tenantInfo == null) {\n\t\t\tthrow AppException.createUnauthorized(String.format(\"could not retrieve tenant info for data partition id: %s\", id));\n\t\t}\n\t\treturn tenantInfo;\n\t}\n\n\t@Override\n\tpublic Class<?> getObjectType() {\n\t\treturn TenantInfo.class;\n\t}\n\n\t@Override\n\tpublic boolean isSingleton() {\n\t\treturn false;\n\t}\n}\n" }, { "alpha_fraction": 0.8545454740524292, "alphanum_fraction": 0.8545454740524292, "avg_line_length": 54, "blob_id": "6b042f90ea45bcdc8ca9f9ea371d97a3516d0ed9", "content_id": "b0ac9d4e8070c914d04239fa2c415ea58c463d63", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "permissive", "max_line_length": 54, "num_lines": 1, "path": "/osdu-r2/os-delivery/CONTRIBUTING.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "This project is currently not accepting contributions.\n" }, { "alpha_fraction": 0.7255520224571228, "alphanum_fraction": 0.7315106987953186, "avg_line_length": 28.71875, "blob_id": "5b3654cbbcc356398c9195a9d7b2e00ec4b149eb", "content_id": "ae8532cf2dec3f21dee69171bab7527a8111c871", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2853, "license_type": "permissive", "max_line_length": 106, "num_lines": 96, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/logging/JaxRsDpsLog.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.logging;\n\nimport 
org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.http.HeadersToLog;\nimport org.opengroup.osdu.core.common.model.http.Request;\nimport org.opengroup.osdu.core.common.logging.audit.AuditPayload;\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.context.annotation.RequestScope;\n\nimport javax.inject.Inject;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.Map;\n\n@Component\n@RequestScope\npublic class JaxRsDpsLog implements AutoCloseable {\n\n\t@Value(\"${LOG_PREFIX}\")\n private String LOG_PREFIX;\n\n\tprivate ILogger log;\n\tprivate DpsHeaders headers;\n\n\t@Inject\n\tpublic JaxRsDpsLog(ILogger log, DpsHeaders headers){\n\t\tthis.log = log;\n\t\tthis.headers = headers;\n\t}\n\n\tpublic void audit(AuditPayload auditPayload) {\n\t\tlog.audit(LOG_PREFIX + \".audit\", auditPayload, this.getLabels());\n\t}\n\n\tpublic void request(Request httpRequest) {\n\t\tlog.request(LOG_PREFIX + \".request\", httpRequest, this.getLabels());\n\t}\n\n\tpublic void info(String message) {\n\t\tlog.info(LOG_PREFIX + \".app\", message, this.getLabels());\n\t}\n\n\tpublic void warning(String message) {\n\t\tlog.warning(LOG_PREFIX + \".app\", message, this.getLabels());\n\t}\n\n\tpublic void warning(List<String> messages) {\n\t\tif (messages == null || messages.isEmpty()) {\n\t\t\treturn;\n\t\t}\n\t\tint sn = 0;\n\t\tStringBuilder sb = new StringBuilder();\n\t\tfor (String s : messages) {\n\t\t\tsb.append(String.format(\"%d: %s\", sn++, s)).append(System.lineSeparator());\n\t\t}\n\t\tlog.warning(LOG_PREFIX + \".app\", sb.toString(), this.getLabels());\n\t}\n\n\tpublic void warning(String message, Exception e) {\n\t\tlog.warning(LOG_PREFIX + \".app\", message, e, this.getLabels());\n\t}\n\n\tpublic void error(String message) {\n\t\tlog.error(LOG_PREFIX + \".app\", message, this.getLabels());\n\t}\n\n\tpublic void error(String message, Exception e) {\n\t\tlog.error(LOG_PREFIX + \".app\", message, e, this.getLabels());\n\t}\n\n\t@Override\n\tpublic void close() throws Exception {\n\t}\n\n\tprivate Map<String, String> getLabels() {\n\t\treturn new HeadersToLog(Collections.emptyList()).createStandardLabelsFromMap(this.headers.getHeaders());\n\t}\n}\n" }, { "alpha_fraction": 0.6489277482032776, "alphanum_fraction": 0.6536933779716492, "avg_line_length": 36.582088470458984, "blob_id": "4a09afa2896d5398d722980cf70329b6671ce165", "content_id": "968ff96f754800a4b0e2d96b3c9487ac68b677f8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2518, "license_type": "permissive", "max_line_length": 118, "num_lines": 67, "path": "/osdu-r2/os-python-sdk/osdu_api/model/record.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom osdu_api.model.acl import Acl\nfrom 
osdu_api.model.legal import Legal\nfrom osdu_api.model.legal_compliance import LegalCompliance\nfrom osdu_api.model.record_ancestry import RecordAncestry\n\n'''\nA record model mirroring what's found in core common\n'''\nclass Record:\n def __init__(self, id: str, version: int, kind: str, acl: Acl, legal: Legal, data: dict, ancestry: RecordAncestry,\n meta: dict):\n self.id = id\n self.version = version\n self.kind = kind\n self.acl = acl\n self.legal = legal\n self.data = data\n self.ancestry = ancestry\n self.meta = meta\n\n '''\n Overloaded constructor meant to throw KeyError if any record values are missing\n from the dict\n '''\n @classmethod\n def from_dict(cls, record_dict: dict):\n id = record_dict['id']\n version = record_dict['version']\n kind = record_dict['kind']\n acl = Acl(record_dict['acl']['viewers'], record_dict['acl']['owners'])\n legal = Legal(record_dict['legal']['legaltags'], record_dict['legal']['otherRelevantDataCountries'], \n LegalCompliance[record_dict['legal']['status']])\n data = record_dict['data']\n meta = record_dict['meta']\n\n parents = []\n try:\n parents = record_dict['ancestry']['parents']\n except KeyError:\n # warn the user that ancestry wasn't found, not essential attribute\n print('Attribute \"ancestry\" is missing from dict being converted to record')\n\n ancestry = RecordAncestry(parents)\n\n return cls(id, version, kind, acl, legal, data, ancestry, meta)\n\n def convert_to_dict(self):\n record_converted = self.__dict__\n record_converted['acl'] = self.acl.__dict__\n record_converted['legal'] = self.legal.get_dict()\n record_converted['ancestry'] = self.ancestry.__dict__\n return record_converted\n" }, { "alpha_fraction": 0.781993567943573, "alphanum_fraction": 0.7855305671691895, "avg_line_length": 37.875, "blob_id": "d6c5e0cb672d2fba4b801790ab1679da937a8867", "content_id": "1bf2a41503810da1c2290c7356dce765cf74085a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3110, "license_type": "permissive", "max_line_length": 93, "num_lines": 80, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/service/WorkflowStatusServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.service;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.workflow.exception.WorkflowNotFoundException;\nimport org.opengroup.osdu.workflow.model.GetStatusRequest;\nimport org.opengroup.osdu.workflow.model.GetStatusResponse;\nimport org.opengroup.osdu.workflow.model.UpdateStatusRequest;\nimport org.opengroup.osdu.workflow.model.UpdateStatusResponse;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.provider.interfaces.IValidationService;\nimport 
org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class WorkflowStatusServiceImpl implements IWorkflowStatusService {\n\n final IValidationService validationService;\n final IWorkflowStatusRepository workflowStatusRepository;\n\n @Override\n public GetStatusResponse getWorkflowStatus(GetStatusRequest request, DpsHeaders headers) {\n log.debug(\"Request get workflow status with parameters : {}\", request);\n\n validationService.validateGetStatusRequest(request);\n\n WorkflowStatus workflowStatus = workflowStatusRepository\n .findWorkflowStatus(request.getWorkflowId());\n\n if (workflowStatus == null) {\n throw new WorkflowNotFoundException(\n String.format(\"Workflow for workflow id - %s not found\", request.getWorkflowId()));\n }\n\n GetStatusResponse response = GetStatusResponse.builder()\n .workflowStatusType(workflowStatus.getWorkflowStatusType()).build();\n\n log.debug(\"Get workflow status result: {}\", response);\n return response;\n }\n\n @Override\n public UpdateStatusResponse updateWorkflowStatus(UpdateStatusRequest request,\n DpsHeaders headers) {\n log.debug(\"Request update workflow status with parameters : {}\", request);\n\n validationService.validateUpdateStatusRequest(request);\n\n WorkflowStatus workflowStatus = workflowStatusRepository\n .updateWorkflowStatus(request.getWorkflowId(), request.getWorkflowStatusType());\n\n UpdateStatusResponse response = UpdateStatusResponse.builder()\n .workflowId(workflowStatus.getWorkflowId())\n .workflowStatusType(workflowStatus.getWorkflowStatusType()).build();\n\n log.debug(\"Get workflow status result: {}\", response);\n return response;\n }\n\n}\n" }, { "alpha_fraction": 0.5540961027145386, "alphanum_fraction": 0.563083291053772, "avg_line_length": 33.03529357910156, "blob_id": "97f9d911108a1d2cc53842ab1a965179a3a006f0", "content_id": "703055fd14fce6512fa2972d81d1b8c3668beea2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2893, "license_type": "permissive", "max_line_length": 136, "num_lines": 85, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/http/ResponseHeaders.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\n\npublic class ResponseHeaders {\n public static final Map<String, List<Object>> STANDARD_RESPONSE_HEADERS = new HashMap<>();\n\n static {\n STANDARD_RESPONSE_HEADERS.put(\"Access-Control-Allow-Origin\", new ArrayList<Object>() {\n {\n add(\"*\");\n }\n });\n 
STANDARD_RESPONSE_HEADERS.put(\"Access-Control-Allow-Headers\", new ArrayList<Object>() {\n {\n add(\"origin, content-type, accept, authorization, account-id, data-partition-id, correlation-id, on-behalf-of, appkey\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"Access-Control-Allow-Methods\", new ArrayList<Object>() {\n {\n add(\"GET, POST, PUT, DELETE, OPTIONS, HEAD\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"Access-Control-Allow-Credentials\", new ArrayList<Object>() {\n {\n add(\"true\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"X-Frame-Options\", new ArrayList<Object>() {\n {\n add(\"DENY\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"X-XSS-Protection\", new ArrayList<Object>() {\n {\n add(\"1; mode=block\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"X-Content-Type-Options\", new ArrayList<Object>() {\n {\n add(\"nosniff\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"Cache-Control\", new ArrayList<Object>() {\n {\n add(\"no-cache, no-store, must-revalidate\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"Content-Security-Policy\", new ArrayList<Object>() {\n {\n add(\"default-src 'self'\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"Strict-Transport-Security\", new ArrayList<Object>() {\n {\n add(\"max-age=31536000; includeSubDomains\");\n }\n });\n STANDARD_RESPONSE_HEADERS.put(\"Expires\", new ArrayList<Object>() {\n {\n add(\"0\");\n }\n });\n }\n}\n" }, { "alpha_fraction": 0.7470378875732422, "alphanum_fraction": 0.7577013969421387, "avg_line_length": 33.448978424072266, "blob_id": "d18a6ad2be9cba9b478166b54a4768591f212234", "content_id": "471f6f576503200d56ccf7a7cf6e862df8f4c1c5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1688, "license_type": "permissive", "max_line_length": 111, "num_lines": 49, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/QueryRequest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.SwaggerDoc;\nimport org.opengroup.osdu.core.common.model.search.validation.ValidOffset;\n\nimport javax.validation.constraints.Min;\n\n@Data\n@NoArgsConstructor\n@AllArgsConstructor\n@ValidOffset\npublic class QueryRequest extends Query {\n\n @Min(value = 0, message = SwaggerDoc.OFFSET_VALIDATION_MIN_MSG)\n @JsonProperty(\"offset\")\n @ApiModelProperty(value = SwaggerDoc.OFFSET_DESCRIPTION, dataType = \"java.lang.Integer\", example = \"0\")\n private int from;\n\n // aggregation: only make it available in pre demo for now\n @ApiModelProperty(value = SwaggerDoc.AGGREGATEBY_DESCRIPTION, dataType = 
\"java.lang.String\", hidden = true)\n private String aggregateBy;\n\n @Override\n public String toString(){\n return new com.google.gson.Gson().toJson(this);\n }\n}\n" }, { "alpha_fraction": 0.7423102259635925, "alphanum_fraction": 0.7448185086250305, "avg_line_length": 40.168479919433594, "blob_id": "a4e3d0d5a967d3f4f0a0720f92cc3f7c7ed76220", "content_id": "73cf31d700be2f6a8f663a0440302544eb72e5e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7575, "license_type": "permissive", "max_line_length": 99, "num_lines": 184, "path": "/osdu-r2/os-workflow/provider/workflow-gcp/src/main/java/org/opengroup/osdu/workflow/provider/gcp/repository/FirestoreWorkflowStatusRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.repository;\n\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.AIRFLOW_RUN_ID;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.SUBMITTED_AT;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.SUBMITTED_BY;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.WORKFLOW_ID;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.WORKFLOW_STATUS_TYPE;\n\nimport com.google.api.core.ApiFuture;\nimport com.google.cloud.firestore.DocumentReference;\nimport com.google.cloud.firestore.DocumentSnapshot;\nimport com.google.cloud.firestore.FieldValue;\nimport com.google.cloud.firestore.Firestore;\nimport com.google.cloud.firestore.QueryDocumentSnapshot;\nimport com.google.cloud.firestore.QuerySnapshot;\nimport com.google.cloud.firestore.WriteResult;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.concurrent.ExecutionException;\nimport java.util.concurrent.Future;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.apache.commons.lang3.ObjectUtils;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusNotFoundException;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusNotUpdatedException;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusQueryException;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\nimport org.springframework.stereotype.Repository;\n\n@Repository\n@Slf4j\n@RequiredArgsConstructor\npublic class FirestoreWorkflowStatusRepository implements IWorkflowStatusRepository {\n\n private static final String COLLECTION_NAME = \"workflow-status\";\n\n final Firestore firestore;\n\n @Override\n public WorkflowStatus findWorkflowStatus(String workflowId) {\n log.debug(\"Requesting workflow status by workflow id - {}\", workflowId);\n 
ApiFuture<QuerySnapshot> query = firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, workflowId)\n .get();\n\n QuerySnapshot querySnapshot = getSafety(query,\n String.format(\"Failed to find a workflow status by Workflow id - %s\", workflowId));\n\n List<QueryDocumentSnapshot> documents = querySnapshot.getDocuments();\n\n if (documents.size() > 1) {\n throw new WorkflowStatusQueryException(\n String.format(\"Find workflow status returned %s documents(s), expected 1, query by\"\n + \" Workflow id - %s\",\n documents.size(), workflowId));\n }\n\n WorkflowStatus workflowStatus = documents.isEmpty()\n ? null\n : buildWorkflowStatus(documents.get(0));\n\n log.debug(\"Found workflow status : {}\", workflowStatus);\n return workflowStatus;\n }\n\n @Override\n public WorkflowStatus saveWorkflowStatus(WorkflowStatus workflowStatus) {\n\n log.info(\"Saving workflow status location : {}\", workflowStatus);\n final String errorMsg = \"Exceptions during saving workflow status: \" + workflowStatus;\n\n Map<String, Object> data = getWorkflowStatusData(workflowStatus);\n ApiFuture<DocumentReference> query = firestore.collection(COLLECTION_NAME).add(data);\n DocumentReference addedDocRef = getSafety(query, errorMsg);\n log.info(\"Fetch DocumentReference pointing to a new document with an auto-generated ID : {}\",\n addedDocRef);\n\n DocumentSnapshot saved = getSafety(addedDocRef.get(),\n \"Saved Workflow status should exist\");\n log.info(\"Fetch saved workflow status : {}\", saved);\n return buildWorkflowStatus(saved);\n }\n\n @Override\n public WorkflowStatus updateWorkflowStatus(String workflowId,\n WorkflowStatusType workflowStatusType) {\n\n log.info(\"Update workflow status for workflow id: {}, new status: {}\", workflowId,\n workflowStatusType);\n\n ApiFuture<QuerySnapshot> query = firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, workflowId)\n .get();\n\n QuerySnapshot querySnapshot = getSafety(query,\n String.format(\"Failed to find a workflow status by Workflow id - %s\", workflowId));\n\n List<QueryDocumentSnapshot> documents = querySnapshot.getDocuments();\n\n if (documents.size() > 1) {\n throw new WorkflowStatusQueryException(\n String.format(\"Found more than one (%s) workflow status documents, expected 1, query by\"\n + \" Workflow id - %s\",\n documents.size(), workflowId));\n }\n\n if (documents.isEmpty()) {\n throw new WorkflowStatusNotFoundException(\n String.format(\"Workflow status for Workflow id: %s not found\", workflowId));\n }\n\n String documentId = documents.get(0).getId();\n WorkflowStatus workflowStatus = buildWorkflowStatus(documents.get(0));\n\n if (workflowStatus.getWorkflowStatusType().equals(workflowStatusType)) {\n throw new WorkflowStatusNotUpdatedException(String.format(\n \"Workflow status for workflow id: %s already has status:%s and can not be updated\",\n workflowId, workflowStatusType));\n }\n\n workflowStatus.setWorkflowStatusType(workflowStatusType);\n\n ApiFuture<WriteResult> updateQuery = firestore.collection(COLLECTION_NAME).document(documentId)\n .update(WORKFLOW_STATUS_TYPE, workflowStatusType.toString());\n\n getSafety(updateQuery,\n String.format(\"Failed to update workflow status document, workflow id: %s\", workflowId));\n\n return workflowStatus;\n }\n\n private <T> T getSafety(Future<T> future, String errorMsg) {\n try {\n return future.get();\n } catch (InterruptedException e) {\n Thread.currentThread().interrupt();\n throw new WorkflowStatusQueryException(errorMsg, e);\n } catch (ExecutionException e) {\n throw new 
WorkflowStatusQueryException(errorMsg, e);\n }\n }\n\n private WorkflowStatus buildWorkflowStatus(DocumentSnapshot snap) {\n log.info(\"Build workflow status. Document snapshot : {}\", snap.getData());\n return WorkflowStatus.builder()\n .workflowId(snap.getString(WORKFLOW_ID))\n .airflowRunId(snap.getString(AIRFLOW_RUN_ID))\n .workflowStatusType(WorkflowStatusType.valueOf(snap.getString(WORKFLOW_STATUS_TYPE)))\n .submittedAt(snap.getDate(SUBMITTED_AT))\n .submittedBy(snap.getString(SUBMITTED_BY)).build();\n }\n\n private Map<String, Object> getWorkflowStatusData(WorkflowStatus workflowStatus) {\n Object submittedAt = ObjectUtils.defaultIfNull(workflowStatus.getSubmittedAt(),\n FieldValue.serverTimestamp());\n\n Map<String, Object> data = new HashMap<>();\n data.put(WORKFLOW_ID, workflowStatus.getWorkflowId());\n data.put(AIRFLOW_RUN_ID, workflowStatus.getAirflowRunId());\n data.put(WORKFLOW_STATUS_TYPE, workflowStatus.getWorkflowStatusType().name());\n data.put(SUBMITTED_AT, submittedAt);\n data.put(SUBMITTED_BY, workflowStatus.getSubmittedBy());\n return data;\n }\n}\n" }, { "alpha_fraction": 0.7586941123008728, "alphanum_fraction": 0.7612964510917664, "avg_line_length": 35.75652313232422, "blob_id": "184ea5fc44b6a9e68cfcf4065032818aa214e7ca", "content_id": "d92b0669a34695d2832735e4cafd279c6e77b951", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4227, "license_type": "permissive", "max_line_length": 97, "num_lines": 115, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/service/DelfiSearchService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service;\n\nimport static com.osdu.request.OsduHeader.extractHeaderByName;\nimport static java.util.Objects.isNull;\n\nimport com.osdu.client.delfi.DelfiSearchClient;\nimport com.osdu.exception.SearchException;\nimport com.osdu.mapper.SearchObjectMapper;\nimport com.osdu.mapper.SearchResultMapper;\nimport com.osdu.model.SearchObject;\nimport com.osdu.model.SearchResult;\nimport com.osdu.model.delfi.DelfiSearchObject;\nimport com.osdu.model.delfi.DelfiSearchResult;\nimport com.osdu.model.osdu.OsduSearchObject;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.request.OsduHeader;\nimport javax.inject.Named;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.springframework.messaging.MessageHeaders;\nimport org.springframework.stereotype.Service;\n\n/**\n * Delfi API query service.\n */\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class DelfiSearchService implements SearchService {\n\n public static final String KIND_HEADER_KEY = \"kind\";\n\n final DelfiPortalProperties portalProperties;\n\n @Named\n final SearchObjectMapper searchObjectMapper;\n @Named\n final SearchResultMapper 
searchResultMapper;\n\n  final DelfiSearchClient delfiSearchClient;\n  final AuthenticationService authenticationService;\n\n  /**\n   * NOT IMPLEMENTED YET Searches Delfi partition using index.\n   *\n   * @param searchObject parameters to use during search\n   * @param headers      headers of the original search request to get authorization header from\n   *                     them\n   * @return {@link SearchResult} the result of the search from Delfi portal\n   */\n  @Override\n  public SearchResult searchIndexWithCursor(SearchObject searchObject, MessageHeaders headers) {\n    throw new NotImplementedException();\n  }\n\n  /**\n   * Searches Delfi partition.\n   *\n   * @param searchObject parameters to use during search\n   * @param headers      headers of the original search request to get authorization header from\n   *                     them\n   * @return {@link SearchResult} the result of the search from Delfi portal\n   */\n  @Override\n  public SearchResult searchIndex(SearchObject searchObject, MessageHeaders headers) {\n    log.debug(\"Received request to query Delfi Portal for data with following arguments: {},{}\",\n        searchObject, headers);\n\n    String kind = extractHeaderByName(headers, KIND_HEADER_KEY);\n    String partition = extractHeaderByName(headers, OsduHeader.PARTITION);\n    String authorizationToken = extractHeaderByName(headers, OsduHeader.AUTHORIZATION);\n\n    authenticationService.checkAuthentication(authorizationToken, partition);\n\n    checkIfInputParametersValid((OsduSearchObject) searchObject);\n\n    DelfiSearchObject delfiSearchObject = searchObjectMapper\n        .osduToDelfi((OsduSearchObject) searchObject, kind, partition);\n    DelfiSearchResult searchResult = delfiSearchClient.searchIndex(\n        authorizationToken,\n        portalProperties.getAppKey(),\n        partition,\n        delfiSearchObject);\n    SearchResult osduSearchResult = searchResultMapper\n        .delfiToOsdu(searchResult, (OsduSearchObject) searchObject);\n    log.debug(\"Received search result: {}\", osduSearchResult);\n    return osduSearchResult;\n  }\n\n  private void checkIfInputParametersValid(OsduSearchObject searchObject) {\n    if (isNull(searchObject.getFulltext())\n        && isNull(searchObject.getMetadata())\n        && isNull(searchObject.getGeoCentroid())\n        && isNull(searchObject.getGeoLocation())) {\n      throw new SearchException(\"Input parameters validation fail - \" + searchObject);\n    }\n  }\n}\n" }, { "alpha_fraction": 0.7891994118690491, "alphanum_fraction": 0.794410228729248, "avg_line_length": 39.8301887512207, "blob_id": "b1f7f62b53870bf008e7820f60dd951e05049928", "content_id": "a078cb09d116821b199faa1e90d33d945ee84d7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2111, "license_type": "permissive", "max_line_length": 86, "num_lines": 53, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/api/FileListApi.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage 
org.opengroup.osdu.delivery.api;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.file.FileListRequest;\nimport org.opengroup.osdu.core.common.model.file.FileListResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.storage.StorageRole;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileListService;\nimport org.springframework.security.access.prepost.PreAuthorize;\nimport org.springframework.validation.annotation.Validated;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\nimport org.springframework.web.bind.annotation.RestController;\nimport org.springframework.web.context.annotation.RequestScope;\n\n@Slf4j\n@RequiredArgsConstructor\n@RestController\n@RequestScope\n@Validated\npublic class FileListApi {\n\n final DpsHeaders headers;\n final FileListService fileListService;\n\n // TODO: Create the permission for os-delivery and change pre authorize annotation\n @PostMapping(\"/getFileList\")\n @PreAuthorize(\"@authorizationFilter.hasPermission('\" + StorageRole.CREATOR + \"')\")\n public FileListResponse getFileList(@RequestBody FileListRequest request) {\n log.debug(\"File list request received : {}\", request);\n FileListResponse fileListResponse = fileListService.getFileList(request, headers);\n log.debug(\"File list result ready : {}\", fileListResponse);\n return fileListResponse;\n }\n\n}\n" }, { "alpha_fraction": 0.7894363403320312, "alphanum_fraction": 0.7933356761932373, "avg_line_length": 42.400001525878906, "blob_id": "72cbd7707abac10ce8b0a0cc7d1bd875f48fa279", "content_id": "9ba298eb8196a87199483c7f2797e429aa0a0d68", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2821, "license_type": "permissive", "max_line_length": 98, "num_lines": 65, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/api/FileLocationApi.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.api;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.file.FileLocationRequest;\nimport org.opengroup.osdu.core.common.model.file.FileLocationResponse;\nimport org.opengroup.osdu.core.common.model.file.LocationRequest;\nimport org.opengroup.osdu.core.common.model.file.LocationResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.storage.StorageRole;\nimport org.opengroup.osdu.delivery.provider.interfaces.LocationService;\nimport org.springframework.security.access.prepost.PreAuthorize;\nimport org.springframework.validation.annotation.Validated;\nimport 
org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\nimport org.springframework.web.bind.annotation.RestController;\nimport org.springframework.web.context.annotation.RequestScope;\n\n@Slf4j\n@RequiredArgsConstructor\n@RestController\n@RequestScope\n@Validated\npublic class FileLocationApi {\n\n final DpsHeaders headers;\n final LocationService locationService;\n\n // TODO: Create the permission for os-delivery and change pre authorize annotation\n @PostMapping(\"/getLocation\")\n @PreAuthorize(\"@authorizationFilter.hasPermission('\" + StorageRole.CREATOR + \"')\")\n public LocationResponse getLocation(@RequestBody LocationRequest request) {\n log.debug(\"Location request received : {}\", request);\n LocationResponse locationResponse = locationService.getLocation(request, headers);\n log.debug(\"Location result ready : {}\", locationResponse);\n return locationResponse;\n }\n\n // TODO: Create the permission for os-delivery and change pre authorize annotation\n @PostMapping(\"/getFileLocation\")\n @PreAuthorize(\"@authorizationFilter.hasPermission('\" + StorageRole.CREATOR + \"')\")\n public FileLocationResponse getFileLocation(@RequestBody FileLocationRequest request) {\n log.debug(\"File location request received : {}\", request);\n FileLocationResponse fileLocationResponse = locationService.getFileLocation(request, headers);\n log.debug(\"File location result ready : {}\", fileLocationResponse);\n return fileLocationResponse;\n }\n\n}\n" }, { "alpha_fraction": 0.7306859493255615, "alphanum_fraction": 0.7422382831573486, "avg_line_length": 30.454545974731445, "blob_id": "1bb7b6059ce28a997b8aafef0dff6840ea6f0c96", "content_id": "759f6144b901037be777bf5d869fdc1131e4f405", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1385, "license_type": "permissive", "max_line_length": 123, "num_lines": 44, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/SortQuery.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.SwaggerDoc;\n\nimport java.util.List;\n\n@Data\n@NoArgsConstructor\npublic class SortQuery {\n\n @ApiModelProperty(value = SwaggerDoc.SORT_FIELD_DESCRIPTION, dataType = \"[Ljava.lang.String;\")\n private List<String> field;\n\n @ApiModelProperty(value = SwaggerDoc.SORT_ORDER_DESCRIPTION, dataType = \"[Lorg.opengroup.osdu.search.model.SortOrder;\")\n private List<SortOrder> order;\n\n public String getFieldByIndex(int index) {\n return field.get(index);\n }\n\n public SortOrder getOrderByIndex(int index) {\n return order.get(index);\n }\n}\n\n" }, { 
"alpha_fraction": 0.7451944351196289, "alphanum_fraction": 0.7472507953643799, "avg_line_length": 35.675411224365234, "blob_id": "6404bb75aa171b6ac9b73a0ad4407426f5b11a46", "content_id": "e94c0cefa7c51a807aa304ea64c0ace6bed1e0cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 11185, "license_type": "permissive", "max_line_length": 102, "num_lines": 305, "path": "/compatibility-layer/service/ingest/src/test/java/com/osdu/service/delfi/DelfiIngestionServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.Mockito.inOrder;\nimport static org.mockito.Mockito.mock;\nimport static org.mockito.Mockito.times;\n\nimport com.google.cloud.storage.Blob;\nimport com.osdu.ReplaceCamelCase;\nimport com.osdu.client.DelfiIngestionClient;\nimport com.osdu.client.delfi.RecordDataFields;\nimport com.osdu.exception.IngestException;\nimport com.osdu.exception.OsduBadRequestException;\nimport com.osdu.model.Record;\nimport com.osdu.model.RequestContext;\nimport com.osdu.model.delfi.DelfiRecord;\nimport com.osdu.model.delfi.signed.SignedFile;\nimport com.osdu.model.delfi.signed.SignedUrlResult;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.model.type.base.OsduObject;\nimport com.osdu.model.type.file.FileData;\nimport com.osdu.model.type.file.FileGroupTypeProperties;\nimport com.osdu.model.type.manifest.ManifestFile;\nimport com.osdu.service.IngestionService;\nimport com.osdu.service.JsonUtils;\nimport com.osdu.service.PortalService;\nimport com.osdu.service.StorageService;\nimport java.net.URL;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport org.apache.commons.lang3.RandomStringUtils;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.invocation.InvocationOnMock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.springframework.http.HttpStatus;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\npublic class DelfiIngestionServiceTest {\n\n private static final String APP_KEY = \"appKey\";\n private static final String AUTHORIZATION_TOKEN = \"authToken\";\n private static final String PARTITION = \"partition\";\n private static final String EXTERNAL_FILE_STORAGE = \"http://some.host.com\";\n private static final String FILE_ASSOCIATIVE_ID_1 = 
\"file-id-1\";\n private static final String STORAGE_HREF = \"http://storage.host.com\";\n\n @Mock\n private DelfiIngestionClient delfiIngestionClient;\n @Mock\n private StorageService storageService;\n @Mock\n private PortalService portalService;\n\n private DelfiPortalProperties portalProperties = DelfiPortalProperties.builder()\n .appKey(APP_KEY)\n .build();\n\n private IngestionService ingestionService;\n\n @BeforeEach\n public void setUp() {\n ingestionService = new DelfiIngestionService(portalProperties, delfiIngestionClient,\n storageService, portalService);\n }\n\n @Test\n public void shouldUploadFileToDelfiLandingZone() throws Exception {\n // given\n String fileName = \"file.las\";\n\n String fileExternalHref = EXTERNAL_FILE_STORAGE + \"/\" + fileName;\n URL fileExternalUrl = new URL(fileExternalHref);\n\n String relativeFilePath = \"/some-landing-zone/some-user/uuid\" + \"/\" + fileName;\n URL fileLocationUrl = new URL(STORAGE_HREF + relativeFilePath);\n\n ManifestFile file = getManifestFile(fileExternalHref);\n\n Blob blob = mock(Blob.class);\n given(storageService.uploadFileToStorage(fileExternalUrl, fileName))\n .willReturn(blob);\n given(delfiIngestionClient.getSignedUrlForLocation(fileName, AUTHORIZATION_TOKEN,\n APP_KEY, PARTITION)).willReturn(SignedUrlResult.builder()\n .responseCode(201)\n .locationUrl(fileLocationUrl)\n .relativeFilePath(relativeFilePath)\n .build());\n\n // when\n SignedFile signedFile = ingestionService.uploadFile(file, AUTHORIZATION_TOKEN, PARTITION);\n\n // then\n then(signedFile).isEqualTo(SignedFile.builder()\n .file(file)\n .locationUrl(fileLocationUrl)\n .relativeFilePath(relativeFilePath)\n .build());\n\n InOrder inOrder = inOrder(portalService, storageService, delfiIngestionClient);\n\n inOrder.verify(storageService).uploadFileToStorage(fileExternalUrl, fileName);\n inOrder.verify(delfiIngestionClient)\n .getSignedUrlForLocation(fileName, AUTHORIZATION_TOKEN, APP_KEY, PARTITION);\n inOrder.verify(storageService)\n .writeFileToSignedUrlLocation(blob, fileLocationUrl);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldThrowIngestExceptionWhenPreloadFilePathIsInvalid() throws Exception {\n // given\n String fileExternalHref = \"invalid-file-url\";\n\n ManifestFile file = getManifestFile(fileExternalHref);\n\n // when\n Throwable thrown = catchThrowable(\n () -> ingestionService.uploadFile(file, AUTHORIZATION_TOKEN, PARTITION));\n\n // then\n then(thrown)\n .isInstanceOf(IngestException.class)\n .hasMessage(\"Could not create URL from preload file path: invalid-file-url\");\n\n InOrder inOrder = inOrder(portalService, storageService, delfiIngestionClient);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldThrowIngestExceptionWhenPreloadFilePathDoesNotContainFilename() throws Exception {\n // given\n String fileExternalHref = EXTERNAL_FILE_STORAGE;\n URL fileExternalUrl = new URL(fileExternalHref);\n\n String relativeFilePath = \"/some-landing-zone/some-user/uuid\";\n URL fileLocationUrl = new URL(STORAGE_HREF + relativeFilePath);\n\n ManifestFile file = getManifestFile(fileExternalHref);\n\n // when\n Throwable thrown = catchThrowable(\n () -> ingestionService.uploadFile(file, AUTHORIZATION_TOKEN, PARTITION));\n\n // then\n then(thrown)\n .isInstanceOf(IngestException.class)\n .hasMessage(\"File name obtained is empty, URL : http://some.host.com\");\n\n InOrder inOrder = inOrder(portalService, storageService, delfiIngestionClient);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void 
shouldThrowIngestExceptionWhenUnableToGetSignedUrlInLandingZone() throws Exception {\n // given\n String fileName = \"invalid-file\";\n\n String fileExternalHref = EXTERNAL_FILE_STORAGE + \"/\" + fileName;\n URL fileExternalUrl = new URL(fileExternalHref);\n\n String relativeFilePath = \"/some-landing-zone/some-user/uuid\" + \"/\" + fileName;\n URL fileLocationUrl = new URL(STORAGE_HREF + relativeFilePath);\n\n ManifestFile file = getManifestFile(fileExternalHref);\n\n Blob blob = mock(Blob.class);\n given(storageService.uploadFileToStorage(fileExternalUrl, fileName))\n .willReturn(blob);\n given(delfiIngestionClient.getSignedUrlForLocation(fileName, AUTHORIZATION_TOKEN,\n APP_KEY, PARTITION)).willReturn(SignedUrlResult.builder()\n .responseCode(HttpStatus.BAD_REQUEST.value())\n .build());\n\n // when\n Throwable thrown = catchThrowable(\n () -> ingestionService.uploadFile(file, AUTHORIZATION_TOKEN, PARTITION));\n\n // then\n then(thrown)\n .isInstanceOf(IngestException.class)\n .hasMessage(\"Count not fetch a signed URL to landing zone for file: invalid-file\");\n\n InOrder inOrder = inOrder(portalService, storageService, delfiIngestionClient);\n inOrder.verify(storageService)\n .uploadFileToStorage(fileExternalUrl, fileName);\n inOrder.verify(delfiIngestionClient)\n .getSignedUrlForLocation(fileName, AUTHORIZATION_TOKEN, APP_KEY, PARTITION);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldMarkRecordsAsFailedAndSaveThem() {\n // given\n List<Record> records = Arrays.asList(getDelfiRecord(), getDelfiRecord(), getDelfiRecord());\n RequestContext requestContext = RequestContext.builder()\n .authorizationToken(AUTHORIZATION_TOKEN)\n .partition(PARTITION)\n .build();\n\n given(portalService.putRecord(any(Record.class), eq(AUTHORIZATION_TOKEN), eq(PARTITION)))\n .willAnswer(this::getFailRecordAnswer);\n\n // when\n List<Record> failedRecords = ingestionService.failRecords(records, requestContext);\n\n // then\n then(failedRecords)\n .hasSize(3)\n .extracting(\"data.osdu.ResourceLifecycleStatus\", String.class)\n .containsOnly(\"srn:reference-data/ResourceLifecycleStatus:RESCINDED:\");\n\n InOrder inOrder = inOrder(portalService, storageService, delfiIngestionClient);\n inOrder.verify(portalService, times(3))\n .putRecord(any(Record.class), eq(AUTHORIZATION_TOKEN), eq(PARTITION));\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldFallThroughExceptionDuringFailingRecords() {\n // given\n List<Record> records = Arrays.asList(getDelfiRecord(), getDelfiRecord(), getDelfiRecord());\n RequestContext requestContext = RequestContext.builder()\n .authorizationToken(AUTHORIZATION_TOKEN)\n .partition(PARTITION)\n .build();\n\n given(portalService.putRecord(any(Record.class), eq(AUTHORIZATION_TOKEN), eq(PARTITION)))\n .willAnswer(this::getFailRecordAnswer)\n .willThrow(new OsduBadRequestException(\"bad request\"));\n\n // when\n Throwable thrown = catchThrowable(() -> ingestionService.failRecords(records, requestContext));\n\n // then\n then(thrown)\n .isInstanceOf(OsduBadRequestException.class)\n .hasMessage(\"bad request\");\n\n InOrder inOrder = inOrder(portalService, storageService, delfiIngestionClient);\n inOrder.verify(portalService, times(2))\n .putRecord(any(Record.class), eq(AUTHORIZATION_TOKEN), eq(PARTITION));\n inOrder.verifyNoMoreInteractions();\n }\n\n private ManifestFile getManifestFile(String fileHref) {\n FileGroupTypeProperties fileGroupTypeProperties = new FileGroupTypeProperties();\n fileGroupTypeProperties.setPreLoadFilePath(fileHref);\n\n 
FileData fileData = new FileData();\n    fileData.setGroupTypeProperties(fileGroupTypeProperties);\n\n    ManifestFile file = new ManifestFile();\n    file.setAssociativeId(FILE_ASSOCIATIVE_ID_1);\n    file.setData(fileData);\n\n    return file;\n  }\n\n  private Record getDelfiRecord() {\n    OsduObject osduObject = new OsduObject();\n    osduObject.setResourceID(\"srn:file/las2:\" + RandomStringUtils.randomAlphabetic(32) + \":1\");\n    osduObject.setResourceLifecycleStatus(\"srn:reference-data/ResourceLifecycleStatus:CREATED:\");\n\n    Map<String, Object> data = new HashMap<>();\n    data.put(RecordDataFields.OSDU_DATA, osduObject);\n\n    return DelfiRecord.builder()\n        .data(data)\n        .build();\n  }\n\n  private Record getFailRecordAnswer(InvocationOnMock invocation) {\n    Record record = invocation.getArgument(0);\n    return JsonUtils.deepCopy(record, Record.class);\n  }\n\n}" }, { "alpha_fraction": 0.5523189306259155, "alphanum_fraction": 0.5899017453193665, "avg_line_length": 34.730613708496094, "blob_id": "fb2e7749161d6023e5d8b0e5585ce5698b41509b", "content_id": "704a482dcd10634c5249c27f8d8f07f93d2e49f8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8758, "license_type": "permissive", "max_line_length": 121, "num_lines": 245, "path": "/compatibility-layer/docs/API/Delivery API.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Delivery API overview\n\nThe Delivery service fetches records with well data or records with links to files from the DELFI Data Ecosystem.\nThe service can deliver OSDU Work Products, Work Product Components, and Files. For files, the service responds\nwith direct links to files and metadata of file records. \n\nFor input, the Delivery service expects a list of Subsurface Data Universe Resource Numbers (SRNs). The service delivers \nWork Products, Work Product Components, and files. For files, the service responds with direct links &mdash; \nsigned URLs &mdash; to files and file records metadata.\n\nTo obtain a signed URL, the service calls a dedicated DELFI endpoint with the authorization token, DELFI partition, and \napplication key (the key of the client’s application registered in DELFI). Using this data, DELFI signs the URLs to \nfiles and returns them to the Delivery service.\n\n## Delivery API\n\nAll the delivery endpoints are relative to the path **https://{project-id}.apigee.net/**. \n\n### POST /delivery\n\nFetch any records with well data or records with links to files from DELFI.\n\n| | Description |\n| --------------------- | --------------------------------------------------------------------- |\n| Authorization | Authorization token in the header: `Authorization: \"Bearer {token}\"`. | \n| URL parameters | None |\n| Request Content Type | `application/json` |\n| Response Content Type | `application/json` |\n\n### Delivery request example\n\nThe delivery request is a JSON document with a list of SRNs of the OSDU **Work Products**, **Work Product Components**, \n**Master**, and **Reference Data** to be delivered to the requester. 
Additionally, the request must contain the target \nregion ID.\n\nGeneric delivery request:\n\n```json\n{\n  \"SRNS\": [\"srn:file/type...\"],\n  \"TargetRegionID\": \"\"\n}\n``` \n\n#### File delivery request example\n\n```sh\ncurl -X POST \\\n  https://{Apigee URI}/delivery \\\n  -H 'Accept: */*' \\\n  -H 'Accept-Encoding: gzip, deflate' \\\n  -H 'Authorization: Bearer <your token here>' \\\n  -H 'Cache-Control: no-cache' \\\n  -H 'Connection: keep-alive' \\\n  -H 'Content-Length: 127' \\\n  -H 'Content-Type: application/json' \\\n  -H 'Host: {Apigee URI}' \\\n  -d '{\n    \"SRNS\":[\n        \"srn:file/las2:cca54c53bfee4ce4b985e3fd1678ad09:1\"\n    ],\n    \"TargetRegionID\": 123\n}'\n```\n\n> Note that the example request doesn't contain an actual Apigee URI or an authorization token.\n\n#### Response to a file delivery request\n\nThe Delivery service responds with the list of results for the given list of SRNs. The list of results for a file \nrequest may contain links to download locations (such as Google Cloud Storage) if any of the given SRNs is associated \nwith a file.\n\n```json\n{\n  \"UnprocessedSrns\": [],\n  \"Result\": [\n    {\n      \"FileLocation\": \"{signed URL to download the file}\",\n      \"Data\": {\n        \"ResourceID\": \"srn:file/las2:cca54c53bfee4ce4b985e3fd1678ad09:1\",\n        \"ResourceTypeID\": \"srn:type:file/las2:\",\n        \"ResourceHostRegionIDs\": [],\n        \"ResourceObjectCreationDatetime\": \"2019-11-14T16:37:11.273\",\n        \"ResourceVersionCreationDatetime\": \"2019-11-14T16:37:11.273\",\n        \"ResourceCurationStatus\": \"srn:reference-data/ResourceCurationStatus:CREATED:\",\n        \"ResourceLifecycleStatus\": \"srn:reference-data/ResourceLifecycleStatus:RECEIVED:\",\n        \"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n        \"Data\": {\n          \"GroupTypeProperties\": {\n            \"PreLoadFilePath\": \"{Link to File}\",\n            \"FileSource\": \"\",\n            \"FileSize\": 0\n          },\n          \"IndividualTypeProperties\": {},\n          \"ExtensionProperties\": {}\n        }\n      },\n      \"SRN\": \"srn:file/las2:cca54c53bfee4ce4b985e3fd1678ad09:1\"\n    }\n  ]\n}\n```\n\n> Note that the example response doesn't contain an actual link to the file.\n> `Result[0].Data.Data.GroupTypeProperties.PreLoadFilePath`.\n\n#### Delivery request example for Work Product Component\n\nThe following example demos a request for a Work Product Component.\n\n```sh\ncurl -X POST \\\n  https://{Apigee URI}/delivery \\\n  -H 'Accept: */*' \\\n  -H 'Accept-Encoding: gzip, deflate' \\\n  -H 'Authorization: Bearer {token}' \\\n  -H 'Cache-Control: no-cache' \\\n  -H 'Connection: keep-alive' \\\n  -H 'Content-Length: 127' \\\n  -H 'Content-Type: application/json' \\\n  -H 'Host: {Apigee URI}' \\\n  -d '{\n    \"SRNS\":[\n        \"srn:work-product-component/WellLog:0195eb311b1c422c8f78ffc93c63e4db:1\"\n    ],\n    \"TargetRegionID\": \"123\"\n}'\n```\n\n> Note that the example request doesn't contain an actual Apigee URI or an authorization token.\n\n#### Delivery response example for Work Product Component\n\n```json\n{\n  \"UnprocessedSrns\": [],\n  \"Result\": [\n    {\n      \"Data\": {\n        \"ResourceID\": \"srn:work-product-component/WellLog:0195eb311b1c422c8f78ffc93c63e4db:1\",\n        \"ResourceTypeID\": \"srn:type:work-product-component/WellLog:1\",\n        \"ResourceHostRegionIDs\": [],\n        \"ResourceObjectCreationDatetime\": \"2019-11-14T16:37:12.261\",\n        \"ResourceVersionCreationDatetime\": \"2019-11-14T16:37:12.261\",\n        \"ResourceCurationStatus\": \"srn:reference-data/ResourceCurationStatus:CREATED:\",\n        \"ResourceLifecycleStatus\": \"srn:reference-data/ResourceLifecycleStatus:RECEIVED:\",\n        
\"ResourceSecurityClassification\": \"srn:reference-data/ResourceSecurityClassification:RESTRICTED:\",\n \"Data\": {\n \"GroupTypeProperties\": {\n \"Files\": [\n \"srn:file/las2:cca54c53bfee4ce4b985e3fd1678ad09:1\"\n ],\n \"Artefacts\": []\n },\n \"IndividualTypeProperties\": {\n \"Name\": \"AKM-11 LOG\",\n \"Description\": \"Well Log\",\n \"WellboreID\": \"srn:master-data/Wellbore:1013:\",\n \"TopMeasuredDepth\": {\n \"Depth\": 2182.0004,\n \"UnitOfMeasure\": \"srn:reference-data/UnitOfMeasure:M:\"\n },\n \"BottomMeasuredDepth\": {\n \"Depth\": 2481.0,\n \"UnitOfMeasure\": \"srn:reference-data/UnitOfMeasure:M:\"\n },\n \"Curves\": [\n {\n \"Mnemonic\": \"DEPT\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:M:\"\n },\n {\n \"Mnemonic\": \"GR\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:GAPI:\"\n },\n {\n \"Mnemonic\": \"DT\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:US/F:\"\n },\n {\n \"Mnemonic\": \"RHOB\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:G/C3:\"\n },\n {\n \"Mnemonic\": \"DRHO\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:G/C3:\"\n },\n {\n \"Mnemonic\": \"NPHI\",\n \"TopDepth\": 2182.0,\n \"BaseDepth\": 2481.0,\n \"DepthUnit\": \"srn:reference-data/UnitOfMeasure:M:\",\n \"CurveUnit\": \"srn:reference-data/UnitOfMeasure:V/V:\"\n }\n ]\n },\n \"ExtensionProperties\": {}\n },\n \"AssociativeID\": \"wpc-1\",\n \"FileAssociativeIDs\": [\n \"f-1\"\n ]\n },\n \"SRN\": \"srn:work-product-component/WellLog:0195eb311b1c422c8f78ffc93c63e4db:1\"\n }\n ]\n}\n``` \n\n## Delivery statuses\n\nThe Delivery service returns the following statuses:\n\n* **401 Unauthorized**. Returned to the client if the request doesn’t contain a valid authorization header.\n\nResponse example if authorization fails.\n\n```json\n{\n \"timestamp\": \"2019-11-29T09:16:40.332+0000\",\n \"status\": 401,\n \"error\": \"Unauthorized\",\n \"message\": \"Missing authorization token\",\n \"path\": \"/\"\n}\n```\n\n* **200 Success**. 
The response contains the requested data, the list of files (if any), and the list of unprocessed\nSRNs (if any).\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7639859914779663, "avg_line_length": 31.685714721679688, "blob_id": "56ac8ea12aee17522e8e9ae3e7366fb38333a0ac", "content_id": "48c3df5dfceaebbc744dea7fac95ec4f1e827fa9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1144, "license_type": "permissive", "max_line_length": 75, "num_lines": 35, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/LegalTagProperties.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal;\n\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\nimport java.util.Map;\nimport java.util.Set;\n\n@Data\n@NoArgsConstructor\npublic class LegalTagProperties {\n private Map<String, String> countriesOfOrigin;\n private Map<String, String> otherRelevantDataCountries;\n private Set<String> securityClassifications;\n private Set<String> exportClassificationControlNumbers;\n private Set<String> personalDataTypes;\n private Set<String> dataTypes;\n}\n" }, { "alpha_fraction": 0.7013167142868042, "alphanum_fraction": 0.712404727935791, "avg_line_length": 36.97368240356445, "blob_id": "2a6cd1dc56b57cb92836388c9d61c93d68b4f050", "content_id": "e7ab29c5a569dd13007a17727e4ba70e876efe6e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1443, "license_type": "permissive", "max_line_length": 96, "num_lines": 38, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/units/ItemFactory.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.units;\n\nimport org.opengroup.osdu.core.common.model.units.impl.UnitEnergistics;\nimport org.opengroup.osdu.core.common.model.units.impl.UnitScaleOffset;\n\npublic class ItemFactory {\n private ItemFactory() {\n }\n\n public static IItem createModel(Object parsedRaw) {\n IItem result = null;\n if (parsedRaw instanceof 
org.opengroup.osdu.core.common.model.units.impl.Unit) {\n result = new Unit((org.opengroup.osdu.core.common.model.units.impl.Unit) parsedRaw);\n } else if (parsedRaw instanceof UnitScaleOffset) {\n result = new Unit((UnitScaleOffset) parsedRaw);\n } else if (parsedRaw instanceof UnitEnergistics) {\n result = new Unit((UnitEnergistics) parsedRaw);\n }\n return result;\n }\n}\n" }, { "alpha_fraction": 0.6086154580116272, "alphanum_fraction": 0.6258800029754639, "avg_line_length": 32.897727966308594, "blob_id": "183262c60b6b5cbd6a586e2aaccf6e9dc12143c3", "content_id": "396d21d50e89a15667f4e79b9c9ac471e52392ca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 5966, "license_type": "permissive", "max_line_length": 204, "num_lines": 176, "path": "/compatibility-layer/pom.xml", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n ~ Copyright 2019 Google LLC\n ~\n ~ Licensed under the Apache License, Version 2.0 (the \"License\");\n ~ you may not use this file except in compliance with the License.\n ~ You may obtain a copy of the License at\n ~\n ~ https://www.apache.org/licenses/LICENSE-2.0\n ~\n ~ Unless required by applicable law or agreed to in writing, software\n ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n ~ See the License for the specific language governing permissions and\n ~ limitations under the License.\n -->\n\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n\n <groupId>com.osdu</groupId>\n <artifactId>osdu-gcp</artifactId>\n <version>1.0.1-SNAPSHOT</version>\n <packaging>pom</packaging>\n\n <name>osdu-gcp</name>\n <description>OSDU GCP Root Pom</description>\n\n <modules>\n <module>common</module>\n <module>service</module>\n </modules>\n\n <properties>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n <java.version>1.8</java.version>\n <spring-cloud.version>Greenwich.SR2</spring-cloud.version>\n <org.mapstruct.version>1.3.0.Final</org.mapstruct.version>\n <org.springframwork.version>2.1.7.RELEASE</org.springframwork.version>\n <org.springfrawork.security.test.version>4.0.0.RELEASE</org.springfrawork.security.test.version>\n <org.projectlombok.version>1.18.8</org.projectlombok.version>\n <javax.inject.version>1</javax.inject.version>\n <spring-boot.version>2.1.6.RELEASE</spring-boot.version>\n <org.apache.httpcomponents.version>4.3.4</org.apache.httpcomponents.version>\n <assertj.version>3.14.0</assertj.version>\n <maven-checkstyle-plugin.version>3.1.0</maven-checkstyle-plugin.version>\n <maven.compiler.source>1.8</maven.compiler.source>\n <maven.compiler.target>1.8</maven.compiler.target>\n <docker.image.prefix>springio</docker.image.prefix>\n <google-api-services-storage.version>v1-rev20190624-1.30.1</google-api-services-storage.version>\n <maven-shade-plugin.version>3.2.1</maven-shade-plugin.version>\n </properties>\n\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.assertj</groupId>\n <artifactId>assertj-core</artifactId>\n <version>${assertj.version}</version>\n </dependency>\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-dependencies</artifactId>\n 
<version>${spring-boot.version}</version>\n <type>pom</type>\n <scope>import</scope>\n </dependency>\n <dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-dependencies</artifactId>\n <version>${spring-cloud.version}</version>\n <type>pom</type>\n <scope>import</scope>\n </dependency>\n </dependencies>\n </dependencyManagement>\n\n <dependencies>\n <dependency>\n <groupId>org.apache.commons</groupId>\n <artifactId>commons-lang3</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.core</groupId>\n <artifactId>jackson-databind</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.module</groupId>\n <artifactId>jackson-module-parameter-names</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.datatype</groupId>\n <artifactId>jackson-datatype-jdk8</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.datatype</groupId>\n <artifactId>jackson-datatype-jsr310</artifactId>\n </dependency>\n </dependencies>\n\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-checkstyle-plugin</artifactId>\n <version>3.1.0</version>\n <configuration>\n <configLocation>google_checks.xml</configLocation>\n </configuration>\n <executions>\n <execution>\n <goals>\n <goal>checkstyle</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-pmd-plugin</artifactId>\n <version>3.12.0</version>\n <configuration>\n <targetJdk>1.8</targetJdk>\n <excludes>\n <exclude>**/*Bean.java</exclude>\n <exclude>**/generated/*.java</exclude>\n </excludes>\n <excludeRoots>\n <excludeRoot>target/generated-sources/</excludeRoot>\n </excludeRoots>\n </configuration>\n <executions>\n <execution>\n <goals>\n <goal>pmd</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>com.github.spotbugs</groupId>\n <artifactId>spotbugs-maven-plugin</artifactId>\n <version>3.1.12</version>\n <executions>\n <execution>\n <goals>\n <goal>spotbugs</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.jacoco</groupId>\n <artifactId>jacoco-maven-plugin</artifactId>\n <version>0.8.4</version>\n <executions>\n <execution>\n <goals>\n <goal>prepare-agent</goal>\n </goals>\n </execution>\n <execution>\n <id>report</id>\n <goals>\n <goal>report</goal>\n </goals>\n <phase>test</phase>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n</project>\n" }, { "alpha_fraction": 0.6845901608467102, "alphanum_fraction": 0.6873770356178284, "avg_line_length": 41.95774459838867, "blob_id": "272fabedb57549aaeb0768709f7690b0f94fd4b5", "content_id": "4345c7344747185cb843845e5bc5be41602f4248", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6100, "license_type": "permissive", "max_line_length": 118, "num_lines": 142, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/entitlements/EntitlementsService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n 
*\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.entitlements;\n\nimport com.google.gson.JsonSyntaxException;\nimport org.apache.commons.lang3.StringUtils;\nimport org.opengroup.osdu.core.common.model.entitlements.EntitlementsException;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.entitlements.CreateGroup;\nimport org.opengroup.osdu.core.common.model.entitlements.GetMembers;\nimport org.opengroup.osdu.core.common.model.entitlements.GroupEmail;\nimport org.opengroup.osdu.core.common.model.entitlements.GroupInfo;\nimport org.opengroup.osdu.core.common.model.entitlements.Groups;\nimport org.opengroup.osdu.core.common.model.entitlements.MemberInfo;\nimport org.opengroup.osdu.core.common.model.entitlements.Members;\nimport org.opengroup.osdu.core.common.http.HttpRequest;\nimport org.opengroup.osdu.core.common.http.HttpResponse;\nimport org.opengroup.osdu.core.common.http.IHttpClient;\n\npublic class EntitlementsService implements IEntitlementsService {\n private final String rootUrl;\n private final IHttpClient httpClient;\n private final DpsHeaders headers;\n\n EntitlementsService(EntitlementsAPIConfig config,\n IHttpClient httpClient,\n DpsHeaders headers) {\n this.rootUrl = config.getRootUrl();\n this.httpClient = httpClient;\n this.headers = headers;\n if (config.apiKey != null) {\n headers.put(\"AppKey\", config.apiKey);\n }\n }\n\n @Override\n public MemberInfo addMember(GroupEmail groupEmail, MemberInfo memberInfo) throws EntitlementsException {\n String path = String.format(\"/groups/%s/members\", groupEmail.getGroupEmail());\n String url = this.createUrl(path);\n HttpResponse result = this.httpClient.send(\n HttpRequest.post(memberInfo).url(url).headers(this.headers.getHeaders()).build());\n return this.getResult(result, MemberInfo.class);\n }\n\n @Override\n public Members getMembers(GroupEmail groupEmail, GetMembers getMembers) throws EntitlementsException {\n String path = String.format(\"/groups/%s/members?cursor=%s&limit=%s&role=%s\",\n groupEmail.getGroupEmail(), getMembers.getCursor(), getMembers.getLimit(), getMembers.getRole());\n String url = this.createUrl(path);\n\n HttpResponse result = this.httpClient.send(\n HttpRequest.get().url(url).headers(this.headers.getHeaders()).build());\n return this.getResult(result, Members.class);\n }\n\n @Override\n public Groups getGroups() throws EntitlementsException {\n String path = String.format(\"/groups\");\n String url = this.createUrl(path);\n HttpRequest rq = HttpRequest.get().url(url).headers(this.headers.getHeaders()).build();\n HttpResponse result = this.httpClient.send(rq);\n Groups output = this.getResult(result, Groups.class);\n return output;\n }\n\n @Override\n public GroupInfo createGroup(CreateGroup group) throws EntitlementsException {\n String url = this.createUrl(\"/groups\");\n HttpResponse result = this.httpClient.send(\n HttpRequest.post(group).url(url).headers(this.headers.getHeaders()).build());\n GroupInfo output = this.getResult(result, GroupInfo.class);\n return output;\n }\n\n @Override\n public void deleteMember(String groupEmail, String memberEmail) throws EntitlementsException {\n String url = 
this.createUrl(String.format(\"/groups/%s/members/%s\", groupEmail, memberEmail));\n HttpResponse result = this.httpClient.send(\n HttpRequest.delete().url(url).headers(this.headers.getHeaders()).build());\n this.getResult(result, String.class);\n }\n\n @Override\n public Groups authorizeAny(String... groupNames) throws EntitlementsException {\n Groups groups = this.getGroups();\n if (groups.any(groupNames)) {\n return groups;\n } else {\n throw new EntitlementsException(\n String.format(\"User is unauthorized. %s does not belong to any of the given groups %s\",\n groups.getMemberEmail(), groupNames),\n null);\n }\n }\n\n @Override\n public void authenticate() throws EntitlementsException {\n String path = String.format(\"/auth/validate\");\n String url = this.createUrl(path);\n HttpRequest rq = HttpRequest.get().url(url).headers(this.headers.getHeaders()).build();\n HttpResponse result = this.httpClient.send(rq);\n this.getResult(result, Object.class);\n }\n\n private EntitlementsException generateEntitlementsException(HttpResponse result) {\n return new EntitlementsException(\n \"Error making request to Entitlements service. Check the inner HttpResponse for more info.\", result);\n }\n\n private String createUrl(String pathAndQuery) {\n return StringUtils.join(this.rootUrl, pathAndQuery);\n }\n\n private <T> T getResult(HttpResponse result, Class<T> type) throws EntitlementsException {\n if (result.isSuccessCode()) {\n try {\n return result.parseBody(type);\n } catch (JsonSyntaxException e) {\n throw new EntitlementsException(\"Error parsing response. Check the inner HttpResponse for more info.\",\n result);\n }\n } else {\n throw this.generateEntitlementsException(result);\n }\n }\n\n}\n" }, { "alpha_fraction": 0.7457988262176514, "alphanum_fraction": 0.7488757371902466, "avg_line_length": 37.76146697998047, "blob_id": "53efde207286cc76026efbff5e518bd3d23507dd", "content_id": "3357d6e0dd4b82d4aef46c3f66104c3d4c460cdc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4225, "license_type": "permissive", "max_line_length": 100, "num_lines": 109, "path": "/osdu-r2/os-workflow/provider/workflow-gcp-datastore/src/main/java/org/opengroup/osdu/workflow/provider/gcp/service/GoogleIapHelper.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.service;\n\nimport static com.google.api.client.http.HttpMethods.POST;\n\nimport com.google.api.client.http.GenericUrl;\nimport com.google.api.client.http.HttpRequest;\nimport com.google.api.client.http.HttpRequestInitializer;\nimport com.google.api.client.http.HttpTransport;\nimport com.google.api.client.http.javanet.NetHttpTransport;\nimport com.google.api.client.http.json.JsonHttpContent;\nimport com.google.api.client.json.jackson2.JacksonFactory;\nimport com.google.auth.http.HttpCredentialsAdapter;\nimport 
com.google.auth.oauth2.GoogleCredentials;\nimport com.google.auth.oauth2.IdTokenCredentials;\nimport com.google.auth.oauth2.IdTokenProvider;\nimport java.io.IOException;\nimport java.net.URI;\nimport java.net.URISyntaxException;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.Map;\nimport org.apache.http.NameValuePair;\nimport org.apache.http.client.utils.URLEncodedUtils;\nimport org.jsoup.Jsoup;\nimport org.jsoup.nodes.Document;\nimport org.opengroup.osdu.workflow.exception.RuntimeException;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.GoogleIamException;\nimport org.springframework.stereotype.Service;\n\n@Service\npublic class GoogleIapHelper {\n\n static final String IAM_SCOPE = \"https://www.googleapis.com/auth/iam\";\n final HttpTransport httpTransport = new NetHttpTransport();\n\n /**\n * Fetch Google IAP client ID\n * @param url service URL\n * @return IAP client ID\n */\n public String getIapClientId(String url) {\n try {\n Document doc = Jsoup.connect(url).get();\n\n String redirectLocation = doc.location();\n List<NameValuePair> queryParameters = URLEncodedUtils\n .parse(new URI(redirectLocation), StandardCharsets.UTF_8);\n\n return queryParameters.stream().filter(pair -> \"client_id\".equals(pair.getName())).findFirst()\n .orElseThrow(() -> new RuntimeException(\n String.format(\"No client_id found in redirect response to AirFlow - %s\", url)))\n .getValue();\n } catch (IOException | URISyntaxException e) {\n throw new RuntimeException(\"Exception during get Google IAP client id\", e);\n }\n }\n\n /**\n * Make request and add an IAP Bearer Authorization header with signed JWT token.\n */\n public HttpRequest buildIapRequest(String webServerUrl, String iapClientId,\n Map<String, Object> data) {\n try {\n JsonHttpContent jsonHttpContent = new JsonHttpContent(new JacksonFactory(), data);\n IdTokenProvider idTokenProvider = getIdTokenProvider();\n IdTokenCredentials credentials = IdTokenCredentials.newBuilder()\n .setIdTokenProvider(idTokenProvider)\n .setTargetAudience(iapClientId)\n .build();\n\n HttpRequestInitializer httpRequestInitializer = new HttpCredentialsAdapter(credentials);\n\n return httpTransport\n .createRequestFactory(httpRequestInitializer)\n .buildRequest(POST, new GenericUrl(webServerUrl), jsonHttpContent);\n } catch (IOException e) {\n throw new GoogleIamException(\"Exception when build authorized request\", e);\n }\n }\n\n private IdTokenProvider getIdTokenProvider() throws IOException {\n GoogleCredentials credentials =\n GoogleCredentials.getApplicationDefault().createScoped(Collections.singleton(IAM_SCOPE));\n // service account credentials are required to sign the jwt token\n if (!(credentials instanceof IdTokenProvider)) {\n throw new GoogleIamException(\n \"Google credentials : credentials that can provide id tokens expected\");\n }\n return (IdTokenProvider) credentials;\n }\n\n}\n" }, { "alpha_fraction": 0.768318772315979, "alphanum_fraction": 0.7731778621673584, "avg_line_length": 37.68421173095703, "blob_id": "960eca9b0786e49a7c668f83839e2659233574bd", "content_id": "f38252b8393ecc9545409c47177ac597fd0bf839", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5145, "license_type": "permissive", "max_line_length": 99, "num_lines": 133, "path": "/compatibility-layer/service/search/src/test/java/com/osdu/integration/SearchServiceIntegrationTests.java", "repo_name": "google/framework-for-osdu", "src_encoding": 
"UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.integration;\n\nimport static com.osdu.service.DelfiSearchService.KIND_HEADER_KEY;\nimport static org.junit.Assert.assertEquals;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.Mockito.when;\nimport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.osdu.client.delfi.DelfiSearchClient;\nimport com.osdu.model.delfi.DelfiSearchObject;\nimport com.osdu.model.delfi.DelfiSearchResult;\nimport com.osdu.model.delfi.geo.ByBoundingBox;\nimport com.osdu.model.delfi.geo.SpatialFilter;\nimport com.osdu.model.osdu.GeoLocation;\nimport com.osdu.model.osdu.OsduSearchObject;\nimport com.osdu.model.osdu.OsduSearchResult;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.request.OsduHeader;\nimport com.osdu.service.AuthenticationService;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc;\nimport org.springframework.boot.test.context.SpringBootTest;\nimport org.springframework.boot.test.context.SpringBootTest.WebEnvironment;\nimport org.springframework.boot.test.mock.mockito.MockBean;\nimport org.springframework.http.HttpHeaders;\nimport org.springframework.http.ResponseEntity;\nimport org.springframework.test.context.junit4.SpringRunner;\nimport org.springframework.test.web.servlet.MockMvc;\nimport org.springframework.test.web.servlet.request.MockMvcRequestBuilders;\n\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT)\n@AutoConfigureMockMvc\npublic class SearchServiceIntegrationTests {\n\n private static final String AUTHENTICATION = \"auth\";\n private static final String PARTITION = \"partition\";\n private static final String APP_KEY = \"appKey\";\n private static final String KIND = \"kind\";\n private static final int LIMIT = 3;\n private static final int OFFSET = 2;\n private static final String TEST = \"test\";\n\n @MockBean\n private DelfiPortalProperties portalProperties;\n @MockBean\n private DelfiSearchClient delfiSearchClient;\n @MockBean\n private AuthenticationService authenticationService;\n @Autowired\n private MockMvc mockMvc;\n private ObjectMapper mapper = new ObjectMapper();\n\n @Test\n public void shouldDeliverRecords() throws Exception {\n\n // given\n when(portalProperties.getAppKey()).thenReturn(APP_KEY);\n SpatialFilter spatialFilter = new SpatialFilter();\n List<List<Double>> coordinates = Arrays\n .asList(Arrays.asList(12.3, 23.4), Arrays.asList(34.5, 45.6));\n spatialFilter.setByBoundingBox(new ByBoundingBox(coordinates));\n\n DelfiSearchResult delfiSearchResult 
= new DelfiSearchResult();\n    delfiSearchResult.setTotalCount(LIMIT);\n    HashMap<Object, Object> data = new HashMap<>();\n    data.put(TEST, TEST);\n    delfiSearchResult.setResults(data);\n\n    when(delfiSearchClient\n        .searchIndex(eq(AUTHENTICATION), eq(APP_KEY), eq(PARTITION), any(DelfiSearchObject.class)))\n        .thenReturn(delfiSearchResult);\n\n    OsduSearchObject inputSearchObject = new OsduSearchObject();\n    inputSearchObject.setStart(OFFSET);\n    inputSearchObject.setCount(LIMIT);\n    GeoLocation geoLocation = new GeoLocation();\n    geoLocation.setType(\"byBoundingBox\");\n    geoLocation.setCoordinates(coordinates);\n    inputSearchObject.setGeoLocation(geoLocation);\n    Map<String, List<String>> metadata = new HashMap<>();\n    metadata.put(\"key\", Arrays.asList(\"value1\", \"value2\"));\n    inputSearchObject.setMetadata(metadata);\n\n    HttpHeaders headers = new HttpHeaders();\n    headers.add(OsduHeader.AUTHORIZATION, AUTHENTICATION);\n    headers.add(OsduHeader.PARTITION, PARTITION);\n    headers.add(KIND_HEADER_KEY, KIND);\n\n    OsduSearchResult expected = new OsduSearchResult();\n    expected.setTotalHits(LIMIT);\n    expected.setCount(LIMIT);\n    expected.setStart(OFFSET);\n    expected.setResults(data);\n\n    // when\n    ResponseEntity responseEntity = (ResponseEntity) mockMvc\n        .perform(MockMvcRequestBuilders.post(\"/\")\n            .headers(headers)\n            .content(mapper.writeValueAsString(inputSearchObject)))\n        .andExpect(status().isOk())\n        .andReturn().getAsyncResult();\n\n    // then\n    OsduSearchResult response = (OsduSearchResult) responseEntity.getBody();\n\n    assertEquals(expected, response);\n  }\n}\n" }, { "alpha_fraction": 0.7296006679534912, "alphanum_fraction": 0.7387152910232544, "avg_line_length": 35, "blob_id": "8eb4ea73a9acefb51f32fb7e5c300197a608b339", "content_id": "ff583b87870fcf3b2255250487b075b0667ef280", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2304, "license_type": "permissive", "max_line_length": 108, "num_lines": 64, "path": "/osdu-r2/os-delivery/provider/delivery-gcp/src/test/java/org/opengroup/osdu/delivery/provider/gcp/repository/TestCredential.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.repository;\n\nimport com.google.api.client.testing.util.SecurityTestUtils;\nimport com.google.auth.oauth2.ServiceAccountCredentials;\nimport com.google.auth.oauth2.UserCredentials;\nimport java.security.spec.PKCS8EncodedKeySpec;\nimport java.util.Base64;\nimport java.util.Collection;\nimport java.util.Collections;\nimport lombok.SneakyThrows;\n\npublic final class TestCredential {\n\n  private static final String SA_CLIENT_EMAIL = \"[email protected]\";\n  private static final String SA_CLIENT_ID = \"dummy-user\";\n  private static final String SA_PRIVATE_KEY_ID = \"dummy-private-key-id\";\n  private static final String SA_PRIVATE_KEY_PKCS8;\n  private static final 
Collection<String> EMPTY_SCOPES = Collections.emptyList();\n\n private static final String CLIENT_SECRET = \"dummy-client-secret\";\n private static final String CLIENT_ID = \"dummy-client-id\";\n private static final String REFRESH_TOKEN = \"dummy-refresh-token\";\n\n static {\n PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(SecurityTestUtils.newEncodedRsaPrivateKeyBytes());\n SA_PRIVATE_KEY_PKCS8 = \"-----BEGIN PRIVATE KEY-----\\n\"\n + new String(Base64.getEncoder().encode(keySpec.getEncoded()))\n + \"\\n-----END PRIVATE KEY-----\\n\";\n }\n\n private TestCredential() {\n }\n\n @SneakyThrows\n static ServiceAccountCredentials getSa() {\n return ServiceAccountCredentials.fromPkcs8(\n SA_CLIENT_ID, SA_CLIENT_EMAIL, SA_PRIVATE_KEY_PKCS8, SA_PRIVATE_KEY_ID, EMPTY_SCOPES);\n }\n\n static UserCredentials getUserCredentials() {\n return UserCredentials.newBuilder()\n .setClientId(CLIENT_ID)\n .setClientSecret(CLIENT_SECRET)\n .setRefreshToken(REFRESH_TOKEN)\n .build();\n }\n\n}\n" }, { "alpha_fraction": 0.7754979133605957, "alphanum_fraction": 0.785153865814209, "avg_line_length": 27.568965911865234, "blob_id": "0b28c7cd344fb14ecb2f80d8fa01064aa1a69837", "content_id": "4c8d4b8b3b0085618d1280cf4747565e08c5f742", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1657, "license_type": "permissive", "max_line_length": 81, "num_lines": 58, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/storage/Schema.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.storage;\n\nimport java.util.Map;\nimport java.util.Optional;\n\nimport javax.validation.Valid;\nimport javax.validation.constraints.NotEmpty;\nimport javax.validation.constraints.NotNull;\n\nimport lombok.Builder;\nimport org.opengroup.osdu.core.common.model.storage.validation.ValidKind;\nimport org.opengroup.osdu.core.common.model.storage.validation.ValidNotNullArray;\nimport org.opengroup.osdu.core.common.model.storage.validation.ValidationDoc;\n\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\nimport io.swagger.annotations.ApiModelProperty;\n\n@Data\n@Builder\n@AllArgsConstructor\n@NoArgsConstructor\npublic class Schema {\n\n\t@ValidKind\n\t@NotNull\n\t@ApiModelProperty(value = SwaggerDoc.SCHEMA_REQUEST_KIND,\n\t\t\trequired = true,\n\t\t\texample = SwaggerDoc.RECORD_KIND_EXAMPLE)\n\tprivate String kind;\n\n\t@Valid\n\t@NotEmpty(message = ValidationDoc.SCHEMA_ITEMS_NOT_EMPTY)\n\t@ValidNotNullArray\n\tprivate SchemaItem[] schema;\n\n\tprivate Map<String, Object> ext;\n\n}\n" }, { "alpha_fraction": 0.7248454093933105, "alphanum_fraction": 0.7263912558555603, "avg_line_length": 35.68041229248047, "blob_id": "58745ea56281e170980823e8462d765bd6260a75", "content_id": 
"05eff3b1a04d8e3b5ced79f8f3f28c66d5406884", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7116, "license_type": "permissive", "max_line_length": 98, "num_lines": 194, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/processing/IngestProcessService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.processing;\n\nimport com.osdu.model.IngestHeaders;\nimport com.osdu.model.Record;\nimport com.osdu.model.RequestContext;\nimport com.osdu.model.ingest.IngestedFile;\nimport com.osdu.model.ingest.IngestedWp;\nimport com.osdu.model.ingest.IngestedWpc;\nimport com.osdu.model.job.IngestJob;\nimport com.osdu.model.job.IngestJobStatus;\nimport com.osdu.model.type.manifest.LoadManifest;\nimport com.osdu.model.type.manifest.ManifestFile;\nimport com.osdu.model.type.manifest.ManifestWp;\nimport com.osdu.model.type.manifest.ManifestWpc;\nimport com.osdu.service.AuthenticationService;\nimport com.osdu.service.IngestionService;\nimport com.osdu.service.JobStatusService;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Objects;\nimport java.util.function.Function;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class IngestProcessService {\n\n final AuthenticationService authenticationService;\n final JobStatusService jobStatusService;\n final IngestWpService ingestWpService;\n final IngestionService ingestionService;\n\n /**\n * Process load manifest. It includes ingestion files, work product components and work product.\n * @param ingestJobId ingest job ID\n * @param loadManifest load manifest\n * @param headers ingest headers\n */\n public void processLoadManifest(String ingestJobId, LoadManifest loadManifest,\n IngestHeaders headers) {\n log.debug(\"Start the internal injection process. 
JobId: {}, loadManifest: {}, headers: {}\",\n        ingestJobId, loadManifest, headers);\n    RequestContext requestContext = getRequestContext(headers);\n    log.debug(\"Ingestion request context: {}\", requestContext);\n\n    log.debug(\"Start ingesting work product.\");\n    ManifestWp workProduct = getWorkProduct(loadManifest);\n    IngestedWp ingestedWp = ingestWpService.processWp(workProduct, requestContext);\n    log.debug(\"Work product is ingested: {}\", ingestedWp);\n\n    log.debug(\"Save ingest job result.\");\n    IngestJob ingestJob = getResultIngestJob(ingestedWp);\n\n    // fail created records if WP has been unsuccessfully processed\n    if (!ingestedWp.isSuccess()) {\n      List<Record> recordsInWp = getWpRecordStream(ingestedWp)\n          .collect(Collectors.toList());\n      ingestionService.failRecords(recordsInWp, requestContext);\n    }\n\n    jobStatusService.save(ingestJob.toBuilder()\n        .id(ingestJobId)\n        .build());\n    log.debug(\"Finished the internal async ingestion process. Ingest job: {}\", ingestJob);\n  }\n\n  private RequestContext getRequestContext(IngestHeaders headers) {\n    String authorizationToken = headers.getAuthorizationToken();\n    String partition = headers.getPartition();\n\n    Map<String, String> groupEmailByName = authenticationService\n        .getGroupEmailToName(authorizationToken, partition);\n\n    return RequestContext.builder()\n        .authorizationToken(authorizationToken)\n        .partition(partition)\n        .legalTags(headers.getLegalTags())\n        .userGroupEmailByName(groupEmailByName)\n        .headers(headers)\n        .build();\n  }\n\n  private static List<ManifestWpc> getWorkProductComponents(LoadManifest loadManifest) {\n    Map<String, ManifestFile> fileById = loadManifest.getFiles().stream()\n        .collect(Collectors.toMap(ManifestFile::getAssociativeId, Function.identity()));\n    return loadManifest.getWorkProductComponents().stream()\n        .map(wpc -> {\n          wpc.setFiles(wpc.getFileAssociativeIds().stream()\n              .map(fileById::get)\n              .map(file -> {\n                file.setWpc(wpc);\n                return file;\n              })\n              .collect(Collectors.toList()));\n          return wpc;\n        })\n        .collect(Collectors.toList());\n  }\n\n  private static ManifestWp getWorkProduct(LoadManifest loadManifest) {\n    ManifestWp workProduct = loadManifest.getWorkProduct();\n    workProduct.setManifestWpcs(getWorkProductComponents(loadManifest));\n    return workProduct;\n  }\n\n  private static IngestJob getResultIngestJob(IngestedWp ingestedWp) {\n    if (ingestedWp.isSuccess()) {\n      return getCompleteIngestJob(ingestedWp);\n    }\n\n    return getFailedIngestJob(ingestedWp);\n  }\n\n  private static IngestJob getCompleteIngestJob(IngestedWp wp) {\n    List<String> srnsInWp = getWpSrnStream(wp)\n        .collect(Collectors.toList());\n\n    return IngestJob.builder()\n        .status(IngestJobStatus.COMPLETE)\n        .srns(srnsInWp)\n        .summary(\"Ingestion successfully completed. 
Created \" + srnsInWp.size() + \" records.\")\n .build();\n }\n\n private static IngestJob getFailedIngestJob(IngestedWp wp) {\n List<String> srnsInWp = getWpSrnStream(wp)\n .collect(Collectors.toList());\n\n String wpcsIngestedSummary = getFailedSummary(wp, srnsInWp.size());\n\n return IngestJob.builder()\n .status(IngestJobStatus.FAILED)\n .srns(srnsInWp)\n .summary(wpcsIngestedSummary)\n .build();\n }\n\n private static Stream<String> getWpSrnStream(IngestedWp wp) {\n Stream<String> wpcSrnStream = wp.getIngestedWpcs().stream()\n .flatMap(IngestProcessService::getWpcSrnStream);\n return Stream.concat(Stream.of(wp.getSrn()), wpcSrnStream)\n .filter(Objects::nonNull);\n }\n\n private static Stream<String> getWpcSrnStream(IngestedWpc wpc) {\n Stream<String> srnStream = wpc.getIngestedFiles().stream()\n .map(IngestedFile::getSrn);\n return Stream.concat(Stream.of(wpc.getSrn()), srnStream);\n }\n\n private static String getFailedSummary(IngestedWp wp, int createdRecords) {\n String failedMsg = String.format(\"Ingestion failed. It was possible to create %s records.\",\n createdRecords);\n\n return Stream.concat(Stream.of(failedMsg), wp.getSummaries().stream())\n .filter(Objects::nonNull)\n .collect(Collectors.joining(System.lineSeparator()));\n }\n\n private static Stream<Record> getWpRecordStream(IngestedWp wp) {\n Stream<Record> recordStream = wp.getIngestedWpcs().stream()\n .flatMap(IngestProcessService::getWpcRecordStream);\n return Stream.concat(Stream.of(wp.getWpRecord()), recordStream)\n .filter(Objects::nonNull);\n }\n\n private static Stream<Record> getWpcRecordStream(IngestedWpc wpc) {\n Stream<Record> fileRecordStream = wpc.getIngestedFiles().stream()\n .map(IngestedFile::getRecord);\n return Stream.concat(Stream.of(wpc.getWpcRecord()), fileRecordStream);\n }\n\n}\n" }, { "alpha_fraction": 0.7850467562675476, "alphanum_fraction": 0.7943925261497498, "avg_line_length": 34.66666793823242, "blob_id": "d19a0b98f983d38f16737623e78e2155d33463cf", "content_id": "e524175e835929942fb5bc1c305a2d9fc1fc35f4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 107, "license_type": "permissive", "max_line_length": 88, "num_lines": 3, "path": "/osdu-r2/os-core-common/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# os-core-common\n\nThis folder contains the common core elements that are used across all OSDU R2 services.\n" }, { "alpha_fraction": 0.7681728601455688, "alphanum_fraction": 0.7717747092247009, "avg_line_length": 33.70454406738281, "blob_id": "11b7d9c1d93dc6c297689ef7aeb2eb429fdf9adb", "content_id": "5289851ab7cbbd77c56df1fb6feebccbc29fae26", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3054, "license_type": "permissive", "max_line_length": 96, "num_lines": 88, "path": "/compatibility-layer/service/srn-mapper/src/main/java/com/osdu/service/google/GcpSrnMappingService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.google;\n\nimport static java.lang.String.format;\n\nimport com.osdu.exception.OsduNotFoundException;\nimport com.osdu.mapper.SchemaDataMapper;\nimport com.osdu.mapper.SrnToRecordMapper;\nimport com.osdu.model.ResourceTypeId;\nimport com.osdu.model.SchemaData;\nimport com.osdu.model.SrnToRecord;\nimport com.osdu.model.dto.SchemaDataDto;\nimport com.osdu.model.dto.SrnToRecordDto;\nimport com.osdu.repository.SchemaDataRepository;\nimport com.osdu.repository.SrnToRecordRepository;\nimport com.osdu.service.SrnMappingService;\nimport javax.inject.Named;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class GcpSrnMappingService implements SrnMappingService {\n\n final SchemaDataRepository schemaDataRepository;\n final SrnToRecordRepository srnToRecordRepository;\n\n @Named\n final SchemaDataMapper schemaDataMapper;\n @Named\n final SrnToRecordMapper srnToRecordMapper;\n\n @Override\n public SchemaData getSchemaData(String typeId) {\n log.debug(\"Request to get SchemaData by typeId: {}\", typeId);\n ResourceTypeId resourceTypeId = new ResourceTypeId(typeId);\n SchemaDataDto schemaDataDto = resourceTypeId.hasVersion()\n ? schemaDataRepository.findExactByTypeId(typeId)\n : schemaDataRepository.findLastByTypeId(typeId);\n\n if (schemaDataDto == null) {\n throw new OsduNotFoundException(format(\"Can not find schema data for type - %s\", typeId));\n }\n log.debug(\"Found SchemaData: {}\", schemaDataDto);\n\n return schemaDataMapper.schemaDataDtoToSchemaData(schemaDataDto);\n }\n\n @Override\n public void saveSchemaData(SchemaData schemaData) {\n log.debug(\"Request to save SchemaData : {}\", schemaData);\n schemaDataRepository\n .save(schemaDataMapper.schemaDataToSchemaDataDto(schemaData));\n }\n\n @Override\n public SrnToRecord getSrnToRecord(String srn) {\n log.debug(\"Request to get SrnToRecord by srn: {}\", srn);\n SrnToRecordDto srnToRecordDto = srnToRecordRepository.findBySrn(srn);\n log.debug(\"Found SrnToRecord: {}\", srnToRecordDto);\n return srnToRecordDto == null ? 
null\n : srnToRecordMapper.srnToRecordDtoToSrnToRecord(srnToRecordDto);\n }\n\n @Override\n public void saveSrnToRecord(SrnToRecord record) {\n log.debug(\"Request to save SrnToRecord: {}\", record);\n srnToRecordRepository.save(srnToRecordMapper.srnToRecordToSrnToRecordDto(record));\n }\n\n}\n" }, { "alpha_fraction": 0.6271501779556274, "alphanum_fraction": 0.6350534558296204, "avg_line_length": 33.69355010986328, "blob_id": "7f54cc41c33c0027b54b05c3a62cb2b40471c5f0", "content_id": "9c9561173cc97bdc5384830e4d16a4d0be5e6733", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2151, "license_type": "permissive", "max_line_length": 115, "num_lines": 62, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/logging/LogUtils.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.logging;\n\nimport org.apache.commons.lang3.StringUtils;\n\nimport java.util.*;\n\npublic class LogUtils {\n\n public static final String TRACE_CONTEXT_KEY = \"x-cloud-trace-context\";\n public static final String COR_ID = \"correlation-id\";\n public static final String ACCOUNT_ID = \"account-id\";\n public static final String DATA_PARTITION_ID = \"data-partition-id\";\n private static final HashSet<String> headerKeys = new HashSet<>();\n\n static {\n headerKeys.add(ACCOUNT_ID);\n headerKeys.add(DATA_PARTITION_ID);\n headerKeys.add(COR_ID);\n headerKeys.add(TRACE_CONTEXT_KEY);\n }\n\n public static Map<String, String> createStandardLabelsFromEntrySet(Set<Map.Entry<String, List<String>>> input){\n Map<String, String> output = new HashMap<>();\n if(input != null) {\n input.forEach((k) -> {\n String key = k.getKey().toLowerCase();\n if (headerKeys.contains(key))\n output.put(key, StringUtils.join(k.getValue(), ','));\n });\n }\n return output;\n }\n\n public static Map<String, String> createStandardLabelsFromMap(Map<String, String> input){\n Map<String, String> output = new HashMap<>();\n if(input != null) {\n input.forEach((k, v) -> {\n String key = k.toLowerCase();\n if (headerKeys.contains(key))\n output.put(key, v);\n });\n }\n return output;\n }\n}\n" }, { "alpha_fraction": 0.6016022562980652, "alphanum_fraction": 0.6155900359153748, "avg_line_length": 29.362934112548828, "blob_id": "7833d75a7e24d2d8b7a5c0a46f6d7042348a02bd", "content_id": "6e099a8bf909bae81570ba9f36a0c9f635b24c93", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 7864, "license_type": "permissive", "max_line_length": 102, "num_lines": 259, "path": "/osdu-r2/os-core-common/pom.xml", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n ~ Copyright 2020 Google LLC\n ~ Copyright 2017-2019, Schlumberger\n ~\n ~ 
Licensed under the Apache License, Version 2.0 (the \"License\");\n ~ you may not use this file except in compliance with the License.\n ~ You may obtain a copy of the License at\n ~\n ~ https://www.apache.org/licenses/LICENSE-2.0\n ~\n ~ Unless required by applicable law or agreed to in writing, software\n ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n ~ See the License for the specific language governing permissions and\n ~ limitations under the License.\n -->\n\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n\n <groupId>org.opengroup.osdu</groupId>\n <artifactId>os-core-common</artifactId>\n <version>0.0.13</version>\n <packaging>jar</packaging>\n\n <name>os-core-common</name>\n <description>Common core elements</description>\n\n <distributionManagement>\n <repository>\n <id>${env.ARTIFACTORY_REPOSITORY_ID}</id>\n <url>${env.ARTIFACTORY_REPOSITORY_URL}</url>\n </repository>\n </distributionManagement>\n\n <properties>\n <java.version>1.8</java.version>\n <maven.compiler.target>${java.version}</maven.compiler.target>\n <maven.compiler.source>${java.version}</maven.compiler.source>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n <spring-boot.version>2.2.5.RELEASE</spring-boot.version>\n <spring-cloud.version>Hoxton.SR3</spring-cloud.version>\n </properties>\n\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-dependencies</artifactId>\n <version>${spring-boot.version}</version>\n <type>pom</type>\n <scope>import</scope>\n </dependency>\n </dependencies>\n </dependencyManagement>\n\n <dependencies>\n\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-web</artifactId>\n </dependency>\n\n <dependency>\n <groupId>org.projectlombok</groupId>\n <artifactId>lombok</artifactId>\n <optional>true</optional>\n </dependency>\n\n <!-- Jackson -->\n <dependency>\n <groupId>com.fasterxml.jackson.core</groupId>\n <artifactId>jackson-databind</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.module</groupId>\n <artifactId>jackson-module-parameter-names</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.datatype</groupId>\n <artifactId>jackson-datatype-jdk8</artifactId>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.datatype</groupId>\n <artifactId>jackson-datatype-jsr310</artifactId>\n </dependency>\n\n <!-- SLB dependencies -->\n <dependency>\n <groupId>javax.inject</groupId>\n <artifactId>javax.inject</artifactId>\n <version>1</version>\n </dependency>\n <dependency>\n <groupId>com.google.guava</groupId>\n <artifactId>guava</artifactId>\n <version>28.2-jre</version>\n </dependency>\n <dependency>\n <groupId>com.google.code.gson</groupId>\n <artifactId>gson</artifactId>\n <version>2.8.5</version>\n </dependency>\n <dependency>\n <groupId>biz.paluch.redis</groupId>\n <artifactId>lettuce</artifactId>\n <version>4.5.0.Final</version>\n </dependency>\n <dependency>\n <groupId>io.swagger</groupId>\n <artifactId>swagger-jaxrs</artifactId>\n <version>1.5.22</version>\n <exclusions>\n <exclusion>\n <groupId>javax.ws.rs</groupId>\n <artifactId>jsr311-api</artifactId>\n </exclusion>\n <exclusion>\n 
<groupId>com.fasterxml.jackson.core</groupId>\n <artifactId>jackson-databind</artifactId>\n </exclusion>\n </exclusions>\n </dependency>\n <dependency>\n <groupId>com.google.http-client</groupId>\n <artifactId>google-http-client</artifactId>\n <version>1.31.0</version>\n <scope>compile</scope>\n </dependency>\n <dependency>\n <groupId>com.auth0</groupId>\n <artifactId>java-jwt</artifactId>\n <version>3.8.1</version>\n </dependency>\n <dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt</artifactId>\n <version>0.9.1</version>\n </dependency>\n <!--Elasticsearch-->\n <dependency>\n <groupId>org.elasticsearch</groupId>\n <artifactId>elasticsearch</artifactId>\n <version>6.6.2</version>\n </dependency>\n <dependency>\n <groupId>org.elasticsearch.client</groupId>\n <artifactId>elasticsearch-rest-client</artifactId>\n <version>6.6.2</version>\n </dependency>\n <dependency>\n <groupId>org.elasticsearch.client</groupId>\n <artifactId>elasticsearch-rest-high-level-client</artifactId>\n <version>6.6.2</version>\n </dependency>\n\n <dependency>\n <groupId>junit</groupId>\n <artifactId>junit</artifactId>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.mockito</groupId>\n <artifactId>mockito-all</artifactId>\n <version>2.0.2-beta</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.powermock</groupId>\n <artifactId>powermock-module-junit4</artifactId>\n <version>2.0.2</version>\n <scope>test</scope>\n </dependency>\n\n <dependency>\n <groupId>org.apache.commons</groupId>\n <artifactId>commons-lang3</artifactId>\n </dependency>\n </dependencies>\n\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-checkstyle-plugin</artifactId>\n <version>3.1.0</version>\n <configuration>\n <configLocation>google_checks.xml</configLocation>\n </configuration>\n <executions>\n <execution>\n <goals>\n <goal>checkstyle</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-pmd-plugin</artifactId>\n <version>3.12.0</version>\n <configuration>\n <targetJdk>${java.version}</targetJdk>\n <excludes>\n <exclude>**/*Bean.java</exclude>\n <exclude>**/generated/*.java</exclude>\n </excludes>\n <excludeRoots>\n <excludeRoot>target/generated-sources/</excludeRoot>\n </excludeRoots>\n </configuration>\n <executions>\n <execution>\n <goals>\n <goal>pmd</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>com.github.spotbugs</groupId>\n <artifactId>spotbugs-maven-plugin</artifactId>\n <version>3.1.12</version>\n <executions>\n <execution>\n <goals>\n <goal>spotbugs</goal>\n </goals>\n <phase>verify</phase>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.jacoco</groupId>\n <artifactId>jacoco-maven-plugin</artifactId>\n <version>0.8.4</version>\n <executions>\n <execution>\n <goals>\n <goal>prepare-agent</goal>\n </goals>\n </execution>\n <execution>\n <id>report</id>\n <goals>\n <goal>report</goal>\n </goals>\n <phase>test</phase>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n</project>\n" }, { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 49, "blob_id": "4eeda4881d0c7a46aa7376704c4fd9a84dc26688", "content_id": "49003a2bc2522259c9ea275c91cb32098b436817", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 200, 
"license_type": "permissive", "max_line_length": 97, "num_lines": 4, "path": "/compatibility-layer/service/search/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Search service working with data in OSDU format\n\nSearch service accepting search requests in OSDU format, transforming them into DELFI compatible \nformat and converting the result into OSDU format. " }, { "alpha_fraction": 0.737658679485321, "alphanum_fraction": 0.7433004379272461, "avg_line_length": 29.17021369934082, "blob_id": "49c809f8734790cd998b95e31c9dd966d52e9550", "content_id": "ab3382bbef6beb1045b0e1067be820487dd17db3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1418, "license_type": "permissive", "max_line_length": 96, "num_lines": 47, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/provider/interfaces/IWorkflowStatusRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.interfaces;\n\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\n\npublic interface IWorkflowStatusRepository {\n\n /**\n * Finds workflow status based on workflow id.\n *\n * @param workflowId workflow id\n * @return Workflow status\n */\n WorkflowStatus findWorkflowStatus(String workflowId);\n\n /**\n * Save workflow status.\n *\n * @param workflowStatus to save\n * @return saved workflow status\n */\n WorkflowStatus saveWorkflowStatus(WorkflowStatus workflowStatus);\n\n /**\n * Update workflow status based on workflow id.\n *\n * @param workflowId workflow id\n * @return Workflow status\n */\n WorkflowStatus updateWorkflowStatus(String workflowId, WorkflowStatusType workflowStatusType);\n}\n" }, { "alpha_fraction": 0.7235390543937683, "alphanum_fraction": 0.7242744565010071, "avg_line_length": 37.096981048583984, "blob_id": "d83974b53eb0a315149d87e8ad61f3038f32fb16", "content_id": "a8a76ecc148eec062b6799658ed1cd6e41c2ecf4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 17677, "license_type": "permissive", "max_line_length": 120, "num_lines": 464, "path": "/osdu-r2/os-workflow/provider/workflow-gcp/src/test/java/org/opengroup/osdu/workflow/provider/gcp/repository/FirestoreWorkflowStatusRepositoryTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an 
\"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.repository;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.anyMap;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.BDDMockito.willThrow;\nimport static org.mockito.Mockito.RETURNS_DEEP_STUBS;\nimport static org.mockito.Mockito.lenient;\nimport static org.mockito.Mockito.mock;\nimport static org.mockito.Mockito.verify;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.AIRFLOW_RUN_ID;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.SUBMITTED_AT;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.SUBMITTED_BY;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.WORKFLOW_ID;\nimport static org.opengroup.osdu.workflow.model.WorkflowStatus.Fields.WORKFLOW_STATUS_TYPE;\n\nimport com.google.api.core.ApiFuture;\nimport com.google.api.core.ApiFutures;\nimport com.google.cloud.Timestamp;\nimport com.google.cloud.firestore.DocumentReference;\nimport com.google.cloud.firestore.DocumentSnapshot;\nimport com.google.cloud.firestore.FieldValue;\nimport com.google.cloud.firestore.Firestore;\nimport com.google.cloud.firestore.QueryDocumentSnapshot;\nimport com.google.cloud.firestore.QuerySnapshot;\nimport com.google.cloud.firestore.WriteResult;\nimport java.util.Arrays;\nimport java.util.Collections;\nimport java.util.Date;\nimport java.util.List;\nimport java.util.Map;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Nested;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.ArgumentCaptor;\nimport org.mockito.Captor;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusNotFoundException;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusNotUpdatedException;\nimport org.opengroup.osdu.workflow.provider.gcp.exception.WorkflowStatusQueryException;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass FirestoreWorkflowStatusRepositoryTest {\n\n private static final String COLLECTION_NAME = \"workflow-status\";\n private static final String TEST_WORKFLOW_ID = \"test-workflow-id\";\n private static final String TEST_AIRFLOW_RUN_ID = \"test-airflow-run-id\";\n private static final String USER = \"user-1\";\n\n private QueryDocumentSnapshot qDocSnap = mock(QueryDocumentSnapshot.class);\n private DocumentReference docRef = mock(DocumentReference.class);\n private DocumentSnapshot docSnap = mock(DocumentSnapshot.class);\n private Firestore firestore = mock(Firestore.class, RETURNS_DEEP_STUBS);\n private WriteResult writeResult = mock(WriteResult.class, RETURNS_DEEP_STUBS);\n\n private IWorkflowStatusRepository workflowStatusRepository;\n\n @BeforeEach\n void setUp() {\n 
workflowStatusRepository = new FirestoreWorkflowStatusRepository(firestore);\n }\n\n @Nested\n class FindWorkflowStatus {\n\n @Test\n void shouldFindWorkflowStatusByWorkflowId() {\n\n // given\n Date createdDate = new Date();\n List<QueryDocumentSnapshot> documents = Collections.singletonList(qDocSnap);\n QuerySnapshot querySnapshot = QuerySnapshot\n .withDocuments(null, Timestamp.now(), documents);\n ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n\n givenDocSnap(qDocSnap, getWorkflowStatus(createdDate));\n\n // when\n WorkflowStatus workflowStatus = workflowStatusRepository\n .findWorkflowStatus(TEST_WORKFLOW_ID);\n\n // then\n then(workflowStatus).isNotNull();\n }\n\n @Test\n void shouldThrowExceptionWhenQueryFailed() {\n\n // given\n ApiFuture<QuerySnapshot> queryFuture =\n ApiFutures.immediateFailedFuture(new IllegalArgumentException(\"Failed query\"));\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository.findWorkflowStatus(\n TEST_WORKFLOW_ID));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusQueryException.class)\n .hasRootCauseInstanceOf(IllegalArgumentException.class)\n .hasMessage(\"Failed to find a workflow status by Workflow id - test-workflow-id\");\n }\n\n @Test\n void shouldThrowExceptionWhenFutureFailed() throws Exception {\n\n // given\n ApiFuture queryFuture = mock(ApiFuture.class);\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n willThrow(new InterruptedException(\"Failed future\")).given(queryFuture).get();\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository.findWorkflowStatus(\n TEST_WORKFLOW_ID));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusQueryException.class)\n .hasRootCauseInstanceOf(InterruptedException.class)\n .hasMessage(\"Failed to find a workflow status by Workflow id - test-workflow-id\");\n }\n\n @Test\n void shouldThrowExceptionWhenItFindsFewDocuments() {\n // given\n List<QueryDocumentSnapshot> documents = Arrays.asList(qDocSnap, qDocSnap);\n QuerySnapshot querySnapshot = QuerySnapshot\n .withDocuments(null, Timestamp.now(), documents);\n ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository.findWorkflowStatus(\n TEST_WORKFLOW_ID));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusQueryException.class)\n .hasMessage(\n \"Find workflow status returned 2 documents(s), expected 1, query by Workflow id - test-workflow-id\");\n }\n\n @Test\n void shouldReturnNullWhenNothingWasFound() {\n // given\n List<QueryDocumentSnapshot> documents = Collections.emptyList();\n QuerySnapshot querySnapshot = QuerySnapshot\n .withDocuments(null, Timestamp.now(), documents);\n ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, \"test\").get())\n .willReturn(queryFuture);\n\n // when\n WorkflowStatus workflowStatus = workflowStatusRepository.findWorkflowStatus(\"test\");\n\n // then\n 
then(workflowStatus).isNull();\n    }\n\n  }\n\n  @Nested\n  class SaveWorkflowStatus {\n\n    @Captor\n    ArgumentCaptor<Map<String, Object>> dataCaptor;\n\n    @Test\n    void shouldSaveWorkflowStatusAndReturnSavedEntity() {\n      // given\n      Date createdDate = new Date();\n      WorkflowStatus workflowStatus = getWorkflowStatus(createdDate);\n\n      ApiFuture<DocumentReference> query = ApiFutures.immediateFuture(docRef);\n      ApiFuture<DocumentSnapshot> savedDoc = ApiFutures.immediateFuture(docSnap);\n\n      given(firestore.collection(COLLECTION_NAME).add(anyMap())).willReturn(query);\n      given(docRef.get()).willReturn(savedDoc);\n\n      given(docSnap.getString(WorkflowStatus.Fields.WORKFLOW_ID)).willReturn(TEST_WORKFLOW_ID);\n      given(docSnap.getString(AIRFLOW_RUN_ID)).willReturn(\n          TEST_AIRFLOW_RUN_ID);\n      given(docSnap.getString(WORKFLOW_STATUS_TYPE))\n          .willReturn(WorkflowStatusType.SUBMITTED.name());\n      given(docSnap.getDate(SUBMITTED_AT)).willReturn(createdDate);\n      given(docSnap.getString(SUBMITTED_BY)).willReturn(USER);\n\n      // when\n      WorkflowStatus saved = workflowStatusRepository.saveWorkflowStatus(workflowStatus);\n\n      // then\n      then(saved).isEqualTo(workflowStatus);\n    }\n\n    @Test\n    void shouldUseServerTimestampWhenCreateAtIsNotSpecified() {\n      // given\n      WorkflowStatus workflowStatus = WorkflowStatus.builder()\n          .workflowId(TEST_WORKFLOW_ID)\n          .airflowRunId(TEST_AIRFLOW_RUN_ID)\n          .workflowStatusType(WorkflowStatusType.SUBMITTED)\n          .submittedBy(USER)\n          .build();\n\n      ApiFuture<DocumentReference> query = ApiFutures.immediateFuture(docRef);\n      ApiFuture<DocumentSnapshot> savedDoc = ApiFutures.immediateFuture(docSnap);\n\n      given(firestore.collection(COLLECTION_NAME).add(anyMap())).willReturn(query);\n      given(docRef.get()).willReturn(savedDoc);\n\n      givenDocSnap(docSnap, workflowStatus);\n\n      // when\n      WorkflowStatus saved = workflowStatusRepository.saveWorkflowStatus(workflowStatus);\n\n      // then\n      then(saved).isEqualToIgnoringGivenFields(workflowStatus, SUBMITTED_AT);\n\n      verify(firestore.collection(COLLECTION_NAME)).add(dataCaptor.capture());\n\n      then(dataCaptor.getValue()).satisfies(map -> {\n        then(map.get(WORKFLOW_ID)).isEqualTo(TEST_WORKFLOW_ID);\n        then(map.get(SUBMITTED_AT)).isEqualTo(FieldValue.serverTimestamp());\n      });\n    }\n\n    @Test\n    void shouldThrowExceptionWhenSaveQueryFailed() {\n      // given\n      Date createdDate = new Date();\n      WorkflowStatus workflowStatus = getWorkflowStatus(createdDate);\n\n      ApiFuture<DocumentReference> query =\n          ApiFutures.immediateFailedFuture(new IllegalArgumentException(\"Failed query\"));\n\n      given(firestore.collection(COLLECTION_NAME).add(anyMap())).willReturn(query);\n\n      // when\n      Throwable thrown = catchThrowable(\n          () -> workflowStatusRepository.saveWorkflowStatus(workflowStatus));\n\n      // then\n      then(thrown)\n          .isInstanceOf(WorkflowStatusQueryException.class)\n          .hasRootCauseInstanceOf(IllegalArgumentException.class)\n          .hasMessageContaining(\"Exceptions during saving workflow status:\");\n    }\n\n    @Test\n    void shouldThrowExceptionWhenUnableToFetchSavedEntity() {\n      // given\n      Date createdDate = new Date();\n      WorkflowStatus workflowStatus = getWorkflowStatus(createdDate);\n\n      ApiFuture<DocumentReference> query = ApiFutures.immediateFuture(docRef);\n      ApiFuture<DocumentSnapshot> savedDoc =\n          ApiFutures.immediateFailedFuture(new IllegalArgumentException(\"Failed get saved\"));\n\n      given(firestore.collection(COLLECTION_NAME).add(anyMap())).willReturn(query);\n      given(docRef.get()).willReturn(savedDoc);\n\n      // when\n      Throwable thrown = catchThrowable(\n          () -> 
workflowStatusRepository.saveWorkflowStatus(workflowStatus));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusQueryException.class)\n .hasRootCauseInstanceOf(IllegalArgumentException.class)\n .hasMessage(\"Saved Workflow status should exist\");\n }\n }\n\n @Nested\n class UpdateWorkflowStatus {\n\n @Test\n void shouldUpdateWorkflowStatusAndReturnSavedEntity() {\n\n // given\n Date createdDate = new Date();\n List<QueryDocumentSnapshot> documents = Collections.singletonList(qDocSnap);\n QuerySnapshot querySnapshot = QuerySnapshot\n .withDocuments(null, Timestamp.now(), documents);\n ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n lenient().when(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .thenReturn(queryFuture);\n\n givenDocSnap(qDocSnap, getWorkflowStatus(createdDate));\n\n ApiFuture<WriteResult> queryWriteFuture = ApiFutures.immediateFuture(writeResult);\n\n lenient().when(firestore.collection(COLLECTION_NAME).document(eq(\"doc-id\"))\n .update(eq(WORKFLOW_STATUS_TYPE), eq(\"running\"))).thenReturn(queryWriteFuture);\n\n // when\n WorkflowStatus saved = workflowStatusRepository\n .updateWorkflowStatus(TEST_WORKFLOW_ID, WorkflowStatusType.RUNNING);\n\n // then\n then(saved.getWorkflowStatusType()).isEqualTo(WorkflowStatusType.RUNNING);\n then(saved.getWorkflowId()).isEqualTo(TEST_WORKFLOW_ID);\n }\n\n @Test\n void shouldThrowExceptionWhenUpdateQueryFailed() {\n\n // given\n ApiFuture<QuerySnapshot> queryFuture =\n ApiFutures.immediateFailedFuture(new IllegalArgumentException(\"Failed query\"));\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository.updateWorkflowStatus(\n TEST_WORKFLOW_ID, WorkflowStatusType.RUNNING));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusQueryException.class)\n .hasRootCauseInstanceOf(IllegalArgumentException.class)\n .hasMessage(\"Failed to find a workflow status by Workflow id - test-workflow-id\");\n }\n\n @Test\n void shouldThrowExceptionWhenItFindsFewDocuments() {\n\n // given\n List<QueryDocumentSnapshot> documents = Arrays.asList(qDocSnap, qDocSnap);\n QuerySnapshot querySnapshot = QuerySnapshot\n .withDocuments(null, Timestamp.now(), documents);\n ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository.updateWorkflowStatus(\n TEST_WORKFLOW_ID, WorkflowStatusType.RUNNING));\n\n // then\n then(thrown)\n .isInstanceOf(WorkflowStatusQueryException.class)\n .hasMessage(\n \"Found more than one (2) workflow status documents, expected 1, query by Workflow id - test-workflow-id\");\n }\n\n @Test\n void shouldThrowExceptionWhenNothingWasFound() {\n // given\n List<QueryDocumentSnapshot> documents = Collections.emptyList();\n QuerySnapshot querySnapshot = QuerySnapshot\n .withDocuments(null, Timestamp.now(), documents);\n ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n given(firestore.collection(COLLECTION_NAME)\n .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n .willReturn(queryFuture);\n\n // when\n Throwable thrown = catchThrowable(() -> workflowStatusRepository\n .updateWorkflowStatus(TEST_WORKFLOW_ID, 
WorkflowStatusType.RUNNING));\n\n      // then\n      then(thrown)\n          .isInstanceOf(WorkflowStatusNotFoundException.class)\n          .hasMessage(\"Workflow status for Workflow id: test-workflow-id not found\");\n    }\n\n    @Test\n    void shouldThrowExceptionIfWorkflowHasAlreadyDefinedStatus() {\n\n      // given\n      Date createdDate = new Date();\n      List<QueryDocumentSnapshot> documents = Collections.singletonList(qDocSnap);\n      QuerySnapshot querySnapshot = QuerySnapshot\n          .withDocuments(null, Timestamp.now(), documents);\n      ApiFuture<QuerySnapshot> queryFuture = ApiFutures.immediateFuture(querySnapshot);\n\n      lenient().when(firestore.collection(COLLECTION_NAME)\n          .whereEqualTo(WORKFLOW_ID, TEST_WORKFLOW_ID).get())\n          .thenReturn(queryFuture);\n\n      givenDocSnap(qDocSnap, getWorkflowStatus(createdDate));\n\n      // when\n      Throwable thrown = catchThrowable(() -> workflowStatusRepository.updateWorkflowStatus(\n          TEST_WORKFLOW_ID, WorkflowStatusType.SUBMITTED));\n\n      // then\n      then(thrown)\n          .isInstanceOf(WorkflowStatusNotUpdatedException.class)\n          .hasMessage(\n              \"Workflow status for workflow id: test-workflow-id already has status:SUBMITTED and can not be updated\");\n    }\n  }\n\n  private WorkflowStatus getWorkflowStatus(Date createdDate) {\n    return WorkflowStatus.builder()\n        .workflowId(TEST_WORKFLOW_ID)\n        .airflowRunId(TEST_AIRFLOW_RUN_ID)\n        .workflowStatusType(WorkflowStatusType.SUBMITTED)\n        .submittedAt(createdDate)\n        .submittedBy(USER)\n        .build();\n  }\n\n  private void givenDocSnap(DocumentSnapshot qDocSnap, WorkflowStatus workflowStatus) {\n    given(qDocSnap.getString(WORKFLOW_ID)).willReturn(workflowStatus.getWorkflowId());\n    given(qDocSnap.getString(AIRFLOW_RUN_ID)).willReturn(workflowStatus.getAirflowRunId());\n    given(qDocSnap.getString(WORKFLOW_STATUS_TYPE))\n        .willReturn(workflowStatus.getWorkflowStatusType().name());\n    given(qDocSnap.getDate(SUBMITTED_AT)).willReturn(workflowStatus.getSubmittedAt());\n    given(qDocSnap.getString(SUBMITTED_BY)).willReturn(workflowStatus.getSubmittedBy());\n  }\n\n}\n" }, { "alpha_fraction": 0.7533753514289856, "alphanum_fraction": 0.760576069355011, "avg_line_length": 26.774999618530273, "blob_id": "e35ab581afb1a0b49517b8d852760cdd8d9a01fa", "content_id": "f262c25fdeaba61fb6eadbaefe9fcc5fc3a2c259", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1111, "license_type": "permissive", "max_line_length": 75, "num_lines": 40, "path": "/compatibility-layer/service/delivery/src/main/java/com/osdu/model/osdu/delivery/dto/ResponseItem.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.model.osdu.delivery.dto;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonInclude.Include;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport java.util.Map;\nimport lombok.Builder;\nimport 
lombok.Data;\n\n@Data\n@Builder\n@JsonInclude(Include.NON_NULL)\npublic class ResponseItem {\n\n @JsonProperty(\"FileLocation\")\n ResponseFileLocation fileLocation;\n\n @JsonProperty(\"Data\")\n Map<String, Object> data;\n\n @JsonProperty(\"SRN\")\n String srn;\n\n}\n" }, { "alpha_fraction": 0.7231968641281128, "alphanum_fraction": 0.7387914061546326, "avg_line_length": 30.090909957885742, "blob_id": "4341a3e7bc64f144db3a8df41c7c3191b4ec2a82", "content_id": "f79ce42e3c285805ad233acbc3b65c73e255a5d4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1026, "license_type": "permissive", "max_line_length": 75, "num_lines": 33, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/StatusChangedTag.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal;\n\nimport lombok.Data;\n\n@Data\npublic class StatusChangedTag {\n private String changedTagName;\n private Enum changedTagStatus;\n\n StatusChangedTag(){\n }\n public StatusChangedTag(String changedTagName, Enum changedTagStatus) {\n this.changedTagName = changedTagName;\n this.changedTagStatus = changedTagStatus;\n }\n}\n" }, { "alpha_fraction": 0.732528030872345, "alphanum_fraction": 0.7372735142707825, "avg_line_length": 37.63333511352539, "blob_id": "8a1afaa3bdac4b50806dbe5a11eaa11c42cee2f5", "content_id": "2df3d3dc15ecd630b991b7370e37f81a7711918b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2318, "license_type": "permissive", "max_line_length": 86, "num_lines": 60, "path": "/compatibility-layer/service/delivery/src/main/java/com/osdu/service/processing/delfi/DelfiResultDataConverter.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.processing.delfi;\n\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResult;\nimport com.osdu.model.osdu.delivery.delfi.ProcessingResultStatus;\nimport com.osdu.model.osdu.delivery.dto.DeliveryResponse;\nimport com.osdu.model.osdu.delivery.dto.ResponseFileLocation;\nimport com.osdu.model.osdu.delivery.dto.ResponseItem;\nimport 
com.osdu.service.processing.ResultDataConverter;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.stream.Collectors;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\npublic class DelfiResultDataConverter implements ResultDataConverter {\n\n  @Override\n  public DeliveryResponse convertProcessingResults(List<ProcessingResult> results) {\n    log.debug(\"Processing results : {}\", results);\n\n    Map<Boolean, List<ProcessingResult>> processedToResultMap = results.stream()\n        .collect(Collectors.partitioningBy(\n            result -> !result.getProcessingResultStatus()\n                .equals(ProcessingResultStatus.NO_MAPPING)));\n\n    List<String> unprocessedSrns = processedToResultMap.get(Boolean.FALSE).stream()\n        .map(ProcessingResult::getSrn)\n        .collect(Collectors.toList());\n\n    List<ResponseItem> responseItems = processedToResultMap.get(Boolean.TRUE).stream()\n        .map(result -> ResponseItem.builder()\n            .fileLocation(result.getFileLocation() == null ? null :\n                new ResponseFileLocation(result.getFileLocation()))\n            .data(result.getData())\n            .srn(result.getSrn()).build())\n        .collect(Collectors.toList());\n\n    return DeliveryResponse.builder()\n        .result(responseItems)\n        .unprocessedSrns(unprocessedSrns).build();\n  }\n}\n" }, { "alpha_fraction": 0.6934260725975037, "alphanum_fraction": 0.7085967063903809, "avg_line_length": 30.019607543945312, "blob_id": "b59fc006fcdb99fdc224cec34b2c30664ec9ffbc", "content_id": "89267698de96cea9c6cad20be0713c30d7e7dfbb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1582, "license_type": "permissive", "max_line_length": 88, "num_lines": 51, "path": "/osdu-r2/os-dags/default-ingest.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom datetime import datetime, timedelta\nfrom create_records import create_records\n\n\"\"\"\nA workflow creating a record\n\"\"\"\n\ndefault_args = {\n    'owner': 'Airflow',\n    'depends_on_past': False,\n    'start_date': days_ago(2),\n    'email': ['[email protected]'],\n    'email_on_failure': False,\n    'email_on_retry': False,\n    'retries': 1,\n    'retry_delay': timedelta(minutes=5),\n    # 'queue': 'bash_queue',\n    # 'pool': 'backfill',\n    # 'priority_weight': 10,\n    # 'end_date': datetime(2016, 1, 1),\n}\n\nworkflow_name = 'Default_ingest'\ndag = DAG(workflow_name, default_args=default_args, schedule_interval=timedelta(days=1))\n\n# comes from the experimental endpoint /api/experimental/dags/<DAG_NAME>/dag_runs \n\ncreate_records_op = PythonOperator(\n    task_id='create_records',\n    python_callable=create_records,\n    provide_context=True,\n    dag=dag\n)\n" }, { "alpha_fraction": 0.7087676525115967, "alphanum_fraction": 0.7155119776725769, "avg_line_length": 32.28571319580078, "blob_id": 
"6b3552c9c926c1b06638121da1e199a8fbfdca82", "content_id": "86e12ab5942d4e08f314cd91477e70aee93af188", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1631, "license_type": "permissive", "max_line_length": 91, "num_lines": 49, "path": "/compatibility-layer/common/src/main/java/com/osdu/request/OsduHeader.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.request;\n\nimport java.util.Map;\nimport lombok.extern.slf4j.Slf4j;\n\n@Slf4j\npublic final class OsduHeader {\n\n private OsduHeader() {\n }\n\n public static final String PARTITION = \"partition-id\";\n public static final String AUTHORIZATION = \"authorization\";\n public static final String LEGAL_TAGS = \"legal-tags\";\n public static final String RESOURCE_HOME_REGION_ID = \"resource-home-region-id\";\n public static final String RESOURCE_HOST_REGION_IDS = \"resource-host-region-ids\";\n\n /**\n * Extract header by name.\n *\n * @param headers headers from http request\n * @param headerKey header key\n * @return header value\n */\n public static String extractHeaderByName(Map<String, Object> headers, String headerKey) {\n log.debug(\"Extracting header with name : {} from map : {}\", headerKey, headers);\n String value = (String) headers.get(headerKey);\n log.debug(\"Does the request contain the '{}' header? {}. 
Value: {}\",\n headerKey, headers.containsKey(headerKey), value);\n return value;\n }\n\n}\n" }, { "alpha_fraction": 0.5985243916511536, "alphanum_fraction": 0.6002639532089233, "avg_line_length": 42.41666793823242, "blob_id": "f3b2bdf3f2aa7e87278324feb9519f0836371a2d", "content_id": "81d3d92e7c22d89eb672c2ea2ac043c490c7bff1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 16671, "license_type": "permissive", "max_line_length": 146, "num_lines": 384, "path": "/osdu-r2/os-qa/src/test/java/com/osdu/ingest/e2e/IngestAnyCloudTests.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.ingest.e2e;\n\nimport com.osdu.core.data.provider.DataProviders;\nimport com.osdu.core.endpoints.factories.FactoriesHolder;\nimport com.osdu.core.reporter.TestReporter;\nimport io.qameta.allure.Description;\nimport io.qameta.allure.restassured.AllureRestAssured;\nimport io.restassured.response.Response;\nimport org.apache.commons.lang3.StringUtils;\nimport org.awaitility.Awaitility;\nimport org.hamcrest.Matchers;\nimport org.testng.annotations.Test;\n\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.UUID;\nimport java.util.concurrent.TimeUnit;\n\nimport static com.osdu.common.FilesKeeper.*;\nimport static com.osdu.core.data.parser.JsonParser.readJson;\nimport static com.osdu.core.data.provider.TestData.*;\nimport static io.restassured.RestAssured.given;\nimport static java.util.concurrent.TimeUnit.MINUTES;\nimport static org.apache.http.HttpStatus.*;\nimport static org.awaitility.Awaitility.await;\n\npublic class IngestAnyCloudTests extends BaseIngestService { //todo:::: add creds to env variable!!!!!!!\n FactoriesHolder factoriesHolder = new FactoriesHolder();\n\n /**\n * Services paths\n */\n String submitFunction = factoriesHolder.remoteFactoryCreator().getIngest(\"submit\");\n String getLocation = factoriesHolder.remoteFactoryCreator().getFileService(\"getLocation\");\n\n String getWorkflowStatus = factoriesHolder.remoteFactoryCreator().getWorkflowService(\"getStatus\");\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Valid flow send request with all required fields for well log data type and with auth token\")\n public void i1_checkIngestByFile(Map<String, String> data) {\n\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n .post(getLocation)\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(FILE_ID, 
Matchers.is(uniqueID))\n .assertThat().body(SIGNED_URL, Matchers.notNullValue())\n .log()\n .all();\n\n String requestWithTheNewCreatedId = String.format((readJson(requestForIngestTemplate).toString()), data.get(DATA_TYPE_LOG), uniqueID);\n TestReporter.reportStep(\"Created via template request to the ingest %s\", requestWithTheNewCreatedId);\n\n Response ingestResponse = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction);\n\n ingestResponse\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(WORKFLOW_ID, Matchers.notNullValue())\n .log()\n .all();\n\n String workflowId = ingestResponse.then()\n .extract()\n .path(WORKFLOW_ID);\n\n String requestForIngestStatus = String.format(readJson(requestForWorkflowStatusTemplate).toString(), workflowId);\n\n Awaitility.setDefaultPollDelay(15, TimeUnit.SECONDS);\n await()\n .atMost(1, MINUTES)\n .with()\n .pollInterval(10, TimeUnit.SECONDS)\n .until(() -> given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestForIngestStatus)\n .log()\n .method()\n .when()\n .post(getWorkflowStatus).jsonPath().get(STATUS), //TODO :: status should be finished\n s -> s.equals(data.get(STATUS)));\n\n TestReporter.reportStep(\"Job status is completed\");\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Valid flow send request with all required fields for opaque data type and with auth token\")\n public void i2_checkIngestByFile(Map<String, String> data) {\n\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n .post(getLocation)\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(FILE_ID, Matchers.is(uniqueID))\n .assertThat().body(SIGNED_URL, Matchers.notNullValue())\n .log()\n .all();\n\n String requestWithTheNewCreatedId = String.format((readJson(requestForIngestTemplate).toString()), data.get(DATA_TYPE_OPAQUE), uniqueID);\n TestReporter.reportStep(\"Created via template request to the ingest %s\", requestWithTheNewCreatedId);\n\n Response ingestResponse = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction);\n\n ingestResponse\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(WORKFLOW_ID, Matchers.notNullValue())\n .log()\n .all();\n\n String workflowId = ingestResponse.then()\n .extract()\n .path(WORKFLOW_ID);\n\n String requestForIngestStatus = String.format(readJson(requestForWorkflowStatusTemplate).toString(), workflowId);\n\n Awaitility.setDefaultPollDelay(15, TimeUnit.SECONDS);\n await()\n .atMost(1, MINUTES)\n .with()\n .pollInterval(10, TimeUnit.SECONDS)\n .until(() -> given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestForIngestStatus)\n .log()\n .method()\n .when()\n .post(getWorkflowStatus).jsonPath().get(STATUS), //TODO :: status should be finished\n s -> s.equals(data.get(STATUS)));\n\n TestReporter.reportStep(\"Job status is completed\");\n }\n\n 
@Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with all required fields and without auth tokens\")\n public void i3_checkIngestSubmitWithoutHeaders(Map<String, String> data) {\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n .post(getLocation)\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(FILE_ID, Matchers.is(uniqueID))\n .assertThat().body(SIGNED_URL, Matchers.notNullValue())\n .log()\n .all();\n\n String requestWithTheNewCreatedId = String.format((readJson(requestFileServicePath).toString()), uniqueID, data.get(DATA_TYPE_LOG));\n\n Response ingestFunction = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(new HashMap<>()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction);\n\n ingestFunction\n .then()\n .statusCode(SC_UNAUTHORIZED)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request without FileID field and with auth token\")\n public void i4_checkIngestSubmitWithoutWithoutOnOfTheRequiredFields(Map<String, String> data) {\n Response ingestFunction = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(readJson(requestForIngestWithoutFileId))\n .when()\n .post(submitFunction);\n\n ingestFunction\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with empty body and with auth token\")\n public void i5_checkIngestSubmitWithEmptyBody(Map<String, String> data) {\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(StringUtils.EMPTY)\n .when()\n .post(submitFunction)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send empty request with auth token\")\n public void i6_checkIngestSubmitWithEmptyValues(Map<String, String> data) {\n String requestWithTheNewCreatedId = String.format((readJson(requestForIngestTemplate).toString()), StringUtils.EMPTY, StringUtils.EMPTY);\n TestReporter.reportStep(\"Created via template request to the ingest %s\", requestWithTheNewCreatedId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_INVALID_FORMAT)))\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with invalid dataType and with auth token\")\n public void i7_checkIngestWithInvalidDataType(Map<String, String> data) {\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID);\n 
TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n .post(getLocation)\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(FILE_ID, Matchers.is(uniqueID))\n .assertThat().body(SIGNED_URL, Matchers.notNullValue())\n .log()\n .all();\n\n String requestWithTheNewCreatedId = String.format((readJson(requestForIngestTemplate).toString()), data.get(DATA_TYPE_INVALID), uniqueID);\n TestReporter.reportStep(\"Created via template request to the ingest %s\", requestWithTheNewCreatedId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_INVALID_FORMAT)))\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with not existed file id and with auth token\")\n public void i8_checkIngestWithNotExistedFileId(Map<String, String> data) {\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n String requestWithTheNewCreatedId = String.format((readJson(requestForIngestTemplate).toString()), data.get(DATA_TYPE_LOG), uniqueID);\n TestReporter.reportStep(\"Created via template request to the ingest %s\", requestWithTheNewCreatedId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n // .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_INVALID_FORMAT)))\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with mismatched dataType value and with auth token\")\n public void i9_checkIngestWithMismatchedValueForDataType(Map<String, String> data) {\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n .post(getLocation)\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(FILE_ID, Matchers.is(uniqueID))\n .assertThat().body(SIGNED_URL, Matchers.notNullValue())\n .log()\n .all();\n\n String requestWithTheNewCreatedId = String.format((readJson(requestForIngestWithmismathedDataTypeValue).toString()), uniqueID);\n TestReporter.reportStep(\"Created via template request to the ingest %s\", requestWithTheNewCreatedId);\n\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestWithTheNewCreatedId)\n .when()\n .post(submitFunction)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .assertThat().body(MESSAGE, 
Matchers.containsString(data.get(ERROR_INVALID_FORMAT)))\n .log()\n .all();\n }\n}" }, { "alpha_fraction": 0.7685492634773254, "alphanum_fraction": 0.7735326886177063, "avg_line_length": 40.068180084228516, "blob_id": "ac7dcbbb799297e3d3c738c25f7ddcd659a5d30a", "content_id": "0ab2792e620130e74d6385831c878532d9493e30", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1806, "license_type": "permissive", "max_line_length": 99, "num_lines": 44, "path": "/compatibility-layer/service/delfi-client/src/main/java/com/osdu/client/DelfiStorageClient.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.client;\n\nimport com.osdu.model.Record;\nimport com.osdu.model.delfi.DelfiRecord;\nimport com.osdu.model.delfi.SaveRecordsResult;\nimport java.util.List;\nimport org.springframework.cloud.openfeign.FeignClient;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.PutMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\nimport org.springframework.web.bind.annotation.RequestHeader;\n\n@FeignClient(url = \"${osdu.delfi.portal.url}/de/storage/v2/records\", name = \"delfi.storage.client\")\npublic interface DelfiStorageClient {\n\n @GetMapping(\"/{recordId}\")\n DelfiRecord getRecord(@PathVariable(\"recordId\") String recordId,\n @RequestHeader(\"Authorization\") String authorizationToken,\n @RequestHeader(\"slb-data-partition-id\") String partition,\n @RequestHeader(\"AppKey\") String applicationKey);\n\n @PutMapping\n SaveRecordsResult putRecords(@RequestBody List<Record> records,\n @RequestHeader(\"Authorization\") String authorizationToken,\n @RequestHeader(\"slb-data-partition-id\") String partition,\n @RequestHeader(\"AppKey\") String applicationKey);\n}" }, { "alpha_fraction": 0.7059990167617798, "alphanum_fraction": 0.7124442458152771, "avg_line_length": 37.056602478027344, "blob_id": "b690a5437f56dab064a41ed5d5449418e53633e1", "content_id": "cd4c43618bccb57ecccb60b62c47815ec3d0ecb2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2017, "license_type": "permissive", "max_line_length": 111, "num_lines": 53, "path": "/osdu-r2/os-dags/create_records.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS 
OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom osdu_api.model.acl import Acl\nfrom osdu_api.model.legal import Legal\nfrom osdu_api.model.legal_compliance import LegalCompliance\nfrom osdu_api.model.record_ancestry import RecordAncestry\nfrom osdu_api.storage.record_client import RecordClient\nfrom osdu_api.model.record import Record\nimport json\nfrom airflow.models import Variable\n\n\ndef create_records(**kwargs):\n # the only way to pass in values through the experimental api is through the conf parameter\n data_conf = kwargs['dag_run'].conf\n\n acl_dict = json.loads(data_conf['acl'])\n acl = Acl(acl_dict['viewers'], acl_dict['owners'])\n\n legal_dict = json.loads(data_conf['legal-tags'])\n legal = Legal(legal_dict['legaltags'], legal_dict['otherRelevantDataCountries'], LegalCompliance.compliant)\n ancestry = RecordAncestry([])\n record_id = None\n kind = Variable.get('record_kind')\n meta = [{}]\n version = 0\n data = data_conf['data']\n record = Record(record_id, version, kind, acl, legal, data, ancestry, meta)\n\n headers = {\n 'content-type': 'application/json',\n 'slb-data-partition-id': data_conf['partition-id'],\n 'Authorization': data_conf['authorization'],\n 'AppKey': data_conf['app-key']\n }\n\n record_client = RecordClient()\n resp = record_client.create_update_records([record], headers)\n\n return {\"response_status\": resp.status_code, \"text\": json.loads(resp.text)}\n" }, { "alpha_fraction": 0.7119691371917725, "alphanum_fraction": 0.7250965237617493, "avg_line_length": 29.83333396911621, "blob_id": "1b1d364b4159cac85f9f6f72bab12cb4ce766a45", "content_id": "58b1bfaedcd04e656ef7fc6572f9e4b02b4ae654", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1295, "license_type": "permissive", "max_line_length": 85, "num_lines": 42, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/ClusterSettings.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\n\n@Data\n@AllArgsConstructor\n@Builder\npublic class ClusterSettings {\n private String host;\n private int port;\n private String userNameAndPassword;\n private boolean https;\n private boolean tls;\n\n //TODO: Remove after fixing other modules which uses the legacy 3 arg constructor\n public ClusterSettings(String host, int port, String userNameAndPassword){\n this.host = host;\n this.port = port;\n this.userNameAndPassword = userNameAndPassword;\n this.https = true;\n this.tls = true;\n }\n}\n" }, { "alpha_fraction": 0.7795275449752808, "alphanum_fraction": 0.7900262475013733, "avg_line_length": 37.099998474121094, 
"blob_id": "fd4dcf463cdd917cc7011a5b0432f115ea4988bf", "content_id": "269acf839bfb3de981ac5beef684406cb9fc2160", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1524, "license_type": "permissive", "max_line_length": 176, "num_lines": 40, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/search/IndicesService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.search;\n\nimport org.elasticsearch.ElasticsearchStatusException;\nimport org.elasticsearch.client.RestHighLevelClient;\nimport org.elasticsearch.common.settings.Settings;\nimport org.opengroup.osdu.core.common.model.search.IndexInfo;\n\nimport java.io.IOException;\nimport java.util.List;\nimport java.util.Map;\n\npublic interface IndicesService {\n\n boolean createIndex(RestHighLevelClient client, String index, Settings settings, String type, Map<String, Object> mapping) throws ElasticsearchStatusException, IOException;\n\n boolean isIndexExist(RestHighLevelClient client, String index) throws IOException;\n\n boolean deleteIndex(RestHighLevelClient client, String index) throws Exception;\n\n boolean deleteIndex(String index) throws Exception;\n\n List<IndexInfo> getIndexInfo(RestHighLevelClient client, String indexPattern) throws IOException;\n}\n" }, { "alpha_fraction": 0.6556499004364014, "alphanum_fraction": 0.6692875027656555, "avg_line_length": 45.96731948852539, "blob_id": "ee495fb11befface00d9215f3969d0abfb373f20", "content_id": "c0cd7df4dfc16503c58a00822bf59329ba1a637a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14372, "license_type": "permissive", "max_line_length": 142, "num_lines": 306, "path": "/osdu-r2/os-workflow/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU Workflow Service\n\n## Contents\n\n* [Introduction](#introduction)\n* [System interactions](#system-interactions)\n* [Workflow API](#workflow-api)\n * [POST /startWorkflow](#post-startworkflow)\n * [POST /getStatus](#post-getstatus)\n * [POST /updateWorkflowStatus](#post-updateworkflowstatus)\n* [Service Provider Interfaces](#workflow-service-provider-interfaces)\n* [GCP implementation](#gcp-implementation)\n* [Firestore](#firestore-collections)\n\n## Introduction\n\nThe OSDU R2 Workflow service is designed to start business processes in the system. 
In the OSDU R2\nprototype phase, the service only starts ingestion of OSDU data.\n\nThe Workflow service provides a wrapper functionality around the Apache Airflow functions and is\ndesigned to carry out preliminary work with files before running the Airflow Directed Acyclic Graphs\n(DAGs) that will perform actual ingestion of OSDU data.\n\nIn OSDU R2, depending on the types of data, workflow, and user, the Workflow service starts the\nnecessary workflow such as well log ingestion or opaque ingestion.\n\n## System interactions\n\nThe Workflow service in the OSDU R2 Prototype defines the following workflows:\n\n* Ingestion of new files\n* Delivery of an ingestion workflow status\n* Update of the workflow status\n\n### Start ingestion\n\nThe ingestion workflow starts with a call to the `/startWorkflow` API endpoint. The following diagram\nshows the workflow.\n\n![OSDU R2 WorkflowService startWorkflow](https://user-images.githubusercontent.com/21691607/75542676-ef684080-5a28-11ea-93a3-c28ed13c1fe5.png)\n\nUpon a `/startWorkflow` request:\n\n1. Validate the incoming request.\n * Verify the authorization token. Fail ingestion if the token is missing or invalid, and then\n respond with the `401 Unauthorized` status.\n * Verify the partition ID. Fail ingestion if the partition ID is missing, invalid or doesn't\n have assigned user groups, and then respond with the `400 Bad Request` status.\n * Check that the workflow type is \"ingest\" or \"osdu\".\n * Check that the data type is \"well_log\" or \"opaque\".\n > The `DataType` property can actually be any string value. If the `DataType` value is not\n > \"well_log\", then it's treated as the \"opaque\" data type.\n2. Query the database to obtain a DAG suitable for the current request. The Workflow service\ndecides which DAG to run by the following three parameters:\n * `WorkflowType`\n * `DataType`\n * `UserType`\n3. Submit a new ingestion job to the OSDU R2 Workflow Engine (Apache Airflow).\n4. Create a workflow data record in the database with the **submitted** status.\n5. Respond with the workflow ID to the Ingestion service.\n\n### Get workflow status\n\nUpon a `/getStatus` request:\n\n1. Validate the incoming request.\n * Verify the authorization token. If the token is missing or invalid, respond with the `401\n Unauthorized` status.\n * Verify the partition ID. If the partition ID is missing, invalid or doesn't have assigned user\n groups, respond with the `400 Bad Request` status.\n2. Query the database with the workflow ID received from the client.\n * Respond with the `404 Not Found` status if the requested workflow ID isn't found.\n3. Return the workflow job status to the user or application.\n\n### Update workflow status\n\nIn OSDU R2, the Opaque Ingestion DAG or the Manifest Ingestion DAG queries the Workflow service to\nupdate the status of a workflow job. The ingestion workflow status can be set to **running**,\n**finished**, or **failed**.\n\n![OSDU R2 Workflow Status Update](https://user-images.githubusercontent.com/21691607/77782134-77a92800-705f-11ea-92f0-b36f33e00fe0.png)\n\nUpon an `/updateWorkflowStatus` request:\n\n1. Validate the incoming request.\n * Verify the authorization token. Fail workflow status update if the token is missing or\n invalid, and then respond with the `401 Unauthorized` status.\n * Verify the partition ID. 
Fail workflow status update if the partition ID is missing, invalid\n or doesn't have assigned user groups, and then respond with the `400 Bad Request` status.\n * Fail the request if the workflow ID or status is not provided.\n * Fail the request if the workflow status is not **running**, **finished**, or **failed**.\n2. Update the workflow status in the database.\n * Fail the update if the workflow ID is not found in the database.\n * Fail the update if there's more than one workflow found by the workflow ID.\n * Fail the update if the stored status and the incoming status are the same.\n3. Return the workflow ID and the workflow status to the OSDU R2 service or component that requested\nthe update.\n\n## Workflow API\n\nThe OSDU R2 Workflow API includes the following endpoints:\n\n* `/startWorkflow`, internal\n* `/getStatus`, external\n* `/updateWorkflowStatus`, internal\n\nGeneral considerations related to querying the Workflow API:\n\n* Each endpoint must receive the authentication bearer token in the \"Authorization\" header. Example:\n`\"Authorization\": \"Bearer {token}\"`\n* Each endpoint must receive the partition ID in the \"Partition-Id\" header. Example:\n`\"Partition-Id\": \"default_partition\"`\n* The request and response Content Type is \"application/json\".\n\n### POST /startWorkflow\n\nThe `/startWorkflow` API endpoint starts a new workflow. This endpoint is closed for external requests.\n\nThe `/startWorkflow` endpoint is a wrapper around the Airflow invocation, and is designed to\nreconfigure the default workflows. For each combination of the user, data, and workflow types, the\nAPI identifies a suitable DAG and then calls Airflow.\n\nFor the OSDU R2 Prototype, the API doesn't reconfigure the workflows and only queries the database to\ndetermine which DAG to run.\n\n#### Request body\n\n| Property | Type | Description |\n| ------------ | -------- | --------------------------------------------------------------- |\n| WorkflowType | `String` | Type of workflow job to run &mdash; \"osdu\" or \"ingest\" |\n| DataType | `String` | Type of data to be ingested &mdash; \"well_log\" or \"opaque\" |\n| Context | `Object` | Data required to run a DAG, provided as a list of key-value pairs |\n\n> The Context may include a file location, ACL and legal tags, and the Airflow run ID. The\n> `/startWorkflow` API passes the Context to Airflow without modifying it.\n\nRequest example:\n\n```sh\ncurl --location --request POST 'https://{path}/startWorkflow' \\\n --header 'Authorization: Bearer {token}' \\\n --header 'Partition-Id: {assigned DELFI partition ID}' \\\n --header 'Content-Type: application/json' \\\n --data-raw '{\n \"WorkflowType\": \"ingest\",\n \"DataType\": \"opaque\",\n \"Context\": {}\n }'\n```\n\n#### Response body\n\n| Property | Type | Description |\n| ---------- | -------- | ----------------------------- |\n| WorkflowID | `String` | Unique ID of the workflow job |\n\n### POST /getStatus\n\nThe `/getStatus` API endpoint returns the current status of a workflow job. 
This endpoint is\navailable for external requests.\n\n#### Request body\n\n| Property | Type | Description |\n| ---------- | -------- | --------------------------- |\n| WorkflowID | `String` | Unique ID of a workflow job |\n\nRequest example:\n\n```sh\ncurl --location --request POST 'https://{path}/getStatus' \\\n --header 'Authorization: Bearer {token}' \\\n --header 'Partition-Id: {assigned DELFI partition ID}' \\\n --header 'Content-Type: application/json' \\\n --data-raw '{\n \"WorkflowID\": \"2b905e77-7e04-4c04-8581-7b4c224164dd\"\n }'\n```\n\n#### Response body\n\nIf the workflow ID is found in the database, the following response is returned to the user.\n\n| Property | Type | Description |\n| -------- | -------- | ---------------------------------------------------------------------------- |\n| Status | `String` | Current status of a workflow &mdash; submitted, running, finished, or failed |\n\nIf the workflow ID isn't found in the database, the `404 Not Found` status is returned.\n\n### POST /updateWorkflowStatus\n\nThe `/updateWorkflowStatus` API endpoint updates the status of a workflow job. This endpoint is not\navailable for external requests. The endpoint is necessary to let Apache Airflow DAGs update the\nworkflow status.\n\n#### Request body\n\n| Property | Type | Description |\n| ---------- | -------- | ------------------------------------------------ |\n| WorkflowID | `String` | Unique ID of a workflow that needs to be updated |\n| Status | `String` | New status of the workflow |\n\nRequest example:\n\n```sh\ncurl --location --request POST 'https://{path}/updateWorkflowStatus' \\\n --header 'Authorization: Bearer {token}' \\\n --header 'Partition-Id: {assigned DELFI partition ID}' \\\n --header 'Content-Type: application/json' \\\n --data-raw '{\n \"WorkflowID\": \"2b905e77-7e04-4c04-8581-7b4c224164dd\",\n \"Status\": \"finished\"\n }'\n```\n\n#### Response body\n\n| Property | Type | Description |\n| ---------- | -------- | ---------------------------------------- |\n| WorkflowID | `String` | Unique ID of a workflow that was updated |\n| Status | `String` | The latest status of the workflow |\n\nResponse body example:\n\n```json\n{\n \"WorkflowID\": \"2b905e77-7e04-4c04-8581-7b4c224164dd\",\n \"Status\": \"finished\"\n}\n```\n\n## Workflow Service Provider Interfaces\n\nThe Workflow service has several Service Provider Interfaces that the classes need to implement.\n\n| Interface | Obligatory / Optional | Path |\n| --------------------------- | ----------------------- | ---------------------------------------------------------------------------- |\n| AuthenticationService | Obligatory to implement | `workflow-core/src/main/.../provider/interfaces/AuthenticationService` |\n| IngestionStrategyRepository | Obligatory to implement | `workflow-core/src/main/.../provider/interfaces/IngestionStrategyRepository` |\n| IngestionStrategyService | Optional to implement | `workflow-core/src/main/.../provider/interfaces/IngestionStrategyService` |\n| SubmitIngestService | Obligatory to implement | `workflow-core/src/main/.../provider/interfaces/SubmitIngestService` |\n| ValidationService | Optional to implement | `workflow-core/src/main/.../provider/interfaces/ValidationService` |\n| WorkflowService | Optional to implement | `workflow-core/src/main/.../provider/interfaces/WorkflowService` |\n| WorkflowStatusRepository | Obligatory to implement | `workflow-core/src/main/.../provider/interfaces/WorkflowStatusRepository` |\n| WorkflowStatusService | Optional to 
implement | `workflow-core/src/main/.../provider/interfaces/WorkflowStatusService` |\n\n## GCP implementation\n\nThe GCP Identity and Access Management service account for the Workflow service must have the\n**Composer User** and **Cloud Datastore User** roles.\n\nNote that obtaining user credentials for Application Default Credentials isn't suitable for\ndevelopment purposes because signing a blob is only available with the service account credentials.\nRemember to set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. Follow the [instructions\non the Google developer's portal][application-default-credentials].\n\n### Persistence layer\n\nThe GCP implementation contains two mutually exclusive modules to work with the persistence layer.\nPresently, OSDU R2 connects to legacy Cloud Datastore for compatibility with the current OpenDES\nimplementation. In future OSDU releases, Cloud Datastore will be replaced by the existing Cloud\nFirestore implementation that's already available in the project.\n\n* The Cloud Datastore implementation is located in the **provider/workflow-gcp-datastore** folder.\n* The Cloud Firestore implementation is located in the **provider/workflow-gcp** folder.\n\nTo learn more about available collections, see the [Firestore collections](#firestore-collections)\nsection.\n\n## Firestore collections\n\nUpon an ingestion request, the Workflow service needs to determine which DAG to run. To do that, the\nservice queries the database with the workflow type and data type.\n\nThe GCP-based implementation of the Workflow service uses Cloud Firestore with the following\n`ingestion-strategy` and `workflow-status` collections.\n\n> The Cloud Datastore implementation in OSDU R2 uses the same collections as Cloud Firestore.\n\n### `ingestion-strategy`\n\nThe database needs to store the following information to help determine a DAG.\n\n| Property | Type | Description |\n| ------------ | -------- | --------------------------------------------------- |\n| WorkflowType | `String` | Supported workflow types &mdash; \"osdu\" or \"ingest\" |\n| DataType | `String` | Supported data types &mdash; \"well_log\" or \"opaque\" |\n| UserID | `String` | Unique identifier of the user group or role |\n| DAGName | `String` | Name of the DAG |\n\n> The OSDU R2 Prototype doesn't support the **UserID** property. When the security system is\n> finalized, the **UserID** property will store the ID of the user group or role.\n\n### `workflow-status`\n\nAfter a workflow starts, the Workflow service stores the following information in the database.\n\n| Property | Type | Description |\n| ------------ | -------- | ---------------------------------------------------------------------------- |\n| WorkflowID | `String` | Unique workflow ID |\n| AirflowRunID | `String` | Unique Airflow process ID generated by the Workflow service |\n| Status | `String` | Current status of a workflow &mdash; submitted, running, finished, or failed |\n| SubmittedAt | `String` | Timestamp when the workflow job was submitted to Workflow Engine |\n| SubmittedBy | `String` | ID of the user role or group. 
Not supported in OSDU R2 |\n\n[application-default-credentials]: https://developers.google.com/identity/protocols/application-default-credentials#calling\n" }, { "alpha_fraction": 0.7085561752319336, "alphanum_fraction": 0.7192513346672058, "avg_line_length": 34.03125, "blob_id": "109827eaa3c2b43737da0c6e2e8828d54e750727", "content_id": "69c9223e30f4ef91f49e041afe9ff5e5f9aa084f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1122, "license_type": "permissive", "max_line_length": 79, "num_lines": 32, "path": "/osdu-r2/os-python-sdk/osdu_api/test/test_base_client.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport mock\nfrom osdu_api.base_client import BaseClient\n\nclass TestBaseClient(unittest.TestCase):\n\n @mock.patch.object(BaseClient, '_get_bearer_token', return_value=\"stubbed\")\n @mock.patch.object(BaseClient, '_read_variables', return_value=\"stubbed\")\n def test_init(self, mocked_token_method, mocked_config_method):\n # Arrange\n\n # Act\n client = BaseClient()\n\n # Assert\n mocked_token_method.assert_called()\n mocked_config_method.assert_called()\n\n" }, { "alpha_fraction": 0.7181243300437927, "alphanum_fraction": 0.7218459248542786, "avg_line_length": 39.712120056152344, "blob_id": "825b061beff7cc471996d97144cf59d06b2a8f43", "content_id": "0ab288a030657e8bc0de8e3963614e40ca0e84b2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 13435, "license_type": "permissive", "max_line_length": 226, "num_lines": 330, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/search/Config.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.search;\n\nimport com.google.common.base.Strings;\nimport org.opengroup.osdu.core.common.model.search.DeploymentEnvironment;\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.stereotype.Component;\n\nimport java.util.regex.PatternSyntaxException;\n\n@Component\npublic class Config {\n\n @Value(\"${DEPLOYMENT_ENVIRONMENT}\")\n private static String DEPLOYMENT_ENVIRONMENT;\n\n 
@Value(\"${ENVIRONMENT}\")\n private static String ENVIRONMENT;\n\n @Value(\"${INDEXER_HOST}\")\n private static String INDEXER_HOST;\n\n @Value(\"${SEARCH_HOST}\")\n private static String SEARCH_HOST;\n\n @Value(\"${STORAGE_QUERY_RECORD_FOR_CONVERSION_HOST}\")\n private static String STORAGE_QUERY_RECORD_FOR_CONVERSION_HOST;\n\n @Value(\"${STORAGE_QUERY_RECORD_HOST}\")\n private static String STORAGE_QUERY_RECORD_HOST;\n\n @Value(\"${STORAGE_RECORDS_BATCH_SIZE}\")\n private static String STORAGE_RECORDS_BATCH_SIZE;\n\n @Value(\"${STORAGE_SCHEMA_HOST}\")\n private static String STORAGE_SCHEMA_HOST;\n\n @Value(\"${ENTITLEMENTS_HOST}\")\n private static String ENTITLEMENTS_HOST;\n\n @Value(\"${ENTITLEMENT_TARGET_AUDIENCE}\")\n private static String ENTITLEMENT_TARGET_AUDIENCE;\n\n @Value(\"${INDEXER_QUEUE_HOST}\")\n private static String INDEXER_QUEUE_HOST;\n\n @Value(\"${ELASTIC_DATASTORE_KIND}\")\n private static String ELASTIC_DATASTORE_KIND;\n\n @Value(\"${ELASTIC_DATASTORE_ID}\")\n private static String ELASTIC_DATASTORE_ID;\n\n @Value(\"${REDIS_SEARCH_HOST}\")\n private static String REDIS_SEARCH_HOST;\n\n @Value(\"${REDIS_SEARCH_PORT}\")\n private static String REDIS_SEARCH_PORT;\n\n @Value(\"${SCHEMA_CACHE_EXPIRATION}\")\n private static String SCHEMA_CACHE_EXPIRATION;\n\n @Value(\"${INDEX_CACHE_EXPIRATION}\")\n private static String INDEX_CACHE_EXPIRATION;\n\n @Value(\"${ELASTIC_CACHE_EXPIRATION}\")\n private static String ELASTIC_CACHE_EXPIRATION;\n\n @Value(\"${CURSOR_CACHE_EXPIRATION}\")\n private static String CURSOR_CACHE_EXPIRATION;\n\n @Value(\"${GOOGLE_CLOUD_PROJECT}\")\n private static String GOOGLE_CLOUD_PROJECT;\n\n @Value(\"${GAE_SERVICE}\")\n private static String GAE_SERVICE;\n\n @Value(\"${GAE_VERSION}\")\n private static String GAE_VERSION;\n\n @Value(\"${ELASTIC_HOST}\")\n private static String ELASTIC_HOST;\n\n @Value(\"${ELASTIC_CLUSTER_NAME}\")\n private static String ELASTIC_CLUSTER_NAME;\n\n @Value(\"${KEY_RING}\")\n private static String KEY_RING;\n\n @Value(\"${KMS_KEY}\")\n private static String KMS_KEY;\n\n @Value(\"${GOOGLE_AUDIENCES}\")\n private static String GOOGLE_AUDIENCES;\n\n @Value(\"${CRON_INDEX_CLEANUP_PATTERN}\")\n private static String CRON_INDEX_CLEANUP_PATTERN;\n\n @Value(\"${CRON_INDEX_CLEANUP_TENANTS}\")\n private static String CRON_INDEX_CLEANUP_TENANTS;\n\n @Value(\"${CRON_INDEX_CLEANUP_THRESHOLD_DAYS}\")\n private static String CRON_INDEX_CLEANUP_THRESHOLD_DAYS;\n\n @Value(\"${CRON_EMPTY_INDEX_CLEANUP_THRESHOLD_DAYS}\")\n private static String CRON_EMPTY_INDEX_CLEANUP_THRESHOLD_DAYS;\n\n @Value(\"${SMART_SEARCH_CCS_DISABLED}\")\n private static String SMART_SEARCH_CCS_DISABLED;\n\n\n public static DeploymentEnvironment getDeploymentEnvironment() {\n return Strings.isNullOrEmpty(DEPLOYMENT_ENVIRONMENT) ? DeploymentEnvironment.CLOUD : DeploymentEnvironment.valueOf(DEPLOYMENT_ENVIRONMENT);\n }\n\n public static String getIndexerHostUrl() {\n return !Strings.isNullOrEmpty(INDEXER_HOST) ? INDEXER_HOST : getEnvironmentVariable(\"INDEXER_HOST\");\n }\n\n public static String getSearchHostUrl() {\n return !Strings.isNullOrEmpty(SEARCH_HOST) ? SEARCH_HOST : getEnvironmentVariable(\"SEARCH_HOST\");\n }\n\n public static String getStorageQueryRecordFoRConversionHostUrl() {\n return !Strings.isNullOrEmpty(STORAGE_QUERY_RECORD_FOR_CONVERSION_HOST) ? 
STORAGE_QUERY_RECORD_FOR_CONVERSION_HOST : getEnvironmentVariable(\"STORAGE_QUERY_RECORD_FOR_CONVERSION_HOST\");\n }\n\n public static String getStorageQueryRecordHostUrl() {\n return !Strings.isNullOrEmpty(STORAGE_QUERY_RECORD_HOST) ? STORAGE_QUERY_RECORD_HOST : getEnvironmentVariable(\"STORAGE_QUERY_RECORD_HOST\");\n }\n\n // reduced to reflect limitation on storage record:batch api\n public static int getStorageRecordsBatchSize() {\n String storageRecordsBatchSize = !Strings.isNullOrEmpty(STORAGE_RECORDS_BATCH_SIZE) ? STORAGE_RECORDS_BATCH_SIZE : getEnvironmentVariable(\"STORAGE_RECORDS_BATCH_SIZE\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(storageRecordsBatchSize, 20));\n }\n\n public static String getStorageSchemaHostUrl() {\n return !Strings.isNullOrEmpty(STORAGE_SCHEMA_HOST) ? STORAGE_SCHEMA_HOST : getEnvironmentVariable(\"STORAGE_SCHEMA_HOST\");\n }\n\n public static String getEntitlementsHostUrl() {\n return !Strings.isNullOrEmpty(ENTITLEMENTS_HOST) ? ENTITLEMENTS_HOST : getEnvironmentVariable(\"ENTITLEMENTS_HOST\");\n }\n\n public static String getEntitlementTargetAudience() {\n return !Strings.isNullOrEmpty(ENTITLEMENT_TARGET_AUDIENCE) ? ENTITLEMENT_TARGET_AUDIENCE : getEnvironmentVariable(\"ENTITLEMENT_TARGET_AUDIENCE\");\n }\n\n public static String getIndexerQueueHost() {\n return !Strings.isNullOrEmpty(INDEXER_QUEUE_HOST) ? INDEXER_QUEUE_HOST : getEnvironmentVariable(\"INDEXER_QUEUE_HOST\");\n }\n\n public static String getElasticCredentialsDatastoreKind() {\n return !Strings.isNullOrEmpty(ELASTIC_DATASTORE_KIND) ? ELASTIC_DATASTORE_KIND : getEnvironmentVariable(\"ELASTIC_DATASTORE_KIND\");\n }\n\n public static String getElasticCredentialsDatastoreId() {\n return !Strings.isNullOrEmpty(ELASTIC_DATASTORE_ID) ? ELASTIC_DATASTORE_ID : getEnvironmentVariable(\"ELASTIC_DATASTORE_ID\");\n }\n\n public static boolean isLocalEnvironment() {\n String environment = !Strings.isNullOrEmpty(ENVIRONMENT) ? ENVIRONMENT : getEnvironmentVariable(\"ENVIRONMENT\");\n return \"local\".equalsIgnoreCase(environment);\n }\n\n public static boolean isPreP4d() {\n String environment = !Strings.isNullOrEmpty(ENVIRONMENT) ? ENVIRONMENT : getEnvironmentVariable(\"ENVIRONMENT\");\n return isLocalEnvironment() ||\n \"evd\".equalsIgnoreCase(environment) ||\n \"evt\".equalsIgnoreCase(environment);\n }\n\n public static boolean isPreDemo() {\n String environment = !Strings.isNullOrEmpty(ENVIRONMENT) ? ENVIRONMENT : getEnvironmentVariable(\"ENVIRONMENT\");\n return isPreP4d() ||\n \"p4d\".equalsIgnoreCase(environment);\n }\n\n public static String getSearchRedisHost() {\n return !Strings.isNullOrEmpty(REDIS_SEARCH_HOST) ? REDIS_SEARCH_HOST : getEnvironmentVariable(\"REDIS_SEARCH_HOST\");\n }\n\n public static int getSearchRedisPort() {\n String redisSearchPort = !Strings.isNullOrEmpty(REDIS_SEARCH_PORT) ? REDIS_SEARCH_PORT : getEnvironmentVariable(\"REDIS_SEARCH_PORT\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(redisSearchPort, 6379));\n }\n\n public static int getSchemaCacheExpiration() {\n String schemaCacheExpiration = !Strings.isNullOrEmpty(SCHEMA_CACHE_EXPIRATION) ? SCHEMA_CACHE_EXPIRATION : getEnvironmentVariable(\"SCHEMA_CACHE_EXPIRATION\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(schemaCacheExpiration, 60));\n }\n\n public static int getIndexCacheExpiration() {\n String indexCacheExpiration = !Strings.isNullOrEmpty(INDEX_CACHE_EXPIRATION) ? 
INDEX_CACHE_EXPIRATION : getEnvironmentVariable(\"INDEX_CACHE_EXPIRATION\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(indexCacheExpiration, 60));\n }\n\n public static int getKindsCacheExpiration() {\n return 2 * 24 * 60;\n }\n\n public static int getKindsRedisDataBase() {\n return 1;\n }\n\n public static int getAttributesCacheExpiration() {\n return 2 * 24 * 60;\n }\n\n public static int getElasticCacheExpiration() {\n String elasticCacheExpiration = !Strings.isNullOrEmpty(ELASTIC_CACHE_EXPIRATION) ? ELASTIC_CACHE_EXPIRATION : getEnvironmentVariable(\"ELASTIC_CACHE_EXPIRATION\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(elasticCacheExpiration, 1440));\n }\n\n public static int getCursorCacheExpiration() {\n String cursorCacheExpiration = !Strings.isNullOrEmpty(CURSOR_CACHE_EXPIRATION) ? CURSOR_CACHE_EXPIRATION : getEnvironmentVariable(\"CURSOR_CACHE_EXPIRATION\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(cursorCacheExpiration, 60));\n }\n\n\n // google cloud project\n public static String getGoogleCloudProjectId() {\n return !Strings.isNullOrEmpty(GOOGLE_CLOUD_PROJECT) ? GOOGLE_CLOUD_PROJECT : getEnvironmentVariable(\"GOOGLE_CLOUD_PROJECT\");\n }\n\n public static String getDeployedServiceId() {\n return !Strings.isNullOrEmpty(GAE_SERVICE) ? GAE_SERVICE : getEnvironmentVariable(\"GAE_SERVICE\");\n }\n\n public static String getDeployedVersionId() {\n return !Strings.isNullOrEmpty(GAE_VERSION) ? GAE_VERSION : getEnvironmentVariable(\"GAE_VERSION\");\n }\n\n // elastic cluster settings\n public static String getElasticServerAddress() {\n return !Strings.isNullOrEmpty(ELASTIC_HOST) ? ELASTIC_HOST : getEnvironmentVariable(\"ELASTIC_HOST\");\n }\n\n public static String getElasticClusterName() {\n return !Strings.isNullOrEmpty(ELASTIC_CLUSTER_NAME) ? ELASTIC_CLUSTER_NAME : getEnvironmentVariable(\"ELASTIC_CLUSTER_NAME\");\n }\n\n\n // google KMS settings\n public static String getKmsRing() {\n String keyRing = !Strings.isNullOrEmpty(KEY_RING) ? KEY_RING : getEnvironmentVariable(\"KEY_RING\");\n return getDefaultOrEnvironmentValue(keyRing, \"csqp\");\n }\n\n public static String getKmsKey() {\n String kmsKey = !Strings.isNullOrEmpty(KMS_KEY) ? KMS_KEY : getEnvironmentVariable(\"KMS_KEY\");\n return getDefaultOrEnvironmentValue(kmsKey, \"searchService\");\n }\n\n\n // google endpoints\n public static String getGoogleAudiences() {\n return !Strings.isNullOrEmpty(GOOGLE_AUDIENCES) ? GOOGLE_AUDIENCES : getEnvironmentVariable(\"GOOGLE_AUDIENCES\");\n }\n\n public static String[] getIndexCleanupPattern() {\n String patternStr = !Strings.isNullOrEmpty(CRON_INDEX_CLEANUP_PATTERN) ? CRON_INDEX_CLEANUP_PATTERN : getEnvironmentVariable(\"CRON_INDEX_CLEANUP_PATTERN\");\n if (!Strings.isNullOrEmpty(patternStr)) {\n try {\n return patternStr.split(\",\");\n } catch (PatternSyntaxException ignored) { }\n }\n return new String[0];\n }\n\n public static String[] getIndexCleanupTenants() {\n String patternStr = !Strings.isNullOrEmpty(CRON_INDEX_CLEANUP_TENANTS) ? CRON_INDEX_CLEANUP_TENANTS : getEnvironmentVariable(\"CRON_INDEX_CLEANUP_TENANTS\");\n if (!Strings.isNullOrEmpty(patternStr)) {\n try {\n return patternStr.split(\",\");\n } catch (PatternSyntaxException ignored) { }\n }\n return new String[0];\n }\n\n // number of days before the indices will be cleaned, most commonly used for tests indices\n public static int getIndexCleanupThresholdDays() {\n String cronIndexCleanupThresholdDays = !Strings.isNullOrEmpty(CRON_INDEX_CLEANUP_THRESHOLD_DAYS) ? 
CRON_INDEX_CLEANUP_THRESHOLD_DAYS : getEnvironmentVariable(\"CRON_INDEX_CLEANUP_THRESHOLD_DAYS\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(cronIndexCleanupThresholdDays, 3));\n }\n\n public static int getEmptyIndexCleanupThresholdDays() {\n String cronEmptyIndexCleanupThresholdDays = !Strings.isNullOrEmpty(CRON_EMPTY_INDEX_CLEANUP_THRESHOLD_DAYS) ? CRON_EMPTY_INDEX_CLEANUP_THRESHOLD_DAYS : getEnvironmentVariable(\"CRON_EMPTY_INDEX_CLEANUP_THRESHOLD_DAYS\");\n return Integer.parseInt(getDefaultOrEnvironmentValue(cronEmptyIndexCleanupThresholdDays, 7));\n }\n\n // TODO: Remove this temporary implementation when ECE CCS is utilized\n public static final Boolean isSmartSearchCcsDisabled() {\n String smartSearchCcsDisabled = !Strings.isNullOrEmpty(SMART_SEARCH_CCS_DISABLED) ? SMART_SEARCH_CCS_DISABLED : getEnvironmentVariable(\"SMART_SEARCH_CCS_DISABLED\");\n return Boolean.TRUE.toString().equalsIgnoreCase(smartSearchCcsDisabled);\n }\n\n private static <T> String getDefaultOrEnvironmentValue(T givenValue, T defaultValue) {\n if (givenValue == null || Strings.isNullOrEmpty(givenValue.toString())) {\n return defaultValue.toString();\n }\n return givenValue.toString();\n }\n\n private static String getEnvironmentVariable(String propertyKey) {\n return System.getProperty(propertyKey, System.getenv(propertyKey));\n }\n\n private static Config instance = new Config();\n\n public static Config Instance() {\n return instance;\n }\n}\n" }, { "alpha_fraction": 0.7285407781600952, "alphanum_fraction": 0.7371244430541992, "avg_line_length": 29.557376861572266, "blob_id": "ca1ca425de03970fe135071f116a04f3e1f669a9", "content_id": "655c123d8b2e3b3de821bb84bd3142a7d35f6e6e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1864, "license_type": "permissive", "max_line_length": 91, "num_lines": 61, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/RecordChangedMessages.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\n\nimport com.google.common.base.Strings;\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.springframework.web.context.annotation.RequestScope;\n\nimport java.util.Map;\n\n@Data\n@Builder\n@NoArgsConstructor\n@AllArgsConstructor\n@RequestScope\npublic class RecordChangedMessages {\n\n private String messageId;\n private String publishTime;\n private String data;\n private Map<String, String> attributes;\n\n public String getDataPartitionId() {\n String output = attributes.get(DpsHeaders.DATA_PARTITION_ID);\n if(Strings.isNullOrEmpty(output))\n output = attributes.get(DpsHeaders.ACCOUNT_ID);\n return output;\n 
}\n\n public boolean missingAccountId() {\n return this.attributes == null || Strings.isNullOrEmpty(this.getDataPartitionId());\n }\n\n public String getCorrelationId() {\n return attributes.get(DpsHeaders.CORRELATION_ID.toLowerCase());\n }\n\n public boolean hasCorrelationId() {\n return this.attributes != null && !Strings.isNullOrEmpty(this.getCorrelationId());\n }\n}\n" }, { "alpha_fraction": 0.7623478770256042, "alphanum_fraction": 0.7680744528770447, "avg_line_length": 33.07316970825195, "blob_id": "fcb7106f158baf148e6b36f9947d32e2406a6008", "content_id": "a8da0702472a10ad0e4f7f98910f228af7ecd122", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1397, "license_type": "permissive", "max_line_length": 75, "num_lines": 41, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/provider/interfaces/IValidationService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.provider.interfaces;\n\nimport javax.validation.ConstraintViolationException;\nimport org.opengroup.osdu.core.common.exception.BadRequestException;\nimport org.opengroup.osdu.ingest.model.SubmitRequest;\nimport org.opengroup.osdu.ingest.model.WorkProductLoadManifest;\n\npublic interface IValidationService {\n\n /**\n * Validates submit request using Java Bean Validation.\n *\n * @param request location request\n * @throws ConstraintViolationException if request is invalid\n */\n void validateSubmitRequest(SubmitRequest request);\n\n /**\n * Validates work product load manifest using JSON schema.\n * @param loadManifest load manifest\n * @throws BadRequestException if manifest is invalid\n */\n void validateManifest(WorkProductLoadManifest loadManifest);\n\n}\n" }, { "alpha_fraction": 0.7771998047828674, "alphanum_fraction": 0.7811009883880615, "avg_line_length": 43.36538314819336, "blob_id": "8fcbae11ba4d41b8feed0d357c5fc65ae34ab768", "content_id": "ac011e4afd3a71dfa7ccf87a3f50bf68773fbf94", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2307, "license_type": "permissive", "max_line_length": 95, "num_lines": 52, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/client/DelfiIngestionClient.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for 
the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.client;\n\nimport com.osdu.client.delfi.Header;\nimport com.osdu.model.delfi.signed.SignedUrlResult;\nimport com.osdu.model.delfi.status.JobStatusResponse;\nimport com.osdu.model.delfi.submit.SubmitFileObject;\nimport com.osdu.model.delfi.submit.SubmitFileResult;\nimport org.springframework.cloud.openfeign.FeignClient;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestHeader;\n\n@FeignClient(url = \"${osdu.delfi.portal.url}/de/ingestion/v1\", name = \"delfi.ingestion.client\")\npublic interface DelfiIngestionClient {\n\n @GetMapping(\"/landingzoneUrl?fileName={fileName}\")\n SignedUrlResult getSignedUrlForLocation(@PathVariable(\"fileName\") String fileName,\n @RequestHeader(Header.AUTHORIZATION) String authorizationToken,\n @RequestHeader(Header.APP_KEY) String applicationKey,\n @RequestHeader(Header.SLB_DATA_PARTITION_ID) String partition);\n\n @PostMapping(\"/submit\")\n SubmitFileResult submitFile(@RequestHeader(Header.AUTHORIZATION) String authorization,\n @RequestHeader(Header.APP_KEY) String applicationKey,\n @RequestHeader(Header.SLB_DATA_PARTITION_ID) String partition,\n @RequestHeader(Header.SLB_ACCOUNT_ID) String accountId,\n SubmitFileObject submitFileObject);\n\n @GetMapping(\"/status?jobId={jobId}\")\n JobStatusResponse getJobStatus(@PathVariable(\"jobId\") String jobId,\n @RequestHeader(Header.AUTHORIZATION) String authorizationToken,\n @RequestHeader(Header.APP_KEY) String applicationKey,\n @RequestHeader(Header.SLB_DATA_PARTITION_ID) String partition);\n\n}\n" }, { "alpha_fraction": 0.6187391877174377, "alphanum_fraction": 0.632556140422821, "avg_line_length": 26.247058868408203, "blob_id": "cdf7c1b70d1a4f9bdfc5d40e45ef50d86242de0a", "content_id": "e1b8d51c293023139da8c140acd6bf5a9c805fda", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": true, "language": "Java", "length_bytes": 2316, "license_type": "permissive", "max_line_length": 97, "num_lines": 85, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/cache/VmCacheTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.cache;\n\nimport org.junit.Test;\n\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertNull;\n\npublic class VmCacheTest {\n @Test\n public void should_returnCachedItem_andThen_returnUpdateItem_andThen_notReturnDeletedItem() {\n\n String id = \"1\";\n String value = \"abc\";\n VmCache<String, String> sut = new VmCache<>(2, 2);\n\n assertNull(sut.get(id));\n\n sut.put(id, value);\n assertEquals(value, sut.get(id));\n\n 
sut.put(id, \"newVal\");\n assertEquals(\"newVal\", sut.get(id));\n\n sut.delete(id);\n assertNull(sut.get(id));\n }\n\n @Test\n public void should_invalidateItem_after_expirationHasPassed() throws InterruptedException {\n String id = \"1\";\n String value = \"abc\";\n VmCache<String, String> sut = new VmCache<>(1, 1);\n\n sut.put(id, value);\n assertEquals(value, sut.get(id));\n Thread.sleep(1010);\n\n assertNull(sut.get(id));\n }\n\n @Test\n public void should_overwriteItems_after_cacheLimitIsReached() {\n String id = \"1\";\n String value = \"abc\";\n VmCache<String, String> sut = new VmCache<>(1, 1);\n\n sut.put(id, value);\n assertEquals(value, sut.get(id));\n\n sut.put(\"new\", \"value\");\n\n assertNull(sut.get(id));\n }\n\n @Test\n public void should_returnCachedItem_when_itHasBeenCleared() {\n\n String id = \"1\";\n String value = \"abc\";\n VmCache<String, String> sut = new VmCache<>(2, 2);\n sut.put(id, value);\n assertEquals(value, sut.get(id));\n\n sut.clearAll();\n\n assertNull(sut.get(id));\n }\n}\n" }, { "alpha_fraction": 0.7728915810585022, "alphanum_fraction": 0.7795180678367615, "avg_line_length": 36.727272033691406, "blob_id": "bd323022320420a2f9da00b18a9e4846d0ef625b", "content_id": "83236e3b8b3431ea973c6172cd4e06a90e39c705", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1660, "license_type": "permissive", "max_line_length": 95, "num_lines": 44, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/function/IngestFunction.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.function;\n\nimport com.osdu.model.IngestResult;\nimport com.osdu.model.type.manifest.LoadManifest;\nimport com.osdu.service.InitialIngestService;\nimport java.util.function.Function;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.messaging.Message;\nimport org.springframework.messaging.support.GenericMessage;\nimport org.springframework.stereotype.Component;\n\n@Component\n@Slf4j\n@RequiredArgsConstructor\npublic class IngestFunction implements Function<Message<LoadManifest>, Message<IngestResult>> {\n\n final InitialIngestService ingestService;\n\n @Override\n public Message<IngestResult> apply(Message<LoadManifest> objectMessage) {\n log.debug(\"Ingest request received, with following parameters: {}\", objectMessage);\n final IngestResult ingestResult = ingestService\n .ingestManifest(objectMessage.getPayload(), objectMessage.getHeaders());\n log.debug(\"Ingest result ready, request: {}, result:{}\", objectMessage, ingestResult);\n return new GenericMessage<>(ingestResult);\n }\n}\n" }, { "alpha_fraction": 0.7457512021064758, "alphanum_fraction": 0.751189649105072, "avg_line_length": 25.745454788208008, "blob_id": "bb7b31b5aed45824dc2842dfe0c8027e550ab26b", "content_id": 
"e78d3ae0edc60fc54e7b1eb205acc53fade36fc8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1471, "license_type": "permissive", "max_line_length": 75, "num_lines": 55, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/model/IngestionStrategy.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.model;\n\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.opengroup.osdu.core.common.model.WorkflowType;\n\n@Data\n@NoArgsConstructor\n@AllArgsConstructor\n@Builder\npublic class IngestionStrategy {\n\n @JsonProperty(Fields.WORKFLOW_TYPE)\n WorkflowType workflowType;\n\n @JsonProperty(Fields.DATA_TYPE)\n String dataType;\n\n @JsonProperty(Fields.USER_ID)\n String userId;\n\n @JsonProperty(Fields.DAG_NAME)\n String dagName;\n\n public static final class Fields {\n\n public static final String WORKFLOW_TYPE = \"WorkflowType\";\n public static final String DATA_TYPE = \"DataType\";\n public static final String USER_ID = \"UserID\";\n public static final String DAG_NAME = \"DAGName\";\n\n private Fields() {\n }\n }\n\n}\n" }, { "alpha_fraction": 0.7664999961853027, "alphanum_fraction": 0.7678333520889282, "avg_line_length": 37.709678649902344, "blob_id": "576252a7b1cb9ab4ff35b28408cf73a687ce8fa7", "content_id": "94559990aac2524b8a3f8c47a4956e5d686a4621", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6000, "license_type": "permissive", "max_line_length": 138, "num_lines": 155, "path": "/osdu-r2/os-workflow/workflow-core/src/test/java/org/opengroup/osdu/workflow/service/WorkflowServiceImplTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.service;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.ArgumentMatchers.isNull;\nimport static org.mockito.BDDMockito.given;\nimport 
static org.mockito.Mockito.doThrow;\nimport static org.mockito.Mockito.never;\nimport static org.mockito.Mockito.verify;\n\nimport java.util.HashMap;\nimport java.util.Map;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.ArgumentCaptor;\nimport org.mockito.Captor;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.model.WorkflowType;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowRequest;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowResponse;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.opengroup.osdu.workflow.exception.RuntimeException;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.interfaces.IIngestionStrategyService;\nimport org.opengroup.osdu.workflow.provider.interfaces.ISubmitIngestService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass WorkflowServiceImplTest {\n\n private static final String AUTHORIZATION_TOKEN = \"authToken\";\n private static final String PARTITION = \"partition\";\n private static final String DAG_NAME = \"dag-name\";\n private static final String TEST_EXCEPTION = \"test-exception\";\n private static final String DATA_TYPE = \"test-type\";\n\n @Mock\n private IValidationService validationService;\n @Mock\n private IIngestionStrategyService ingestionStrategyService;\n @Mock\n private ISubmitIngestService submitIngestService;\n @Mock\n private IWorkflowStatusRepository workflowStatusRepository;\n\n @Captor\n ArgumentCaptor<WorkflowStatus> workflowStatusCaptor;\n\n WorkflowServiceImpl workflowService;\n\n @BeforeEach\n void setUp() {\n workflowService = new WorkflowServiceImpl(validationService, ingestionStrategyService, submitIngestService, workflowStatusRepository);\n }\n\n @Test\n void shouldStartWorkflow() {\n\n // given\n HashMap<String, Object> context = new HashMap<>();\n context.put(\"key\", \"value\");\n StartWorkflowRequest request = StartWorkflowRequest.builder()\n .workflowType(WorkflowType.INGEST)\n .dataType(DATA_TYPE)\n .context(context).build();\n DpsHeaders headers = getMessageHeaders();\n given(ingestionStrategyService\n .determineStrategy(eq(WorkflowType.INGEST), eq(DATA_TYPE), isNull()))\n .willReturn(DAG_NAME);\n\n // when\n StartWorkflowResponse startWorkflowResponse = workflowService\n .startWorkflow(request, headers);\n\n // then\n then(startWorkflowResponse.getWorkflowId()).isNotNull();\n InOrder inOrder = Mockito.inOrder(validationService,\n ingestionStrategyService, submitIngestService, workflowStatusRepository);\n inOrder.verify(validationService).validateStartWorkflowRequest(request);\n inOrder.verify(ingestionStrategyService)\n .determineStrategy(eq(WorkflowType.INGEST), eq(DATA_TYPE), isNull());\n inOrder.verify(submitIngestService).submitIngest(eq(DAG_NAME), eq(context));\n inOrder.verify(workflowStatusRepository).saveWorkflowStatus(workflowStatusCaptor.capture());\n inOrder.verifyNoMoreInteractions();\n\n 
then(workflowStatusCaptor.getValue()).satisfies(status -> {\n then(status.getAirflowRunId()).isNotNull();\n then(status.getWorkflowId()).isNotNull();\n then(status.getWorkflowStatusType()).isEqualTo(WorkflowStatusType.SUBMITTED);\n });\n }\n\n @Test\n void shouldNotSaveWorkflowStatusIfSubmitRequestFails() {\n\n // given\n HashMap<String, Object> context = new HashMap<>();\n context.put(\"key\", \"value\");\n StartWorkflowRequest request = StartWorkflowRequest.builder()\n .workflowType(WorkflowType.INGEST)\n .dataType(DATA_TYPE)\n .context(context).build();\n DpsHeaders headers = getMessageHeaders();\n given(ingestionStrategyService\n .determineStrategy(eq(WorkflowType.INGEST), eq(DATA_TYPE), isNull()))\n .willReturn(DAG_NAME);\n doThrow(new RuntimeException(TEST_EXCEPTION)).when(submitIngestService)\n .submitIngest(eq(\n DAG_NAME),\n eq(context));\n\n // when\n Throwable thrown = catchThrowable(() -> workflowService\n .startWorkflow(request, headers));\n\n // then\n then(thrown).isInstanceOf(RuntimeException.class);\n verify(workflowStatusRepository, never()).saveWorkflowStatus(any());\n }\n\n private DpsHeaders getMessageHeaders() {\n Map<String, String> headers = new HashMap<>();\n headers.put(DpsHeaders.AUTHORIZATION, AUTHORIZATION_TOKEN);\n headers.put(DpsHeaders.DATA_PARTITION_ID, PARTITION);\n\n return DpsHeaders.createFromMap(headers);\n }\n}\n" }, { "alpha_fraction": 0.7123697400093079, "alphanum_fraction": 0.7174683809280396, "avg_line_length": 36.907562255859375, "blob_id": "4e46a8037543fc979167f8b293227d149eaa9c69", "content_id": "10933203f2ad9974fba3eeb96381ce80445d1444", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 9022, "license_type": "permissive", "max_line_length": 97, "num_lines": 238, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/http/UrlFetchServiceImplTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport org.apache.http.HttpEntity;\nimport org.apache.http.HttpStatus;\nimport org.apache.http.StatusLine;\nimport org.apache.http.client.methods.CloseableHttpResponse;\nimport org.apache.http.client.methods.HttpPost;\nimport org.apache.http.impl.client.CloseableHttpClient;\nimport org.apache.http.impl.client.HttpClients;\nimport org.junit.Before;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.InjectMocks;\nimport org.mockito.Mock;\nimport org.mockito.runners.MockitoJUnitRunner;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.opengroup.osdu.core.common.model.http.HttpResponse;\nimport org.powermock.core.classloader.annotations.PrepareForTest;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.InputStream;\nimport 
java.io.OutputStream;\nimport java.nio.charset.StandardCharsets;\n\nimport static org.junit.Assert.*;\nimport static org.mockito.Matchers.any;\nimport static org.mockito.Mockito.*;\n\n@RunWith(MockitoJUnitRunner.class)\n@PrepareForTest({HttpClients.class, OutputStream.class, DpsHeaders.class})\npublic class UrlFetchServiceImplTest {\n\n private static final String PUT = \"PUT\";\n private static final String POST = \"POST\";\n private static final String GET = \"GET\";\n private static final String HEADER_NAME = \"ANY_HEADER\";\n private static final String HEADER_VALUE = \"ANY_VALUE\";\n private static final String ADDRESS = \"http://test.com\";\n private static final String BODY = \"any http body\";\n private static final String RESPONSE = \"hello world\";\n\n @InjectMocks\n private UrlFetchServiceImpl sut;\n\n @Mock\n private HttpClientHandler httpClientHandler;\n\n @Mock\n private static DpsHeaders HEADERS;\n\n @Before\n public void setup() {\n HEADERS.put(HEADER_NAME, HEADER_VALUE);\n// mockStatic(HttpClients.class);\n }\n\n @Test\n public void should_returnResponse_when_getRequestIsSentSuccessfully() throws Exception {\n\n InputStream stream = new ByteArrayInputStream(RESPONSE.getBytes(StandardCharsets.UTF_8));\n\n StatusLine statusLine = mock(StatusLine.class);\n when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);\n\n HttpEntity entity = mock(HttpEntity.class);\n when(entity.getContent()).thenReturn(stream);\n\n CloseableHttpResponse response = mock(CloseableHttpResponse.class);\n when(response.getStatusLine()).thenReturn(statusLine);\n when(response.getEntity()).thenReturn(entity);\n\n CloseableHttpClient httpClient = mock(CloseableHttpClient.class);\n when(httpClient.execute(any(HttpPost.class))).thenReturn(response);\n\n// when(HttpClients.createDefault()).thenReturn(httpClient);\n\n HttpResponse httpResponse = mock(HttpResponse.class);\n when(httpResponse.getResponseCode()).thenReturn(200);\n when(httpResponse.getBody()).thenReturn(RESPONSE);\n\n when(httpClientHandler.sendRequest(any(), any())).thenReturn(httpResponse);\n\n HttpResponse result = this.sut.sendRequest(GET, ADDRESS, HEADERS, null, null);\n assertEquals(HttpStatus.SC_OK, result.getResponseCode());\n assertEquals(RESPONSE, result.getBody());\n }\n\n @Test\n public void should_returnHttp404_when_httpMethodIsInvalid() {\n\n try {\n this.sut.sendRequest(\"DELETE\", ADDRESS, HEADERS, null, BODY);\n\n fail(\"Should not succeed\");\n } catch (AppException e) {\n assertEquals(HttpStatus.SC_NOT_FOUND, e.getError().getCode());\n assertEquals(\"Invalid HTTP method\", e.getError().getReason());\n assertEquals(\"Invalid HTTP method\", e.getError().getMessage());\n } catch (Exception e) {\n fail(\"Should not get different exception\");\n }\n }\n\n @Test\n public void should_returnResponse_when_postRequestIsSentSuccessfully() throws Exception {\n\n InputStream stream = new ByteArrayInputStream(RESPONSE.getBytes(StandardCharsets.UTF_8));\n\n StatusLine statusLine = mock(StatusLine.class);\n when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);\n\n HttpEntity entity = mock(HttpEntity.class);\n when(entity.getContent()).thenReturn(stream);\n\n CloseableHttpResponse response = mock(CloseableHttpResponse.class);\n when(response.getStatusLine()).thenReturn(statusLine);\n when(response.getEntity()).thenReturn(entity);\n\n CloseableHttpClient httpClient = mock(CloseableHttpClient.class);\n when(httpClient.execute(any(HttpPost.class))).thenReturn(response);\n\n 
//when(HttpClients.createDefault()).thenReturn(httpClient);\n\n HttpResponse httpResponse = mock(HttpResponse.class);\n when(httpResponse.getResponseCode()).thenReturn(200);\n when(httpResponse.getBody()).thenReturn(RESPONSE);\n\n when(httpClientHandler.sendRequest(any(), any())).thenReturn(httpResponse);\n\n HttpResponse result = this.sut.sendRequest(POST, ADDRESS, HEADERS, null, BODY);\n assertEquals(HttpStatus.SC_OK, result.getResponseCode());\n assertEquals(RESPONSE, result.getBody());\n }\n\n @Test\n public void should_returnTrue_when_httpStatusCodeIsBetween200And204() {\n HttpResponse response = new HttpResponse();\n response.setResponseCode(HttpStatus.SC_OK);\n assertTrue(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_CREATED);\n assertTrue(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_ACCEPTED);\n assertTrue(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_NON_AUTHORITATIVE_INFORMATION);\n assertTrue(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_NO_CONTENT);\n assertTrue(response.isSuccessCode());\n }\n\n @Test\n public void should_returnFalse_when_httpStatusCodeIsLesserThan200AndBiggerThan204() {\n HttpResponse response = new HttpResponse();\n response.setResponseCode(HttpStatus.SC_CONTINUE);\n assertFalse(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_SWITCHING_PROTOCOLS);\n assertFalse(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_BAD_REQUEST);\n assertFalse(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_FORBIDDEN);\n assertFalse(response.isSuccessCode());\n\n response.setResponseCode(HttpStatus.SC_INTERNAL_SERVER_ERROR);\n assertFalse(response.isSuccessCode());\n }\n\n @Test\n public void should_returnNull_when_responseBodyIsNull() {\n HttpResponse response = new HttpResponse();\n\n assertNull(response.getAsJsonObject());\n }\n\n @Test\n public void should_returnResponseBodyInJsonFormat_when_responseBodyIsNotNull() {\n final String BODY = \"{\\\"status\\\":200}\";\n\n HttpResponse response = new HttpResponse();\n response.setBody(BODY);\n\n assertEquals(BODY, response.getAsJsonObject().toString());\n }\n\n @Test\n public void should_returnResponse_when_putRequestIsSentSuccessfully() throws Exception {\n\n InputStream stream = new ByteArrayInputStream(RESPONSE.getBytes(StandardCharsets.UTF_8));\n\n StatusLine statusLine = mock(StatusLine.class);\n when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);\n\n HttpEntity entity = mock(HttpEntity.class);\n when(entity.getContent()).thenReturn(stream);\n\n CloseableHttpResponse response = mock(CloseableHttpResponse.class);\n when(response.getStatusLine()).thenReturn(statusLine);\n when(response.getEntity()).thenReturn(entity);\n\n CloseableHttpClient httpClient = mock(CloseableHttpClient.class);\n when(httpClient.execute(any(HttpPost.class))).thenReturn(response);\n\n// when(HttpClients.createDefault()).thenReturn(httpClient);\n\n HttpResponse httpResponse = mock(HttpResponse.class);\n when(httpResponse.getResponseCode()).thenReturn(200);\n when(httpResponse.getBody()).thenReturn(RESPONSE);\n\n when(httpClientHandler.sendRequest(any(), any())).thenReturn(httpResponse);\n\n HttpResponse result = this.sut.sendRequest(PUT, ADDRESS, HEADERS, null, BODY);\n assertEquals(HttpStatus.SC_OK, result.getResponseCode());\n assertEquals(RESPONSE, result.getBody());\n }\n}\n" }, { "alpha_fraction": 0.7059314846992493, "alphanum_fraction": 0.7226399183273315, "avg_line_length": 
33.20000076293945, "blob_id": "0bea7d4c189c798408522f0d1b0d2ded62a85b42", "content_id": "4220b5f3d59eb8825cb644074ed0e363b915c9fd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1197, "license_type": "permissive", "max_line_length": 77, "num_lines": 35, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/http/HttpRequestTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport org.junit.Test;\n\nimport static junit.framework.TestCase.assertTrue;\nimport static org.junit.Assert.assertEquals;\n\npublic class HttpRequestTest {\n @Test\n public void should_setDefaults_when_creatingInstance() {\n HttpRequest sut = HttpRequest.get().url(\"http://google.com\").build();\n assertEquals(\"http://google.com\", sut.getUrl());\n assertEquals(\"GET\", sut.getHttpMethod());\n assertEquals(5000, sut.getConnectionTimeout());\n assertTrue(sut.isFollowRedirects());\n }\n\n}\n" }, { "alpha_fraction": 0.7482014298439026, "alphanum_fraction": 0.7609912157058716, "avg_line_length": 27.43181800842285, "blob_id": "09980a6d57c2a6fb2bba47d6612dee08fa47abc6", "content_id": "35ed6030557859d960e7768852ae0c718ef2da5c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1251, "license_type": "permissive", "max_line_length": 75, "num_lines": 44, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/tenant/TenantInfo.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// TODO: High-level file comment.\n\npackage org.opengroup.osdu.core.common.model.tenant;\n\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\nimport java.util.List;\n\n@Data\n@NoArgsConstructor\npublic class TenantInfo {\n\tpublic static final String COMMON = \"common\";\n\n\tprivate Long id;\n\tprivate String name;\n\tprivate String projectId;\n\tprivate String serviceAccount;\n\tprivate String complianceRuleSet;\n\tprivate String dataPartitionId;\n\tprivate List<String> crmAccountIds;\n\n\tpublic static class ComplianceRuleSets {\n\t\tpublic static final String SHARED = 
\"shared\";\n\t\tpublic static final String CUSTOMER = \"customer\";\n\t}\n}\n" }, { "alpha_fraction": 0.7552497982978821, "alphanum_fraction": 0.7606806755065918, "avg_line_length": 36.32432556152344, "blob_id": "065fd207931d27c0297cbf217ec426425439cd2f", "content_id": "be1a2a2cc69fd3e0185eef14fb65aa526bf9326a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2762, "license_type": "permissive", "max_line_length": 99, "num_lines": 74, "path": "/osdu-r2/os-delivery/provider/delivery-gcp-datastore/src/main/java/org/opengroup/osdu/delivery/provider/gcp/repository/GcpStorageRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.repository;\n\nimport static java.lang.String.format;\nimport static org.opengroup.osdu.delivery.provider.gcp.model.constant.StorageConstant.GCS_PROTOCOL;\n\nimport com.google.cloud.storage.Blob;\nimport com.google.cloud.storage.BlobId;\nimport com.google.cloud.storage.BlobInfo;\nimport com.google.cloud.storage.HttpMethod;\nimport com.google.cloud.storage.Storage;\nimport com.google.cloud.storage.Storage.SignUrlOption;\nimport java.net.URI;\nimport java.net.URL;\nimport java.nio.charset.StandardCharsets;\nimport java.util.concurrent.TimeUnit;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.apache.commons.lang3.ArrayUtils;\nimport org.opengroup.osdu.delivery.model.SignedObject;\nimport org.opengroup.osdu.delivery.provider.interfaces.StorageRepository;\nimport org.springframework.http.MediaType;\nimport org.springframework.stereotype.Repository;\nimport org.springframework.web.util.UriUtils;\n\n@Repository\n@Slf4j\n@RequiredArgsConstructor\npublic class GcpStorageRepository implements StorageRepository {\n\n final Storage storage;\n\n @Override\n public SignedObject createSignedObject(String bucketName, String filepath) {\n log.debug(\"Creating the signed blob in bucket {} for path {}\", bucketName, filepath);\n\n BlobId blobId = BlobId.of(bucketName, filepath);\n BlobInfo blobInfo = BlobInfo.newBuilder(blobId)\n .setContentType(MediaType.APPLICATION_OCTET_STREAM_VALUE)\n .build();\n Blob blob = storage.create(blobInfo, ArrayUtils.EMPTY_BYTE_ARRAY);\n URL signedUrl = storage.signUrl(blobInfo, 7L, TimeUnit.DAYS,\n SignUrlOption.httpMethod(HttpMethod.PUT),\n SignUrlOption.withV4Signature());\n\n log.debug(\"Signed URL for created storage object. 
Object ID : {} , Signed URL : {}\",\n        blob.getGeneratedId(), signedUrl);\n    return SignedObject.builder()\n        .uri(getObjectUri(blob))\n        .url(signedUrl)\n        .build();\n  }\n\n  private URI getObjectUri(Blob blob) {\n    String filepath = UriUtils.encodePath(blob.getName(), StandardCharsets.UTF_8);\n    return URI.create(format(\"%s%s/%s\", GCS_PROTOCOL, blob.getBucket(), filepath));\n  }\n\n}\n" }, { "alpha_fraction": 0.7597448825836182, "alphanum_fraction": 0.7682494521141052, "avg_line_length": 28.39583396911621, "blob_id": "ce860d9fe213914537983cc3e59531f601176555", "content_id": "bf04202eca2b91fbfe6fe6c452c308d59cd5939d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1411, "license_type": "permissive", "max_line_length": 120, "num_lines": 48, "path": "/osdu-r2/os-python-sdk/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU Python SDK\n\nOSDU DAGs are cloud platform-agnostic by design. However, each cloud platform has specific implementation\nrequirements, so the OSDU R2 Prototype provides a dedicated Python SDK to make sure that DAGs stay independent of the\ncloud platforms.\n\nThe Python SDK must be installed on the Airflow worker instance and be used by the DAGs. Each cloud provider needs to\nmodify and configure this SDK to run on their cloud platform.\n\nIn OSDU R2 Prototype, the SDK encapsulates calls to the ODES Storage and Search services. In future releases, the\nSDK might provide additional interfaces, in particular, for the Ingestion service.\n\nTo authenticate requests, the DAGs need to add a bearer token to the payload, which is passed to the SDK's methods when\ncalling the OSDU services.\n\nThe Python SDK is a package to interface with OSDU microservices.\n\nTo install this package:\n\n1. Install `setuptools` and `wheel`:\n\n```sh\npython3 -m pip install --user --upgrade setuptools wheel\n```\n\n2. Build the source and wheel distributions:\n\n```sh\npython setup.py sdist bdist_wheel\n```\n\n3. Uninstall any previously installed `osdu-api`:\n\n```sh\npip uninstall osdu-api\n```\n\n4. Install the built wheel, making sure to substitute your machine's path in the command:\n\n```sh\npython -m pip install <YOUR_PATH_TO_PYTHON_SDK>/dist/osdu_api-0.0.1-py3-none-any.whl\n```\n\n5. 
Import and use the SDK in your code:\n\n```python\nfrom osdu_api.storage.record_client import RecordClient\n```\n" }, { "alpha_fraction": 0.756373941898346, "alphanum_fraction": 0.771482527256012, "avg_line_length": 35.517242431640625, "blob_id": "2bfa639f74a8e31307678ba052a1efe03faf3268", "content_id": "3f038aac39e217a02cd6bd31132fa6df729f0ac4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1059, "license_type": "permissive", "max_line_length": 156, "num_lines": 29, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/http/IUrlFetchService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.http.HttpResponse;\n\nimport java.net.URISyntaxException;\nimport java.util.Map;\n\npublic interface IUrlFetchService {\n\n HttpResponse sendRequest(String httpMethod, String address, DpsHeaders headers, Map<String, String> queryParams, String body) throws URISyntaxException;\n}\n" }, { "alpha_fraction": 0.7305280566215515, "alphanum_fraction": 0.7318481802940369, "avg_line_length": 31.063491821289062, "blob_id": "c9454eb804345e98deda2c3019944acd6027fb7c", "content_id": "92185a746c292005449ad4599d152feb239a16ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6060, "license_type": "permissive", "max_line_length": 103, "num_lines": 189, "path": "/osdu-r2/os-ingest/ingest-core/src/test/java/org/opengroup/osdu/ingest/validation/ValidationServiceImplTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.validation;\n\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.BDDMockito.given;\n\nimport com.networknt.schema.ValidationMessage;\nimport com.networknt.schema.ValidatorTypeCode;\nimport java.util.Collections;\nimport javax.validation.ConstraintValidator;\nimport 
javax.validation.ConstraintValidatorFactory;\nimport javax.validation.ConstraintViolationException;\nimport javax.validation.Validation;\nimport javax.validation.Validator;\nimport javax.validation.ValidatorFactory;\nimport org.hibernate.validator.HibernateValidatorConfiguration;\nimport org.junit.jupiter.api.BeforeAll;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Nested;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.Mock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.exception.BadRequestException;\nimport org.opengroup.osdu.ingest.ReplaceCamelCase;\nimport org.opengroup.osdu.ingest.model.SubmitRequest;\nimport org.opengroup.osdu.ingest.model.WorkProductLoadManifest;\nimport org.opengroup.osdu.ingest.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.ingest.validation.schema.ILoadManifestValidationService;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass ValidationServiceImplTest {\n\n private static final String FILE_ID = \"file-id\";\n\n @Mock\n private ILoadManifestValidationService loadManifestValidationService;\n\n private static Validator validator;\n private IValidationService validationService;\n\n @BeforeAll\n static void initAll() {\n HibernateValidatorConfiguration configuration = (HibernateValidatorConfiguration) Validation\n .byDefaultProvider()\n .configure();\n\n ValidatorFactory factory = configuration\n .constraintValidatorFactory(new TestConstraintValidatorFactory())\n .buildValidatorFactory();\n validator = factory.getValidator();\n }\n\n @BeforeEach\n void setUp() {\n validationService = new ValidationServiceImpl(validator, loadManifestValidationService);\n }\n\n @Nested\n class ValidateSubmitRequest {\n\n @Test\n void shouldSuccessfullyValidateEmptyRequest() {\n // given\n SubmitRequest request = SubmitRequest.builder()\n .fileId(FILE_ID)\n .dataType(\"WELL_LOG\")\n .build();\n\n // when\n Throwable thrown = catchThrowable(() -> validationService.validateSubmitRequest(request));\n\n // then\n assertThat(thrown).isNull();\n }\n\n @Test\n void shouldFailValidationIfNoDataType() {\n // given\n SubmitRequest request = SubmitRequest.builder()\n .fileId(FILE_ID)\n .build();\n\n // when\n Throwable thrown = catchThrowable(() -> validationService.validateSubmitRequest(request));\n\n // then\n assertThat(thrown)\n .isInstanceOf(ConstraintViolationException.class)\n .hasMessage(\"Invalid Submit request\");\n }\n\n @Test\n void shouldFailValidationIfNoFileId() {\n // given\n SubmitRequest request = SubmitRequest.builder()\n .dataType(\"WELL_LOG\")\n .build();\n\n // when\n Throwable thrown = catchThrowable(() -> validationService.validateSubmitRequest(request));\n\n // then\n assertThat(thrown)\n .isInstanceOf(ConstraintViolationException.class)\n .hasMessage(\"Invalid Submit request\");\n }\n\n }\n\n @Nested\n class ValidateManifest {\n\n @Test\n void shouldSuccessfullyValidateManifest() {\n // given\n WorkProductLoadManifest loadManifest = WorkProductLoadManifest.builder()\n .build();\n given(loadManifestValidationService.validateManifest(loadManifest))\n .willReturn(Collections.emptySet());\n\n // when\n Throwable thrown = catchThrowable(() -> validationService.validateManifest(loadManifest));\n\n // then\n then(thrown).isNull();\n }\n\n @Test\n void shouldThrownExceptionWhenValidationReturnsErrors() {\n // given\n 
WorkProductLoadManifest loadManifest = WorkProductLoadManifest.builder()\n .build();\n ValidationMessage message = ValidationMessage\n .of(\"type\", ValidatorTypeCode.TYPE, \"$.WorkProduct\", \"null\", \"object\");\n given(loadManifestValidationService.validateManifest(loadManifest))\n .willReturn(Collections.singleton(message));\n\n // when\n Throwable thrown = catchThrowable(() -> validationService.validateManifest(loadManifest));\n\n // then\n then(thrown)\n .isInstanceOf(BadRequestException.class)\n .hasMessageMatching(\"Failed to validate json from manifest (.*), validation result is (.*)\");\n }\n\n }\n\n static class TestConstraintValidatorFactory implements ConstraintValidatorFactory {\n\n ConstraintValidatorFactory constraintValidatorFactory = Validation\n .buildDefaultValidatorFactory().getConstraintValidatorFactory();\n\n @Override\n public <T extends ConstraintValidator<?, ?>> T getInstance(Class<T> key) {\n if (SubmitRequestValidator.class.equals(key)) {\n return (T) new SubmitRequestValidator();\n }\n\n return constraintValidatorFactory.getInstance(key);\n }\n\n @Override\n public void releaseInstance(ConstraintValidator<?, ?> instance) {\n\n }\n }\n\n}\n" }, { "alpha_fraction": 0.7665505409240723, "alphanum_fraction": 0.7715834379196167, "avg_line_length": 36.985294342041016, "blob_id": "a9e62d0b226937591e529a25406c606fa4a46353", "content_id": "c398c093d173fc90dee9714cbc8da2161a68286d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2583, "license_type": "permissive", "max_line_length": 96, "num_lines": 68, "path": "/osdu-r2/os-workflow/provider/workflow-gcp-datastore/src/main/java/org/opengroup/osdu/workflow/provider/gcp/service/SubmitIngestServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.gcp.service;\n\nimport static java.lang.String.format;\nimport static java.nio.charset.StandardCharsets.UTF_8;\n\nimport com.google.api.client.http.HttpRequest;\nimport com.google.api.client.http.HttpResponse;\nimport com.google.api.client.http.HttpResponseException;\nimport java.io.IOException;\nimport java.util.Collections;\nimport java.util.Map;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.apache.commons.io.IOUtils;\nimport org.opengroup.osdu.workflow.exception.IntegrationException;\nimport org.opengroup.osdu.workflow.exception.RuntimeException;\nimport org.opengroup.osdu.workflow.provider.gcp.property.AirflowProperties;\nimport org.opengroup.osdu.workflow.provider.interfaces.ISubmitIngestService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@RequiredArgsConstructor\n@Slf4j\npublic class SubmitIngestServiceImpl implements ISubmitIngestService {\n\n final AirflowProperties airflowProperties;\n final GoogleIapHelper googleIapHelper;\n private static final String 
AIRFLOW_PAYLOAD_PARAMETER_NAME = \"conf\";\n\n @Override\n public boolean submitIngest(String dagName, Map<String, Object> data) {\n\n try {\n String airflowUrl = airflowProperties.getUrl();\n String iapClientId = googleIapHelper.getIapClientId(airflowUrl);\n String webServerUrl = format(\"%s/api/experimental/dags/%s/dag_runs\", airflowUrl, dagName);\n\n HttpRequest request = googleIapHelper.buildIapRequest(webServerUrl, iapClientId,\n Collections.singletonMap(AIRFLOW_PAYLOAD_PARAMETER_NAME, data));\n HttpResponse response = request.execute();\n\n String airflowResponse = IOUtils.toString(response.getContent(), UTF_8);\n log.debug(\"Airflow response - \" + airflowResponse);\n\n return true;\n } catch (HttpResponseException e) {\n throw new IntegrationException(\"Airflow request fail\", e);\n } catch (IOException e) {\n throw new RuntimeException(e.getMessage(), e);\n }\n }\n}\n" }, { "alpha_fraction": 0.7281898856163025, "alphanum_fraction": 0.7495548725128174, "avg_line_length": 32.01960754394531, "blob_id": "7b6b13f8227a5875a3b555d52ab8e81dd28627b9", "content_id": "c67ede734780e4e65175331bcbf044ac8e72081e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1685, "license_type": "permissive", "max_line_length": 103, "num_lines": 51, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/http/AppError.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.http;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.AllArgsConstructor;\nimport lombok.NoArgsConstructor;\n\nimport java.io.Serializable;\n\n@Data\n@AllArgsConstructor\n@Builder\npublic class AppError implements Serializable {\n private static final long serialVersionUID = 2405172041950241677L;\n private int code;\n private String reason;\n private String message;\n @JsonIgnore\n private String[] errors;\n // exclude debuggingInfo & originalException properties in response deserialization as they are not\n // required for swagger endpoint and Portal send weird multipart Content-Type in request\n @JsonIgnore\n private String debuggingInfo;\n @JsonIgnore\n private Exception originalException;\n\n //AppException creates App Errors with only these 3 attributes\n public AppError(int code, String reason, String message){\n this.code = code;\n this.reason = reason;\n this.message = message;\n }\n}\n\n" }, { "alpha_fraction": 0.7641886472702026, "alphanum_fraction": 0.7831734418869019, "avg_line_length": 47.125, "blob_id": "9f3c91cd7b161622ead16e0e1b3a6cebe2a7821a", "content_id": "269bb664df54202dfb23f7cf67b9f5aabd8ab184", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5004, "license_type": 
"permissive", "max_line_length": 144, "num_lines": 104, "path": "/compatibility-layer/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU Compatibility Layer\n\nThe OSDU Compatibility Layer is a dedicated layer for the [Open Subsurface Data Universe] standard developed for the \n[DELFI Data Ecosystem] on top of [Google Cloud Platform].\n\n> DELFI Data Ecosystem is being open-sourced as [OpenDES] and is an internal project of Schlumberger.\n\nThe OSDU Compatibility Layer provides a subset of [OSDU Release 1] functionality &mdash; Demo1 &mdash; sufficient for \ndemo and testing purposes. Demo 1 is centered around creating minimal services implementation to integrate with \nthird-party applications.\n\nThe OSDU Compatibility Layer Demo 1 covers the following 3 services:\n\n1. Search, implementation is the same as in OSDU Release 1\n2. Delivery, implementation is the same as in OSDU Release 1\n3. Ingest, reduced implementation compared to OSDU R1 covering only ingestion of .las files \n\nThe OSDU Compatibility Layer Release 1 will extend the three services to also cover processing other file types. A new\nservice Collection will also be added to the OSDU Compatibility Layer Release 1.\n\n**This is not an officially supported Google product.**\n\n## Getting Started\n\nTo start using the OSDU Compatibility Layer:\n\n1. Contact the Schlumberger account team for access to the [DELFI Data Ecosystem] test environment. The Schlumberger \nteam will provide you with the following details:\n * OpenID credentials\n * DELFI partition\n2. Contact the Google account team and provide them with the DELFI partition you received from SLB.\n3. Obtain the Apigee URI to which you can send your requests.\n\nThe Apigee URI looks similar to this: `https://<gcp-project-id>.apigee.net`.\n\nOnce you received this URI, you can start sending requests to the OSDU Compatibility Layer.\n\n## Implementation\n\nThe current implementation of the OSDU Compatibility Layer includes the following services:\n\n* Ingestion, receives ingestion requests and starts the ingestion process\n* Search, receives search requests and returns the results found in the DELFI Data Ecosystem\n* Delivery, receives delivery requests and returns work products, work product components, or files\n\nThe compatibility layer also includes two helper services:\n\n* Delfi-client, authenticates incoming requests and queries the DELFI Data Ecosystem\n* Srn-mapper, the service that communicates with Cloud Firestore to obtain DELFI record IDs or store new IDs by SRNs\n\n## Technology Stack\n\nThe OSDU Compatibility Layer is built with [Java] and [Terraform].\n\nThe project uses the following Java libraries:\n\n* [Spring Cloud Greenwich Service Release 2]\n* [Spring Framework 2.1.7]\n* [Spring Security Test 4.0.0]\n* [Spring Boot 2.1.6 ]\n* [MapStruct]\n* [Project Lombok 1.18.8]\n* [Javax Inject 1]\n* [Apache HttpClient 4.3.4]\n* [Jackson Databind 2.10.0.pr1]\n\nThe framework leverages [Google Cloud Platform] functionality, specifically:\n\n* [Cloud Storage](https://cloud.google.com/storage/): object store\n* [Firestore](https://cloud.google.com/firestore/): key-value pair lookup for OSDU SRN to DELFI GUID mapping\n* [Cloud Pub/Sub](https://cloud.google.com/pubsub): asynchronous ingestion\n\n## Built With\n\n* [Knative]: middleware components for modern, source-centric, and container-based applications that can run anywhere.\n* [Java]\n* [Google Cloud Platform]\n* [Terraform]\n\n## License\n\nThis project is licensed 
under the Apache License. Consult the [LICENSE](../LICENSE.md) file for details.\n\n[Open Subsurface Data Universe]: https://www.opengroup.org/osdu/forum-homepage\n[DELFI data ecosystem]: https://www.software.slb.com/delfi/openness/delfi-data-ecosystem\n[OSDU Release 1]: https://www.opengroup.org/membership/forums/open-subsurface-data-universe/achievement-and-plans\n[OpenDES]: https://www.slb.com/newsroom/press-release/2019/pr-2019-0822-osdu-data-ecosystem\n[Knative]: https://knative.dev/docs/\n[Java]: https://www.java.com/en/\n[Terraform]: https://www.terraform.io/\n[Google Cloud Platform]: https://cloud.google.com\n[Spring Cloud Greenwich Service Release 2]: https://mvnrepository.com/artifact/org.springframework.cloud/spring-cloud-dependencies/Greenwich.SR2\n[Spring Framework 2.1.7]: https://mvnrepository.com/artifact/org.springframework\n[Spring Security Test 4.0.0]: https://mvnrepository.com/artifact/org.springframework.security/spring-security-test/4.0.0.RELEASE\n[Spring Boot 2.1.6]: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot/2.1.6.RELEASE\n[MapStruct]: https://mapstruct.org/\n[Project Lombok 1.18.8]: https://mvnrepository.com/artifact/org.projectlombok/lombok/1.18.8\n[Javax Inject 1]: https://mvnrepository.com/artifact/javax.inject/javax.inject/1\n[Apache HttpClient 4.3.4]: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient/4.3.4\n[Jackson Databind 2.10.0.pr1]: https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-databind/2.10.0.pr1\n[Cloud Run]: https://cloud.google.com/run/\n[Cloud Storage]: https://cloud.google.com/storage/\n[Cloud Firestore]: https://firestore.google.com/\n[Cloud Pub/Sub]: https://cloud.google.com/pubsub" }, { "alpha_fraction": 0.8107074499130249, "alphanum_fraction": 0.8107074499130249, "avg_line_length": 64.375, "blob_id": "26c19715a372532813a6ca18bca5c532c7274d46", "content_id": "a4d6dae6eb0ba075c5a333aeca147cc496001502", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 523, "license_type": "permissive", "max_line_length": 159, "num_lines": 8, "path": "/compatibility-layer/service/ingest/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Ingest service working with data in OSDU format\n\n## Google Cloud Storage\nGoogle Cloud Storage is used as a buffer for uploaded files. 
\nThe service receives the file location as a field in the Load Manifest object.\nIt then uploads the file to Cloud Storage and sends it on to the signed URL location.\nSet up a [lifecycle rule](https://cloud.google.com/storage/docs/managing-lifecycles) to delete previously uploaded files from Cloud Storage once they become redundant.\nAn example lifecycle configuration rule file (gcs-lifecycle-config.json) is located in the src/resources folder\n" }, { "alpha_fraction": 0.7602377533912659, "alphanum_fraction": 0.7653016448020935, "avg_line_length": 35.629032135009766, "blob_id": "41c151f9bbe7cf957bca8ce6f9610d4a723f56d7", "content_id": "912a68c06d405afda94a94a2405a25a212467502", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4542, "license_type": "permissive", "max_line_length": 113, "num_lines": 124, "path": "/osdu-r2/os-delivery/provider/delivery-gcp-datastore/src/test/java/org/opengroup/osdu/delivery/provider/gcp/service/StorageServiceImplTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.service;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.anyString;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.Mockito.never;\nimport static org.mockito.Mockito.verify;\n\nimport java.net.URI;\nimport java.net.URL;\nimport java.time.Clock;\nimport java.time.Instant;\nimport org.apache.commons.lang3.RandomStringUtils;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.ArgumentCaptor;\nimport org.mockito.Captor;\nimport org.mockito.Mock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.exception.BadRequestException;\nimport org.opengroup.osdu.delivery.ReplaceCamelCase;\nimport org.opengroup.osdu.delivery.model.SignedObject;\nimport org.opengroup.osdu.delivery.model.SignedUrl;\nimport org.opengroup.osdu.delivery.provider.gcp.TestUtils;\nimport org.opengroup.osdu.delivery.provider.gcp.model.property.FileLocationProperties;\nimport org.opengroup.osdu.delivery.provider.interfaces.StorageRepository;\nimport org.opengroup.osdu.delivery.provider.interfaces.StorageService;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass StorageServiceImplTest {\n\n  @Mock\n  private StorageRepository storageRepository;\n\n  @Captor\n  ArgumentCaptor<String> filenameCaptor;\n\n  private StorageService storageService;\n\n  @BeforeEach\n  void setUp() {\n    FileLocationProperties fileLocationProperties\n        = new FileLocationProperties(TestUtils.BUCKET_NAME, TestUtils.USER_DES_ID);\n    storageService = 
new StorageServiceImpl(fileLocationProperties, storageRepository);\n }\n\n @Test\n void shouldCreateObjectSignedUrl() {\n // given\n SignedObject signedObject = getSignedObject();\n given(storageRepository.createSignedObject(eq(TestUtils.BUCKET_NAME), anyString())).willReturn(signedObject);\n\n // when\n SignedUrl signedUrl = storageService.createSignedUrl(\n TestUtils.FILE_ID, TestUtils.AUTHORIZATION_TOKEN, TestUtils.PARTITION);\n\n // then\n then(signedUrl).satisfies(url -> {\n then(url.getUrl().toString()).is(TestUtils.GCS_URL_CONDITION);\n then(url.getUri().toString()).matches(TestUtils.GCS_OBJECT_URI);\n then(url.getCreatedAt()).isBefore(now());\n then(url.getCreatedBy()).isEqualTo(TestUtils.USER_DES_ID);\n });\n\n verify(storageRepository).createSignedObject(eq(TestUtils.BUCKET_NAME), filenameCaptor.capture());\n then(filenameCaptor.getValue()).matches(TestUtils.USER_DES_ID + \".*?\" + TestUtils.UUID_REGEX);\n }\n\n @Test\n void shouldThrowExceptionWhenResultFilepathIsMoreThan1024Characters() {\n // given\n String fileId = RandomStringUtils.randomAlphanumeric(1024);\n\n // when\n Throwable thrown = catchThrowable(() -> storageService.createSignedUrl(fileId,\n TestUtils.AUTHORIZATION_TOKEN, TestUtils.PARTITION));\n\n // then\n then(thrown)\n .isInstanceOf(BadRequestException.class)\n .hasMessageContaining(\"The maximum filepath length is 1024 characters\");\n verify(storageRepository, never()).createSignedObject(anyString(), anyString());\n }\n\n private SignedObject getSignedObject() {\n String bucketName = RandomStringUtils.randomAlphanumeric(4);\n String folderName = TestUtils.USER_DES_ID + \"/\" + RandomStringUtils.randomAlphanumeric(9);\n String filename = TestUtils.getUuidString();\n\n URI uri = TestUtils.getGcsObjectUri(bucketName, folderName, filename);\n URL url = TestUtils.getGcsObjectUrl(bucketName, folderName, filename);\n\n return SignedObject.builder()\n .uri(uri)\n .url(url)\n .build();\n }\n\n private Instant now() {\n return Instant.now(Clock.systemUTC());\n }\n\n}\n" }, { "alpha_fraction": 0.7326642274856567, "alphanum_fraction": 0.7436131238937378, "avg_line_length": 42.880001068115234, "blob_id": "6fcc901b1fff3980c42bf3ae1922499257b55b05", "content_id": "bd9c70369fe1e691623d34fe3ce046708b70e77c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "permissive", "max_line_length": 122, "num_lines": 25, "path": "/osdu-r2/os-python-sdk/osdu_api/model/search/spatial_filter.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom osdu_api.model.search.by_bounding_box import ByBoundingBox\nfrom osdu_api.model.search.by_distance import ByDistance\nfrom osdu_api.model.search.by_geo_polygon import ByGeoPolygon\n\nclass SpatialFilter:\n def __init__(self, field: str, by_bounding_box: ByBoundingBox, by_distance: ByDistance, by_geo_polygon: ByGeoPolygon):\n 
self.field = field\n        self.by_bounding_box = by_bounding_box\n        self.by_distance = by_distance\n        self.by_geo_polygon = by_geo_polygon" }, { "alpha_fraction": 0.7242621183395386, "alphanum_fraction": 0.7254364490509033, "avg_line_length": 36.56764602661133, "blob_id": "9846169ad839b3101abdd8f054de542d6434", "content_id": "4cd0314db43b6434cd620cd7d5a16b9fc2ae5432", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12773, "license_type": "permissive", "max_line_length": 102, "num_lines": 340, "path": "/osdu-r2/os-workflow/workflow-core/src/test/java/org/opengroup/osdu/workflow/WorkflowStatusMvcTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow;\n\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.Mockito.verify;\nimport static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;\nimport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;\nimport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Date;\nimport java.util.Objects;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.opengroup.osdu.core.common.model.entitlements.AuthorizationResponse;\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.provider.interfaces.IAuthorizationService;\nimport org.opengroup.osdu.workflow.model.GetStatusRequest;\nimport org.opengroup.osdu.workflow.model.GetStatusResponse;\nimport org.opengroup.osdu.workflow.model.UpdateStatusRequest;\nimport org.opengroup.osdu.workflow.model.UpdateStatusResponse;\nimport org.opengroup.osdu.workflow.model.WorkflowStatus;\nimport org.opengroup.osdu.workflow.model.WorkflowStatusType;\nimport org.opengroup.osdu.workflow.provider.interfaces.IIngestionStrategyRepository;\nimport org.opengroup.osdu.workflow.provider.interfaces.ISubmitIngestService;\nimport org.opengroup.osdu.workflow.provider.interfaces.IWorkflowStatusRepository;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc;\nimport org.springframework.boot.test.context.SpringBootTest;\nimport org.springframework.boot.test.context.TestConfiguration;\nimport org.springframework.boot.test.mock.mockito.MockBean;\nimport org.springframework.http.HttpHeaders;\nimport org.springframework.http.MediaType;\nimport 
org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;\nimport org.springframework.security.config.annotation.web.builders.HttpSecurity;\nimport org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;\nimport org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;\nimport org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors;\nimport org.springframework.test.context.junit.jupiter.SpringExtension;\nimport org.springframework.test.web.servlet.MockMvc;\nimport org.springframework.test.web.servlet.MvcResult;\n\n@ExtendWith(SpringExtension.class)\n@SpringBootTest\n@AutoConfigureMockMvc\n@DisplayNameGeneration(ReplaceCamelCase.class)\npublic class WorkflowStatusMvcTest {\n\n private static final String WORKFLOW_ID = \"workflow-id\";\n private static final String TEST_AUTH = \"test-auth\";\n private static final String PARTITION = \"partition\";\n private static final String UNAUTHORIZED_MSG = \"The user is not authorized to perform this action\";\n\n @Autowired\n private MockMvc mockMvc;\n @Autowired\n private ObjectMapper mapper;\n\n @MockBean\n private IIngestionStrategyRepository ingestionStrategyRepository;\n @MockBean\n private ISubmitIngestService submitIngestService;\n @MockBean\n private IWorkflowStatusRepository workflowStatusRepository;\n @MockBean\n private IAuthorizationService authorizationService;\n\n @Test\n public void shouldPassGetWorkflowStatusFlow() throws Exception {\n\n // given\n HttpHeaders headers = getHttpHeaders();\n\n GetStatusRequest request = GetStatusRequest.builder().workflowId(WORKFLOW_ID).build();\n WorkflowStatus status = WorkflowStatus.builder().workflowStatusType(WorkflowStatusType.RUNNING)\n .workflowId(WORKFLOW_ID)\n .airflowRunId(\"airflow-id\")\n .submittedAt(new Date())\n .build();\n\n given(workflowStatusRepository\n .findWorkflowStatus(eq(WORKFLOW_ID))).willReturn(status);\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willReturn(AuthorizationResponse.builder()\n .user(\"[email protected]\")\n .build());\n\n // when\n MvcResult mvcResult = mockMvc.perform(\n post(\"/getStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(mapper.writeValueAsString(request)))\n .andExpect(status().isOk())\n .andReturn();\n\n // then\n GetStatusResponse startWorkflowResponse = mapper\n .readValue(mvcResult.getResponse().getContentAsString(), GetStatusResponse.class);\n then(startWorkflowResponse.getWorkflowStatusType()).isEqualTo(status.getWorkflowStatusType());\n }\n\n @Test\n public void shouldThrowNotFoundIfThereIsNoStatus() throws Exception {\n\n // given\n HttpHeaders headers = getHttpHeaders();\n GetStatusRequest request = GetStatusRequest.builder().workflowId(WORKFLOW_ID).build();\n\n given(workflowStatusRepository\n .findWorkflowStatus(eq(WORKFLOW_ID))).willReturn(null);\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willReturn(AuthorizationResponse.builder()\n .user(\"[email protected]\")\n .build());\n\n // when\n mockMvc.perform(\n post(\"/getStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(mapper.writeValueAsString(request)))\n 
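// expect 404 because the mocked repository has no status record for this workflow ID\n        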
.andExpect(status().isNotFound())\n .andReturn();\n }\n\n\n @Test\n public void shouldFailGetStatusInvalidJson() throws Exception {\n\n // given\n HttpHeaders headers = new HttpHeaders();\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willReturn(AuthorizationResponse.builder()\n .user(\"[email protected]\")\n .build());\n\n // when\n MvcResult mvcResult = mockMvc.perform(\n post(\"/getStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(\"{\\\"test\\\";\\\"test\\\"}\"))\n .andExpect(status().isBadRequest())\n .andReturn();\n\n // then\n then(Objects.requireNonNull(mvcResult.getResolvedException()).getMessage())\n .contains(\"JSON parse error\");\n }\n\n @Test\n public void shouldFailGetStatusUnauthorized() throws Exception {\n\n // given\n HttpHeaders headers = getHttpHeaders();\n\n GetStatusRequest request = GetStatusRequest.builder().workflowId(WORKFLOW_ID).build();\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willThrow(AppException.createUnauthorized(\"test: viewer\"));\n\n // when\n mockMvc.perform(\n post(\"/getStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(mapper.writeValueAsString(request)))\n .andExpect(status().isUnauthorized())\n .andExpect(jsonPath(\"$.message\").value(UNAUTHORIZED_MSG))\n .andReturn();\n\n // then\n verify(authorizationService).authorizeAny(any(), eq(\"service.storage.creator\"));\n }\n\n\n @Test\n public void shouldPassUpdateWorkflowStatusFlow() throws Exception {\n\n // given\n HttpHeaders headers = getHttpHeaders();\n\n UpdateStatusRequest request = UpdateStatusRequest.builder()\n .workflowId(WORKFLOW_ID)\n .workflowStatusType(WorkflowStatusType.RUNNING).build();\n WorkflowStatus status = WorkflowStatus.builder()\n .workflowStatusType(WorkflowStatusType.SUBMITTED)\n .workflowId(WORKFLOW_ID)\n .airflowRunId(\"airflow-id\")\n .submittedAt(new Date())\n .build();\n\n WorkflowStatus updatedStatus = WorkflowStatus.builder()\n .workflowStatusType(WorkflowStatusType.RUNNING)\n .workflowId(WORKFLOW_ID)\n .airflowRunId(\"airflow-id\")\n .submittedAt(new Date())\n .build();\n\n given(workflowStatusRepository.findWorkflowStatus(eq(WORKFLOW_ID))).willReturn(status);\n\n given(workflowStatusRepository\n .updateWorkflowStatus(eq(status.getWorkflowId()), eq(WorkflowStatusType.RUNNING)))\n .willReturn(updatedStatus);\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willReturn(AuthorizationResponse.builder()\n .user(\"[email protected]\")\n .build());\n\n // when\n MvcResult mvcResult = mockMvc.perform(\n post(\"/updateStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(mapper.writeValueAsString(request)))\n .andExpect(status().isOk())\n .andReturn();\n\n // then\n UpdateStatusResponse response = mapper\n .readValue(mvcResult.getResponse().getContentAsString(), UpdateStatusResponse.class);\n then(response.getWorkflowStatusType()).isEqualTo(WorkflowStatusType.RUNNING);\n then(response.getWorkflowId()).isEqualTo(WORKFLOW_ID);\n }\n\n\n @Test\n public void shouldFailUpdateStatusInvalidJson() throws Exception {\n\n // given\n 
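// the request body below uses ';' instead of ':' and must be rejected as malformed JSON\n    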
HttpHeaders headers = new HttpHeaders();\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willReturn(AuthorizationResponse.builder()\n .user(\"[email protected]\")\n .build());\n\n // when\n MvcResult mvcResult = mockMvc.perform(\n post(\"/updateStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(\"{\\\"test\\\";\\\"test\\\"}\"))\n .andExpect(status().isBadRequest())\n .andReturn();\n\n // then\n then(Objects.requireNonNull(mvcResult.getResolvedException()).getMessage())\n .contains(\"JSON parse error\");\n }\n\n @Test\n public void shouldFailUpdateStatusUnauthorized() throws Exception {\n\n // given\n HttpHeaders headers = getHttpHeaders();\n\n UpdateStatusRequest request = UpdateStatusRequest.builder()\n .workflowId(WORKFLOW_ID)\n .workflowStatusType(WorkflowStatusType.RUNNING).build();\n\n given(authorizationService.authorizeAny(any(), eq(\"service.storage.creator\")))\n .willThrow(AppException.createUnauthorized(\"test: viewer\"));\n\n // when\n mockMvc.perform(\n post(\"/updateStatus\")\n .contentType(MediaType.APPLICATION_JSON)\n .characterEncoding(StandardCharsets.UTF_8.displayName())\n .with(SecurityMockMvcRequestPostProcessors.csrf())\n .headers(headers)\n .content(mapper.writeValueAsString(request)))\n .andExpect(status().isUnauthorized())\n .andExpect(jsonPath(\"$.message\").value(UNAUTHORIZED_MSG))\n .andReturn();\n\n // then\n verify(authorizationService).authorizeAny(any(), eq(\"service.storage.creator\"));\n }\n\n private HttpHeaders getHttpHeaders() {\n HttpHeaders headers = new HttpHeaders();\n headers.add(DpsHeaders.AUTHORIZATION, TEST_AUTH);\n headers.add(DpsHeaders.DATA_PARTITION_ID, PARTITION);\n return headers;\n }\n\n @TestConfiguration\n @EnableWebSecurity\n @EnableGlobalMethodSecurity(prePostEnabled = true)\n public static class TestSecurityConfig extends WebSecurityConfigurerAdapter {\n\n @Override\n protected void configure(HttpSecurity http) throws Exception {\n\n http.httpBasic().disable()\n .csrf().disable(); //disable default authN. 
AuthN handled by endpoints proxy\n }\n\n }\n\n}\n" }, { "alpha_fraction": 0.6892753839492798, "alphanum_fraction": 0.7002898454666138, "avg_line_length": 30.363636016845703, "blob_id": "e90b3810396e8986ab2f7d7d055fba71d26e5f8c", "content_id": "dd18f6f1ebf8456234719e3c955b8b0d7a105756", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1725, "license_type": "permissive", "max_line_length": 150, "num_lines": 55, "path": "/compatibility-layer/scripts/deploy.sh", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# !/bin/bash\nWORKDIR=$(cd \"$(dirname \"$0\")\"/..; pwd)\ncd \"$WORKDIR\" || exit 0\n\nif [[ -z $1 ]]; then\n cat << EOF\nUsage: $0 app [service [gcp-region]]\nBuild and deploy a container to Cloud Run\n\n app name of the microservice to build\n service Cloud Run service name (default: same as app)\n region Google Cloud region (default: us-central1)\n\nEOF\n exit 1\nfi\n\nAPP=$1\nSERVICE=$2\nREGION=$3\n[[ -z $REGION ]] && REGION=us-central1\n[[ -z $SERVICE ]] && SERVICE=$APP\n\n\nif [[ -z $GOOGLE_CLOUD_PROJECT ]]; then\n echo \"Enter your GCP project ID:\"\n read -r GOOGLE_CLOUD_PROJECT\nfi\n\ngcloud config set project \"$GOOGLE_CLOUD_PROJECT\"\n\nif [[ -z $CACHE_BUCKET ]]; then\n echo \"Enter the GCS bucket for caching Cloud Build results\"\n read -r CACHE_BUCKET\nfi\n\nCOMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null)\n[[ -z $COMMIT_SHA ]] && COMMIT_SHA=latest\ngcloud builds submit --config \"${WORKDIR}\"/cloudbuild.yaml --substitutions=_SERVICE_NAME=\"$APP\",_SHORT_SHA=\"$COMMIT_SHA\",_CACHE_BUCKET=\"$CACHE_BUCKET\"\n\ngcloud beta run deploy \"$SERVICE\" --image gcr.io/\"${GOOGLE_CLOUD_PROJECT}\"/osdu-gcp-\"${APP}\":\"${COMMIT_SHA}\" --platform managed --region \"$REGION\"\n" }, { "alpha_fraction": 0.6605191230773926, "alphanum_fraction": 0.6748633980751038, "avg_line_length": 35.599998474121094, "blob_id": "698923d0085187502e41e73ff9b871d8abb542a0", "content_id": "4d9ba0bdb9b999aa8c2da517440f206747c9174d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2928, "license_type": "permissive", "max_line_length": 123, "num_lines": 80, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/crs/TestUtils.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the 
License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.crs;\n\nimport org.junit.Ignore;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\n\nimport java.io.ByteArrayOutputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.charset.StandardCharsets;\n\n@Ignore\npublic class TestUtils {\n public static String DATE_FORMAT = \"yyyy-MM-dd HH:mm:ss\";\n private static String token = \"\";\n\n // public static String getAuthToken() {\n // \tif (Strings.isNullOrEmpty(token)) {\n // \t\tString creds = System.getProperty(\"INT_TESTER_CREDS\", System.getenv(\"INT_TESTER_CREDS\"));\n // \t\tif (StringUtils.isBlank(creds)) {\n // \t\t\tthrow new RuntimeException(\"Could not find INT_TESTER_CREDS. Needs to be in env variable or system property\");\n // \t\t}\n // \t\ttry {\n // \t\t\tGoogleServiceAccount gsa = new GoogleServiceAccount(creds);\n // \t\t\ttoken = \"Bearer \" + gsa.getAuthToken(\"245464679631-ktfdfpl147m1mjpbutl00b3cmffissgq.apps.googleusercontent.com\");\n // \t\t} catch (IOException e) {\n // \t\t\tthrow new RuntimeException(\"Error generating service account credentials\", e);\n // \t\t}\n // \t}\n // \treturn token;\n // }\n\n public static DpsHeaders getStandardHeaders(String tenant) {\n DpsHeaders headers = new DpsHeaders();\n //headers.put(DpsHeaders.AUTHORIZATION, getAuthToken());\n headers.put(DpsHeaders.ACCOUNT_ID, tenant);\n return headers;\n }\n\n public static boolean isEqual(double a, double b) {\n if (Double.isNaN(a) || Double.isNaN(b))\n return false;\n\n return Math.abs(a - b) <= Double.MIN_VALUE;\n }\n\n public static boolean isNullOrEmpty(String value) {\n return value == null || value.trim().isEmpty();\n }\n\n public static String readFile(String path) throws IOException {\n InputStream inputStream = TestUtils.class.getClass().getResourceAsStream(path);\n if (inputStream == null) {\n throw new IOException();\n }\n ByteArrayOutputStream outputStream = new ByteArrayOutputStream();\n byte[] buffer = new byte[1024];\n int length;\n while ((length = inputStream.read(buffer)) != -1) {\n outputStream.write(buffer, 0, length);\n }\n return outputStream.toString(StandardCharsets.UTF_8.toString());\n }\n}\n" }, { "alpha_fraction": 0.7225368022918701, "alphanum_fraction": 0.731596827507019, "avg_line_length": 32.94230651855469, "blob_id": "81b2aef6e1ab430fc7df667b50b625a3015870cb", "content_id": "975f465417e75f090fa56ee976f1b49eee2a9789", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1766, "license_type": "permissive", "max_line_length": 108, "num_lines": 52, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/crs/ConversionRecord.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n 
*/\n\npackage org.opengroup.osdu.core.common.model.crs;\n\nimport com.google.gson.JsonElement;\nimport com.google.gson.JsonObject;\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.apache.http.HttpStatus;\nimport org.opengroup.osdu.core.common.model.http.AppException;\n\nimport java.util.ArrayList;\nimport java.util.List;\n\n@Data\n@Builder\n@NoArgsConstructor\n@AllArgsConstructor\npublic class ConversionRecord {\n private JsonObject recordJsonObject;\n private ConvertStatus convertStatus;\n @Builder.Default\n private List<String> conversionMessages = new ArrayList<>();\n\n public String getRecordId() {\n if (this.recordJsonObject == null) {\n throw new AppException(HttpStatus.SC_INTERNAL_SERVER_ERROR, \"error\", \"record does not exist\");\n }\n JsonElement recordId = this.recordJsonObject.get(\"id\");\n if (recordId == null || recordId.getAsString().isEmpty()) {\n throw new AppException(HttpStatus.SC_INTERNAL_SERVER_ERROR, \"error\", \"record does not have id\");\n }\n return recordId.getAsString();\n }\n}\n\n" }, { "alpha_fraction": 0.7782233357429504, "alphanum_fraction": 0.7816162705421448, "avg_line_length": 37.141178131103516, "blob_id": "528fc0d31e32c4ac5b63da024a3645112b799b9d", "content_id": "2ee4559013b915f184e1405b4413bb30e1253fc3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3242, "license_type": "permissive", "max_line_length": 117, "num_lines": 85, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/delfi/DelfiEnrichService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport static com.osdu.service.JsonUtils.deepCopy;\n\nimport com.osdu.client.delfi.RecordDataFields;\nimport com.osdu.model.IngestHeaders;\nimport com.osdu.model.Record;\nimport com.osdu.model.RequestContext;\nimport com.osdu.model.delfi.DelfiIngestedFile;\nimport com.osdu.model.delfi.enrich.EnrichedFile;\nimport com.osdu.model.type.file.OsduFile;\nimport com.osdu.model.type.wp.WorkProductComponent;\nimport com.osdu.service.EnrichService;\nimport com.osdu.service.PortalService;\nimport com.osdu.service.helper.IngestionHelper;\nimport java.time.LocalDateTime;\nimport java.time.ZoneOffset;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.stereotype.Service;\n\n@Service\n@RequiredArgsConstructor\n@Slf4j\npublic class DelfiEnrichService implements EnrichService {\n\n final PortalService portalService;\n\n @Override\n public EnrichedFile enrichRecord(DelfiIngestedFile file, String srn,\n RequestContext requestContext) {\n\n WorkProductComponent wpc = file.getSubmittedFile().getSignedFile().getFile().getWpc();\n WorkProductComponent reducedWpc = deepCopy(wpc, WorkProductComponent.class);\n\n Record record = 
portalService\n        .getRecord(file.getRecordId(), requestContext.getAuthorizationToken(),\n            requestContext.getPartition());\n\n    record.getData().put(RecordDataFields.WPC_DATA, reducedWpc);\n    record.getData().put(RecordDataFields.OSDU_DATA, generateOsduFileRecord(file, srn, requestContext.getHeaders()));\n\n    Record enrichedRecord = portalService.putRecord(record, requestContext.getAuthorizationToken(),\n        requestContext.getPartition());\n\n    return EnrichedFile.builder()\n        .delfiIngestedFile(file)\n        .record(enrichedRecord)\n        .build();\n  }\n\n  private OsduFile generateOsduFileRecord(DelfiIngestedFile file, String srn,\n      IngestHeaders headers) {\n    LocalDateTime now = LocalDateTime.now(ZoneOffset.UTC);\n    OsduFile osduFile = deepCopy(file.getSubmittedFile().getSignedFile().getFile(), OsduFile.class);\n\n    osduFile.setResourceID(srn);\n    osduFile.setResourceTypeID(IngestionHelper.prepareTypeId(osduFile.getResourceTypeID()));\n    osduFile.setResourceHomeRegionID(headers.getResourceHomeRegionID());\n    osduFile.setResourceHostRegionIDs(headers.getResourceHostRegionIDs());\n    osduFile.setResourceObjectCreationDatetime(now);\n    osduFile.setResourceVersionCreationDatetime(now);\n    osduFile.setResourceCurationStatus(\"srn:reference-data/ResourceCurationStatus:CREATED:\");\n    osduFile.setResourceLifecycleStatus(\"srn:reference-data/ResourceLifecycleStatus:RECIEVED:\");\n\n    return osduFile;\n  }\n\n}\n" }, { "alpha_fraction": 0.7343465089797974, "alphanum_fraction": 0.7392097115516663, "avg_line_length": 37.3863639831543, "blob_id": "b8ec5ccbce9e3e9a11b3ab8e33807522b569746b", "content_id": "a69f5d9fe8727a37948c21a9c454ca31d7d58626", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1645, "license_type": "permissive", "max_line_length": 96, "num_lines": 44, "path": "/compatibility-layer/service/search/src/main/java/com/osdu/mapper/SearchResultMapper.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.mapper;\n\nimport com.osdu.model.delfi.DelfiSearchResult;\nimport com.osdu.model.osdu.OsduSearchObject;\nimport com.osdu.model.osdu.OsduSearchResult;\nimport org.mapstruct.DecoratedWith;\nimport org.mapstruct.Mapper;\nimport org.mapstruct.Mapping;\n\n@Mapper\n@DecoratedWith(SearchResultMapperDecorator.class)\npublic interface SearchResultMapper {\n\n  /**\n   * Maps {@link DelfiSearchResult} to OSDUSearchResult.\n   *\n   * @param searchResult to get actual search result data\n   * @param osduSearchObject to get additional metadata information to enrich the result. 
OSDU\n * result includes some properties of the original search request like\n * facets or requested count/offset.\n * @return result of the search against Delfi Portal in OSDU compliant format.\n */\n @Mapping(source = \"searchResult.totalCount\", target = \"totalHits\")\n @Mapping(target = \"start\", ignore = true)\n OsduSearchResult delfiToOsdu(DelfiSearchResult searchResult,\n OsduSearchObject osduSearchObject);\n\n}\n" }, { "alpha_fraction": 0.7633545398712158, "alphanum_fraction": 0.765845537185669, "avg_line_length": 34.42156982421875, "blob_id": "c6653b3339893a1df0a398d727ae81173e76e883", "content_id": "10bb7e565fe31a7b422d6d84b46ec637267c7935", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3613, "license_type": "permissive", "max_line_length": 97, "num_lines": 102, "path": "/osdu-r2/os-workflow/workflow-core/src/test/java/org/opengroup/osdu/workflow/exception/handler/RestExceptionHandlerTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.exception.handler;\n\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.hibernate.validator.internal.engine.path.PathImpl.createPathFromString;\nimport static org.mockito.ArgumentMatchers.anyBoolean;\nimport static org.mockito.BDDMockito.given;\n\nimport java.util.HashSet;\nimport java.util.Set;\nimport javax.validation.ConstraintViolation;\nimport javax.validation.ConstraintViolationException;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.Mock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.logging.JaxRsDpsLog;\nimport org.opengroup.osdu.workflow.ReplaceCamelCase;\nimport org.springframework.http.HttpStatus;\nimport org.springframework.http.ResponseEntity;\nimport org.springframework.web.context.request.WebRequest;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass RestExceptionHandlerTest {\n\n @Mock\n private WebRequest webRequest;\n @Mock\n private ConstraintViolation<String> constraintViolation;\n @Mock\n private JaxRsDpsLog logger;\n\n RestExceptionHandler restExceptionHandler;\n\n @BeforeEach\n void setUp() {\n restExceptionHandler = new RestExceptionHandler(logger);\n }\n\n @Test\n void shouldHandleIllegalArgumentException() {\n\n // given\n given(webRequest.getDescription(anyBoolean())).willReturn(\"uri=/test\");\n\n // when\n ResponseEntity<Object> response = restExceptionHandler\n .handleInvalidBody(new IllegalArgumentException(\"Cannot convert JSON\"), webRequest);\n\n // then\n then(response.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);\n then(response.getBody()).satisfies(body -> {\n ApiError error = (ApiError) body;\n 
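// the handler should surface both the HTTP status and the original exception message here\n      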
then(error.getStatus()).isEqualTo(HttpStatus.BAD_REQUEST);\n then(error.getMessage()).isEqualTo(\"IllegalArgumentException: Cannot convert JSON\");\n });\n }\n\n @Test\n void shouldHandleConstraintException() {\n\n // given\n Set<ConstraintViolation<String>> constraints = new HashSet<>();\n constraints.add(constraintViolation);\n ConstraintViolationException constraintViolationException = new ConstraintViolationException(\n constraints);\n\n given(constraintViolation.getPropertyPath()).willReturn(createPathFromString(\"testField\"));\n given(constraintViolation.getMessage()).willReturn(\"testMessage\");\n\n // when\n ResponseEntity<Object> response = restExceptionHandler\n .handle(constraintViolationException, webRequest);\n\n // then\n then(response.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);\n then(response.getBody()).satisfies(body -> {\n ApiError error = (ApiError) body;\n then(error.getStatus()).isEqualTo(HttpStatus.BAD_REQUEST);\n then(error.getMessage()).contains(\"ConstraintViolationException\");\n then(error.getErrors().get(0)).isEqualTo(\"testField: testMessage\");\n });\n }\n}\n" }, { "alpha_fraction": 0.6776070594787598, "alphanum_fraction": 0.6815642714500427, "avg_line_length": 39.52830123901367, "blob_id": "e7b1f404dcb71936e2d050d66429f8f1ecb43222", "content_id": "92644f360beac6002008ab77c173a194e8260724", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4296, "license_type": "permissive", "max_line_length": 120, "num_lines": 106, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/legal/LegalService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.legal;\n\nimport com.google.gson.JsonSyntaxException;\nimport org.apache.commons.lang3.StringUtils;\nimport org.opengroup.osdu.core.common.model.legal.*;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.http.HttpRequest;\nimport org.opengroup.osdu.core.common.http.HttpResponse;\nimport org.opengroup.osdu.core.common.http.IHttpClient;\n\npublic class LegalService implements ILegalProvider {\n private final String rootUrl;\n private final IHttpClient httpClient;\n private final DpsHeaders headers;\n\n LegalService(LegalAPIConfig config,\n IHttpClient httpClient,\n DpsHeaders headers) {\n this.rootUrl = config.getRootUrl();\n this.httpClient = httpClient;\n this.headers = headers;\n if (config.apiKey != null) {\n headers.put(\"AppKey\", config.apiKey);\n }\n }\n\n @Override\n public LegalTag create(LegalTag lt) throws LegalException {\n String url = this.createUrl(\"/legaltags\");\n HttpResponse result = this.httpClient.send(\n HttpRequest.post(lt).url(url).headers(this.headers.getHeaders()).build());\n return this.getResult(result, LegalTag.class);\n }\n\n @Override\n 
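// a 404 from the Legal service is mapped to null rather than an exception\n    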
public LegalTag get(String name) throws LegalException {\n String url = this.createUrl(String.format(\"/legaltags/%s\", name));\n HttpResponse result = this.httpClient.send(\n HttpRequest.get().url(url).headers(this.headers.getHeaders()).build());\n return result.IsNotFoundCode() ? null : this.getResult(result, LegalTag.class);\n }\n\n @Override\n public void delete(String name) throws LegalException {\n String url = this.createUrl(String.format(\"/legaltags/%s\", name));\n HttpResponse result = this.httpClient.send(\n HttpRequest.delete().url(url).headers(this.headers.getHeaders()).build());\n this.getResult(result, String.class);\n }\n\n @Override\n public InvalidTagsWithReason validate(String... names) throws LegalException {\n String url = this.createUrl(String.format(\"/legaltags:validate\"));\n RequestLegalTags rlt = new RequestLegalTags();\n rlt.setNames(names);\n HttpResponse result = this.httpClient.send(\n HttpRequest.post(rlt).url(url).headers(this.headers.getHeaders()).build());\n return this.getResult(result, InvalidTagsWithReason.class);\n }\n\n @Override\n public LegalTagProperties getLegalTagProperties() throws LegalException {\n String url = this.createUrl(\"/legaltags:properties\");\n HttpResponse result = this.httpClient.send(\n HttpRequest.get().url(url).headers(this.headers.getHeaders()).build());\n return result.IsNotFoundCode() ? null : this.getResult(result, LegalTagProperties.class);\n }\n\n private LegalException generateException(HttpResponse result) {\n return new LegalException(\n \"Error making request to Legal service. Check the inner HttpResponse for more info.\", result);\n }\n\n private String createUrl(String pathAndQuery) {\n return StringUtils.join(this.rootUrl, pathAndQuery);\n }\n\n private <T> T getResult(HttpResponse result, Class<T> type) throws LegalException {\n if (result.isSuccessCode()) {\n try {\n return result.parseBody(type);\n } catch (JsonSyntaxException e) {\n throw new LegalException(\"Error parsing response. 
Check the inner HttpResponse for more info.\", result);\n }\n } else {\n throw this.generateException(result);\n }\n }\n}\n" }, { "alpha_fraction": 0.7351484894752502, "alphanum_fraction": 0.7391089200973511, "avg_line_length": 32.13114929199219, "blob_id": "75337973c7440ec77d87d987fe74a8a1cc274ef2", "content_id": "6df7f2d62128fede5a5f0c4e24fa46d3d8d65635", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2020, "license_type": "permissive", "max_line_length": 79, "num_lines": 61, "path": "/osdu-r2/os-qa/src/test/java/com/osdu/auth/GoogleToken.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.auth;\n\nimport com.codeborne.selenide.Configuration;\nimport com.codeborne.selenide.WebDriverRunner;\nimport com.osdu.core.data.provider.DataProviders;\nimport com.osdu.core.pages.GoogleLoginPage;\nimport com.osdu.core.pages.GoogleProviderPage;\nimport io.qameta.allure.Description;\nimport org.testng.annotations.AfterClass;\nimport org.testng.annotations.BeforeClass;\nimport org.testng.annotations.Test;\n\nimport java.util.Map;\n\nimport static com.codeborne.selenide.Condition.visible;\nimport static com.codeborne.selenide.Selenide.open;\nimport static com.osdu.core.utils.helper.EnvironmentVariableReceiver.*;\n\npublic class GoogleToken {\n GoogleLoginPage loginPage = new GoogleLoginPage();\n GoogleProviderPage providerPage = new GoogleProviderPage();\n final String PAGE_URL = getTokenPage();\n\n @BeforeClass\n public void startBrowser(){\n Configuration.browser = \"chrome\";\n open(PAGE_URL);\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Login to the account and receive token\")\n public void getToken(Map<String,String> data) {\n providerPage.getGoogleProvider().shouldBe(visible).click();\n loginPage.doLogin(getGoogleLogin(), getGooglePassword());\n\n //todo:\n //get token\n //save token\n }\n\n @AfterClass\n public void tearDown() {\n WebDriverRunner.getWebDriver().close();\n }\n}" }, { "alpha_fraction": 0.6859122514724731, "alphanum_fraction": 0.6951501369476318, "avg_line_length": 27.866666793823242, "blob_id": "714360f613202340feb1ac5df41beb2d379b8aaf", "content_id": "3a53affe678232b15d9e61628b60f6e2d8ee6d1e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2165, "license_type": "permissive", "max_line_length": 84, "num_lines": 75, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/http/HttpRequest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the 
License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport com.google.gson.Gson;\nimport lombok.Builder;\nimport lombok.Data;\n\nimport java.util.HashMap;\nimport java.util.Map;\n\n@Builder\n@Data\npublic class HttpRequest {\n public static final String PATCH = \"PATCH\";\n public static final String POST = \"POST\";\n public static final String PUT = \"PUT\";\n public static final String GET = \"GET\";\n public static final String DELETE = \"DELETE\";\n\n String httpMethod;\n String url;\n String body;\n\n @Builder.Default\n Map<String, String> headers = new HashMap<>();\n @Builder.Default\n int connectionTimeout = 5000;\n @Builder.Default\n boolean followRedirects = true;\n\n public static <T> HttpRequestBuilder post(T body) {\n return HttpRequest.builder().httpMethod(POST).body(new Gson().toJson(body));\n }\n\n public static HttpRequestBuilder post() {\n return HttpRequest.builder().httpMethod(POST);\n }\n\n public static <T> HttpRequestBuilder put(T body) {\n return HttpRequest.builder().httpMethod(PUT).body(new Gson().toJson(body));\n }\n\n public static HttpRequestBuilder put() {\n return HttpRequest.builder().httpMethod(PUT);\n }\n\n public static HttpRequestBuilder get() {\n return HttpRequest.builder().httpMethod(GET);\n }\n\n public static HttpRequestBuilder delete() {\n return HttpRequest.builder().httpMethod(DELETE);\n }\n\n @Override\n public String toString() {\n return String.format(\"%s, httpMethod=%s\", url, httpMethod);\n }\n}\n" }, { "alpha_fraction": 0.7330785989761353, "alphanum_fraction": 0.749454140663147, "avg_line_length": 37.978721618652344, "blob_id": "4dc01a26b71995b7c33cc2cc1125a45f970c7264", "content_id": "b5d7257e5c83155a338ad12a3478ad5dfe3929e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1832, "license_type": "permissive", "max_line_length": 127, "num_lines": 47, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/model/search/IdTokenTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport org.junit.Assert;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.runners.MockitoJUnitRunner;\nimport org.opengroup.osdu.core.common.model.search.IdToken;\n\n@RunWith(MockitoJUnitRunner.class)\npublic class IdTokenTest {\n\n @Test\n public void should_returnTrue_givenNull_refreshTokenTest() {\n 
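// a null token cannot be reused, so refreshToken is expected to request a refresh\n        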
Assert.assertTrue(IdToken.refreshToken(null));\n }\n\n @Test\n public void should_returnTrue_givenValidToken_refreshTokenTest() {\n IdToken idToken = IdToken.builder().tokenValue(\"tokenValue\").expirationTimeMillis(System.currentTimeMillis()).build();\n Assert.assertTrue(IdToken.refreshToken(idToken));\n }\n\n @Test\n public void should_returnFalse_whenTokenExpired_refreshTokenTest() {\n IdToken idToken = IdToken.builder().tokenValue(\"tokenValue\").expirationTimeMillis(System.currentTimeMillis()).build();\n idToken.setExpirationTimeMillis(System.currentTimeMillis()+1000000L);\n idToken = IdToken.builder().tokenValue(\"tokenValue\").expirationTimeMillis(System.currentTimeMillis()+1000000L).build();\n Assert.assertFalse(IdToken.refreshToken(idToken));\n }\n}\n" }, { "alpha_fraction": 0.7466307282447815, "alphanum_fraction": 0.7513477206230164, "avg_line_length": 36.099998474121094, "blob_id": "f222228bfca6c50e76328388084e478642dbd17b", "content_id": "443f64d7fea26cbba784f8d4dd259ae1c2b49ef8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4452, "license_type": "permissive", "max_line_length": 120, "num_lines": 120, "path": "/osdu-r2/terraform/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "## OSDU-GCP installation\n\nThis guide assumes the following:\n\n* Google Cloud SDK installed\n* Bash or a compatible shell\n* Python 3 environment\n* Terraform 0.12.8+ installed\n* Basic familiarity with Google Cloud Platform and Terraform\n\nTested on Google Cloud Shell.\n\n1. Set the `GOOGLE_CLOUD_PROJECT` environment variable to your GCP project ID:\n\n```bash\nexport GOOGLE_CLOUD_PROJECT=<your-project-id>\n```\n\n2. Ensure that the service containers are built and pushed to the GCP project's Container Registry.\n\nIf you want to use another project's registry, first complete the steps outlined in the [Deploying images from other\nGoogle Cloud projects] documentation.\n\nRun the following gcloud command from the directory where application source code is stored:\n\n```bash\ngcloud builds submit . --substitutions=_PROVIDER_NAME=gcp-datastore,_SHORT_SHA=$(git rev-parse --short HEAD)\n```\n\n3. [Create an instance of Cloud Firestore in Datastore mode]\n4. Create a service account which will be used by Terraform, e.g.:\n\n```bash\ngcloud iam service-accounts create terraform\n```\n\n5. Create a service account key and store it securely:\n\n```bash\ngcloud iam service-accounts keys create terraform.json --iam-account=terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com\n```\n\n6. Assign Project Editor, Cloud Run Admin, Datastore Index Admin, and Storage Object Admin roles to the account. 
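After applying the bindings shown below, you can verify what was granted with a query along these lines (shown as a sketch; it assumes the standard gcloud CLI filter syntax):\n\n```bash\ngcloud projects get-iam-policy $GOOGLE_CLOUD_PROJECT \\\n    --flatten=\"bindings[].members\" \\\n    --filter=\"bindings.members:terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com\" \\\n    --format=\"value(bindings.role)\"\n```\n\n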
You can\nremove them along with the service account once the deployment is complete.\n\n```bash\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n    --member=serviceAccount:terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n    --role=roles/editor\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n    --member=serviceAccount:terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n    --role=roles/datastore.indexAdmin\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n    --member=serviceAccount:terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n    --role=roles/storage.objectAdmin\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n    --member=serviceAccount:terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n    --role=roles/run.admin\n```\n\nIf you want Terraform to configure IAM roles for the Cloud Run service account, add the Project IAM Admin role as well:\n\n```bash\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n    --member=serviceAccount:terraform@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n    --role=roles/resourcemanager.projectIamAdmin\n```\n\n7. Run the script to seed Cloud Datastore values:\n\n```bash\npip3 install google-cloud-datastore # Already installed in Cloud Shell\nexport GOOGLE_APPLICATION_CREDENTIALS=\"<path to the service account key, e.g. terraform.json>\"\npython3 datastore-import/datastore_import.py\n```\n\n8. Create a Terraform variables file (e.g. `terraform.tfvars`) with the following contents:\n\n```bash\nproject = \"<GCP project ID>\"\ngcr_project = \"<project ID of the Container Registry where images reside>\"\ncredentials_file = \"<path to the service account key, e.g. terraform.json>\"\nregion = \"<your GCP region>\"\nenable_iam = false # Set to true if Terraform service account has Project IAM Admin role\nentitlement_service = \"<your OpenDES entitlements API URL, e.g. https://example.com/entitlements/v1>\"\n\n```\n\n9. Initialize a Terraform project and run `plan` to preview the infrastructure that will be created:\n\n```bash\nterraform init\nterraform plan --var-file=terraform.tfvars --out=terraform.tfplan\n```\n\n10. Apply the saved Terraform plan:\n\n```bash\nterraform apply terraform.tfplan\n```\n\nAt the end of this step, Terraform will output the URLs of the Cloud Run services.\n\n11. 
If you didn't apply IAM changes via Terraform, configure the following roles for the service account used for OS\nservices:\n\n```bash\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n --member=serviceAccount:osdu-gcp-sa@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n --role=roles/composer.user\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n --member=serviceAccount:osdu-gcp-sa@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n --role=roles/datastore.user\ngcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT \\\n --member=serviceAccount:osdu-gcp-sa@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \\\n --role=roles/iam.serviceAccountTokenCreator\n```\n\n[Deploying images from other Google Cloud projects]: https://cloud.google.com/run/docs/deploying#other-projects\n[Create an instance of Cloud Firestore in Datastore mode]: https://cloud.google.com/datastore/docs/quickstart\n" }, { "alpha_fraction": 0.7412188649177551, "alphanum_fraction": 0.7454217672348022, "avg_line_length": 34.06315612792969, "blob_id": "551876f9087d04b0ca44c94b63d3872d31815e97", "content_id": "c39e41c42c70dc6bf9f790f4a8945ae7d75ff6db", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3331, "license_type": "permissive", "max_line_length": 82, "num_lines": 95, "path": "/osdu-r2/os-ingest/provider/ingest-gcp/src/main/java/org/opengroup/osdu/ingest/provider/gcp/repository/FirestoreSchemaRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.provider.gcp.repository;\n\nimport static java.lang.String.format;\n\nimport com.google.api.core.ApiFuture;\nimport com.google.cloud.firestore.Firestore;\nimport com.google.cloud.firestore.QueryDocumentSnapshot;\nimport com.google.cloud.firestore.QuerySnapshot;\nimport java.util.List;\nimport java.util.concurrent.ExecutionException;\nimport java.util.concurrent.Future;\nimport javax.inject.Named;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.ingest.exception.SchemaDataQueryException;\nimport org.opengroup.osdu.ingest.model.SchemaData;\nimport org.opengroup.osdu.ingest.model.SchemaData.Fields;\nimport org.opengroup.osdu.ingest.provider.gcp.mapper.ISchemaDataMapper;\nimport org.opengroup.osdu.ingest.provider.gcp.model.dto.SchemaDataDto;\nimport org.opengroup.osdu.ingest.provider.interfaces.ISchemaRepository;\nimport org.springframework.stereotype.Repository;\n\n@Repository\n@Slf4j\n@RequiredArgsConstructor\npublic class FirestoreSchemaRepository implements ISchemaRepository {\n\n private static final String COLLECTION_NAME = \"schema-data\";\n\n final Firestore firestore;\n @Named\n final ISchemaDataMapper schemaDataMapper;\n\n @Override\n public SchemaData findByTitle(String title) {\n log.debug(\"Requesting schema data. 
Schema title : {}\", title);\n    ApiFuture<QuerySnapshot> query = firestore.collection(COLLECTION_NAME)\n        .whereEqualTo(Fields.TITLE, title).get();\n\n    QuerySnapshot querySnapshot = getSafety(query,\n        \"Failed to find schema data by title \" + title);\n\n    List<QueryDocumentSnapshot> documents = querySnapshot.getDocuments();\n\n    if (documents.size() > 1) {\n      throw new SchemaDataQueryException(\n          format(\"Find by title returned %s document(s), expected 1, query title : %s\",\n              documents.size(), title));\n    }\n\n    SchemaData schemaData = documents.isEmpty()\n        ? null\n        : buildSchemaData(documents.get(0));\n\n    log.debug(\"Found schema data : {}\", schemaData);\n    return schemaData;\n  }\n\n  private <T> T getSafety(Future<T> future, String errorMsg) {\n    try {\n      return future.get();\n    } catch (InterruptedException e) {\n      Thread.currentThread().interrupt();\n      throw new SchemaDataQueryException(errorMsg, e);\n    } catch (ExecutionException e) {\n      throw new SchemaDataQueryException(errorMsg, e);\n    }\n  }\n\n  private SchemaData buildSchemaData(QueryDocumentSnapshot snapshot) {\n    SchemaDataDto dto = SchemaDataDto.builder()\n        .title(snapshot.getString(Fields.TITLE))\n        .schema(snapshot.getString(Fields.SCHEMA))\n        .createdAt(snapshot.getDate(Fields.CREATED_AT))\n        .build();\n    return schemaDataMapper.schemaDataDtoToSchemaData(dto);\n  }\n\n}\n" }, { "alpha_fraction": 0.7907967567443848, "alphanum_fraction": 0.7959096431732178, "avg_line_length": 37.47541046142578, "blob_id": "ed62867a6d3cc1eea3d0311be52e9ae467f2a9b7", "content_id": "71a3f1238f707fb8eb391a0d3de8364db5b69337", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2347, "license_type": "permissive", "max_line_length": 97, "num_lines": 61, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/validation/ValidationServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.validation;\n\nimport com.networknt.schema.ValidationMessage;\nimport java.util.Set;\nimport javax.validation.ConstraintViolation;\nimport javax.validation.ConstraintViolationException;\nimport javax.validation.Validator;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.apache.commons.collections4.CollectionUtils;\nimport org.opengroup.osdu.core.common.exception.BadRequestException;\nimport org.opengroup.osdu.ingest.model.SubmitRequest;\nimport org.opengroup.osdu.ingest.model.WorkProductLoadManifest;\nimport org.opengroup.osdu.ingest.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.ingest.validation.schema.ILoadManifestValidationService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class ValidationServiceImpl implements IValidationService {\n\n  final Validator validator;\n  final ILoadManifestValidationService 
loadManifestValidationService;\n\n @Override\n public void validateSubmitRequest(SubmitRequest request) {\n Set<ConstraintViolation<SubmitRequest>> constraintViolations =\n validator.validate(request, IValidationSequence.class);\n if (CollectionUtils.isNotEmpty(constraintViolations)) {\n throw new ConstraintViolationException(\"Invalid Submit request\", constraintViolations);\n }\n }\n\n @Override\n public void validateManifest(WorkProductLoadManifest loadManifest) {\n Set<ValidationMessage> errors = loadManifestValidationService.validateManifest(loadManifest);\n if (CollectionUtils.isNotEmpty(errors)) {\n throw new BadRequestException(String.format(\n \"Failed to validate json from manifest %s, validation result is %s\",\n loadManifest, errors));\n }\n }\n\n}\n" }, { "alpha_fraction": 0.7286772727966309, "alphanum_fraction": 0.735026478767395, "avg_line_length": 43.158878326416016, "blob_id": "710f45c0148e7ac281030330696b66d3dce071f3", "content_id": "b1ab524bc4475dc2d1cd299890f0f77ad8fd854d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4725, "license_type": "permissive", "max_line_length": 174, "num_lines": 107, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/http/AppException.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.http;\n\nimport com.google.common.base.Strings;\nimport lombok.Data;\nimport lombok.EqualsAndHashCode;\nimport lombok.Getter;\nimport org.apache.http.HttpStatus;\n\n@Data\n@EqualsAndHashCode(callSuper = false)\npublic class AppException extends RuntimeException {\n\n @Getter\n private AppError error;\n\n @Getter\n private Exception originalException;\n\n public AppError getError() {\n return this.error;\n }\n\n public AppException(int status, String reason, String message) {\n String sanitizedReason = this.sanitizeString(reason);\n String sanitizedMessage = this.sanitizeString(message);\n\n this.error = new AppError(status, sanitizedReason, sanitizedMessage);\n this.originalException = null;\n }\n\n public AppException(int status, String reason, String message, String[] errors) {\n this.error = AppError.builder().code(status).reason(reason).message(message).errors(errors).build();\n }\n\n\n public AppException(int status, String reason, String message, String debuggingInfo) {\n this.error = AppError.builder().code(status).reason(reason).message(message).debuggingInfo(debuggingInfo).build();\n }\n\n public AppException(int status, String reason, String message, String[] errors, String debuggingInfo) {\n this.error = AppError.builder().code(status).reason(reason).message(message).errors(errors).debuggingInfo(debuggingInfo).build();\n }\n\n public AppException(int status, String reason, String message, Exception originalException) {\n String 
sanitizedReason = this.sanitizeString(reason);\n String sanitizedMessage = this.sanitizeString(message);\n this.originalException = originalException;\n this.error = AppError.builder().code(status).reason(sanitizedReason).message(sanitizedMessage).originalException(originalException).build();\n }\n\n public AppException(int status, String reason, String message, String[] errors, Exception originalException) {\n this.error = AppError.builder().code(status).reason(reason).message(message).errors(errors).originalException(originalException).build();\n }\n\n public AppException(int status, String reason, String message, String debuggingInfo, Exception originalException) {\n this.error = AppError.builder().code(status).reason(reason).message(message).debuggingInfo(debuggingInfo).originalException(originalException).build();\n }\n\n public AppException(int status, String reason, String message, String debuggingInfo, Exception originalException, String[] errors) {\n this.error = AppError.builder().code(status).reason(reason).message(message).debuggingInfo(debuggingInfo).originalException(originalException).errors(errors).build();\n }\n\n public static AppException createForbidden(String debuggingInfo) {\n return new AppException(HttpStatus.SC_FORBIDDEN, \"Access denied\", \"The user is not authorized to perform this action\", debuggingInfo);\n }\n\n public static AppException createUnauthorized(String debuggingInfo) {\n return new AppException(HttpStatus.SC_UNAUTHORIZED, \"Unauthorized\", \"The user is not authorized to perform this action\", debuggingInfo);\n }\n\n public static AppException createForbidden(){\n return new AppException(403, \"Forbidden\", \"The user is not authorized to perform this action\");\n }\n\n public static AppException legalTagAlreadyExistsError(String name){\n return new AppException(409, \"Conflict\", \"A LegalTag already exists for the given name \" + name);\n }\n\n public static AppException legalTagDoesNotExistError(String name){\n return new AppException(404, \"Not found\", \"Cannot update a LegalTag that does not exist for given name \" + name);\n }\n\n public static AppException countryCodeLoadingError(){\n return new AppException(500, \"Internal Server Error\", \"Unexpected error. Please wait 30 seconds and try again.\");\n }\n\n private String sanitizeString(String msg) {\n return Strings.isNullOrEmpty(msg) ? 
\"\" : msg.replace('\\n', '_').replace('\\r', '_');\n }\n}\n" }, { "alpha_fraction": 0.733790934085846, "alphanum_fraction": 0.7387529015541077, "avg_line_length": 39.310001373291016, "blob_id": "7c3a61730cfca609e85852d4c58adbc19ed2cb68", "content_id": "0c3bbf1300ad50ed2581498a8dc941356588f3c6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12092, "license_type": "permissive", "max_line_length": 106, "num_lines": 300, "path": "/compatibility-layer/service/ingest/src/test/java/com/osdu/service/delfi/DelfiSubmitServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport static com.osdu.model.delfi.status.MasterJobStatus.COMPLETED;\nimport static com.osdu.model.delfi.status.MasterJobStatus.FAILED;\nimport static com.osdu.model.delfi.status.MasterJobStatus.RUNNING;\nimport static java.lang.String.format;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.junit.jupiter.params.provider.Arguments.arguments;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.Mockito.timeout;\n\nimport com.osdu.ReplaceCamelCase;\nimport com.osdu.client.DelfiIngestionClient;\nimport com.osdu.model.RequestContext;\nimport com.osdu.model.delfi.DelfiFile;\nimport com.osdu.model.delfi.DelfiIngestedFile;\nimport com.osdu.model.delfi.status.JobInfo;\nimport com.osdu.model.delfi.status.JobPollingResult;\nimport com.osdu.model.delfi.status.JobStatusResponse;\nimport com.osdu.model.delfi.status.MasterJobStatus;\nimport com.osdu.model.delfi.status.Summary;\nimport com.osdu.model.delfi.submit.FileInput;\nimport com.osdu.model.delfi.submit.SubmitFileContext;\nimport com.osdu.model.delfi.submit.SubmitFileObject;\nimport com.osdu.model.delfi.submit.SubmitFileResult;\nimport com.osdu.model.delfi.submit.SubmitJobResult;\nimport com.osdu.model.delfi.submit.SubmittedFile;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.model.property.SubmitProperties;\nimport com.osdu.service.SubmitService;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.junit.jupiter.params.ParameterizedTest;\nimport org.junit.jupiter.params.provider.Arguments;\nimport org.junit.jupiter.params.provider.MethodSource;\nimport org.mockito.AdditionalAnswers;\nimport org.mockito.InOrder;\nimport org.mockito.Mock;\nimport org.mockito.Mockito;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.springframework.web.client.RestTemplate;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\npublic class 
DelfiSubmitServiceTest {\n\n private static final String APP_KEY = \"appKey\";\n private static final String AUTHORIZATION_TOKEN = \"authToken\";\n private static final String PARTITION = \"partition\";\n\n private static final String JOB_ID_1 = \"jobId-1\";\n private static final String JOB_ID_2 = \"jobId-2\";\n private static final String JOB_ID_3 = \"jobId-3\";\n\n private static final String STORAGE_HREF = \"http://storage.host.com\";\n private static final String GCS_PROTOCOL = \"gs:/\";\n private static final String SUCCESS_METADATA_JSON_PATH = \"successrecords/success-metadata.json\";\n\n private static final String RECORD_KIND = \"tenant:ingestion-test:wellbore:1.0.0\";\n private static final String WPC_RESOURCE_TYPE_ID = \"srn:type:work-product-component/WellLog:version1\";\n private static final String FILE_RESOURCE_TYPE_ID = \"srn:type:file/las2:version1\";\n private static final String DELFI_RECORD_ID_1 = \"recordId-1\";\n private static final String OWNER_EMAIL_1 = \"[email protected]\";\n private static final String VIEWER_EMAIL_1 = \"[email protected]\";\n private static final String DATA_DEFAULT_OWNERS = \"data.default.owners\";\n private static final String DATA_DEFAULT_VIEWERS = \"data.default.viewers\";\n private static final String LAS_INGESTOR = \"[{\\\"LASIngestor\\\":{\\\"createRawWellRecord\\\":true}}]\";\n\n @Mock\n private RestTemplate restTemplate;\n @Mock\n private DelfiIngestionClient delfiIngestionClient;\n @Mock\n private DelfiPortalService portalService;\n\n private DelfiPortalProperties portalProperties = DelfiPortalProperties.builder()\n .appKey(APP_KEY)\n .build();\n private SubmitProperties submitProperties = SubmitProperties.builder()\n .pollingInterval(10)\n .pollingCycles(3)\n .build();\n\n private SubmitService submitService;\n\n @BeforeEach\n public void setUp() {\n submitService = new DelfiSubmitService(portalProperties, submitProperties, restTemplate,\n delfiIngestionClient, portalService);\n }\n\n @ParameterizedTest(name = \"#{index}: Job (jobId = {0}) should be in status {2}\")\n @MethodSource(\"awaitJobProvider\")\n public void shouldPollAndAwaitingStatusOfTheSubmittedIngestionJob(String jobId,\n List<MasterJobStatus> jobStatuses, MasterJobStatus expectedStatus) {\n // given\n RequestContext requestContext = getRequestContext();\n\n List<JobStatusResponse> jobStatusResponses = jobStatuses.stream()\n .map(status -> getJobStatusResponse(jobId, status))\n .collect(Collectors.toList());\n\n given(delfiIngestionClient.getJobStatus(jobId, AUTHORIZATION_TOKEN, APP_KEY, PARTITION))\n .willAnswer(AdditionalAnswers.returnsElementsOf(jobStatusResponses));\n\n // when\n JobPollingResult jobPollingResult = submitService.awaitSubmitJob(jobId, requestContext);\n\n // then\n then(jobPollingResult).isEqualToIgnoringGivenFields(JobPollingResult.builder()\n .runningJob(jobId)\n .status(expectedStatus)\n .build(), \"job\");\n\n InOrder inOrder = Mockito.inOrder(restTemplate, delfiIngestionClient, portalService);\n inOrder.verify(delfiIngestionClient, timeout(7 * 10).times(jobStatuses.size()))\n .getJobStatus(jobId, AUTHORIZATION_TOKEN, APP_KEY, PARTITION);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldSubmitFileToIngestIntoDatalakeUsingLasIngestor() {\n // given\n String fileRelativePath = \"/some-landing-zone/some-user/uuid/file-name-1.las\";\n RequestContext requestContext = getRequestContext();\n SubmitFileContext fileContext = SubmitFileContext.builder()\n .relativeFilePath(fileRelativePath)\n .kind(RECORD_KIND)\n 
.wpcResourceTypeId(WPC_RESOURCE_TYPE_ID)\n .fileResourceTypeId(FILE_RESOURCE_TYPE_ID)\n .build();\n SubmitFileObject fileObject = getSubmitFileObject(fileContext, LAS_INGESTOR);\n\n given(delfiIngestionClient.submitFile(AUTHORIZATION_TOKEN, APP_KEY, PARTITION, PARTITION, fileObject))\n .willReturn(SubmitFileResult.builder().jobId(JOB_ID_1).build());\n\n // when\n SubmitJobResult submitJobResult = submitService.submitFile(fileContext, requestContext);\n\n // then\n then(submitJobResult).isEqualTo(SubmitJobResult.builder()\n .jobId(JOB_ID_1)\n .build());\n\n InOrder inOrder = Mockito.inOrder(restTemplate, delfiIngestionClient, portalService);\n inOrder.verify(delfiIngestionClient)\n .submitFile(AUTHORIZATION_TOKEN, APP_KEY, PARTITION, PARTITION, fileObject);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldSubmitFileToIngestIntoDatalakeUsingDefaultIngestor() {\n // given\n String fileRelativePath = \"/some-landing-zone/some-user/uuid/file-name-2.csv\";\n RequestContext requestContext = getRequestContext();\n SubmitFileContext fileContext = SubmitFileContext.builder()\n .relativeFilePath(fileRelativePath)\n .kind(\"tenant:ingestion-test:wellbore-traj:1.0.0\")\n .wpcResourceTypeId(\"srn:type:work-product-component/WellboreTrajectory:version1\")\n .fileResourceTypeId(\"srn:type:file/csv:version1\")\n .build();\n SubmitFileObject fileObject = getSubmitFileObject(fileContext, null);\n\n given(delfiIngestionClient.submitFile(AUTHORIZATION_TOKEN, APP_KEY, PARTITION, PARTITION, fileObject))\n .willReturn(SubmitFileResult.builder().jobId(JOB_ID_2).build());\n\n // when\n SubmitJobResult submitJobResult = submitService.submitFile(fileContext, requestContext);\n\n // then\n then(submitJobResult).isEqualTo(SubmitJobResult.builder()\n .jobId(JOB_ID_2)\n .build());\n\n InOrder inOrder = Mockito.inOrder(restTemplate, delfiIngestionClient, portalService);\n inOrder.verify(delfiIngestionClient)\n .submitFile(AUTHORIZATION_TOKEN, APP_KEY, PARTITION, PARTITION, fileObject);\n inOrder.verifyNoMoreInteractions();\n }\n\n @Test\n public void shouldFetchRecordIdOfIngestedFile() {\n // given\n String outputLocation = \"/some-ingestion-persistent-zone/some-user/uuid/output\";\n String fileRelativePath = outputLocation + SUCCESS_METADATA_JSON_PATH;\n String fileUri = GCS_PROTOCOL + fileRelativePath;\n String fileSignedUrl = STORAGE_HREF + fileRelativePath\n + \"[email protected]&Expires=123&Signature=lX\";\n\n SubmittedFile file = SubmittedFile.builder().build();\n JobStatusResponse jobStatusResponse = JobStatusResponse.builder()\n .summary(Summary.builder()\n .outputLocation(outputLocation)\n .build())\n .jobInfo(JobInfo.builder()\n .jobId(JOB_ID_1)\n .masterJobStatus(COMPLETED)\n .build())\n .build();\n RequestContext requestContext = getRequestContext();\n DelfiFile delfiFile = DelfiFile.builder()\n .signedUrl(fileSignedUrl)\n .build();\n\n String jobMetadata = getMetadataFileContent(DELFI_RECORD_ID_1);\n\n given(portalService.getFile(fileUri, AUTHORIZATION_TOKEN, PARTITION))\n .willReturn(delfiFile);\n given(restTemplate.getForObject(fileSignedUrl, String.class))\n .willReturn(jobMetadata);\n\n // when\n DelfiIngestedFile ingestedFile = submitService\n .getIngestedFile(file, jobStatusResponse, requestContext);\n\n // then\n then(ingestedFile).isEqualTo(DelfiIngestedFile.builder()\n .submittedFile(file)\n .recordId(DELFI_RECORD_ID_1)\n .build());\n\n InOrder inOrder = Mockito.inOrder(restTemplate, delfiIngestionClient, portalService);\n inOrder.verify(portalService).getFile(fileUri, 
AUTHORIZATION_TOKEN, PARTITION);\n inOrder.verify(restTemplate).getForObject(fileSignedUrl, String.class);\n inOrder.verifyNoMoreInteractions();\n }\n\n private static Stream<Arguments> awaitJobProvider() {\n return Stream.of(\n arguments(JOB_ID_1, Arrays.asList(RUNNING, COMPLETED), COMPLETED),\n arguments(JOB_ID_2, Arrays.asList(RUNNING, FAILED), FAILED),\n arguments(JOB_ID_3, Arrays.asList(RUNNING, RUNNING, RUNNING), RUNNING)\n );\n }\n\n private RequestContext getRequestContext() {\n Map<String, String> emails = new HashMap<>();\n emails.put(DATA_DEFAULT_OWNERS, OWNER_EMAIL_1);\n emails.put(DATA_DEFAULT_VIEWERS, VIEWER_EMAIL_1);\n\n return RequestContext.builder()\n .authorizationToken(AUTHORIZATION_TOKEN)\n .partition(PARTITION)\n .userGroupEmailByName(emails)\n .build();\n }\n\n private JobStatusResponse getJobStatusResponse(String jobId, MasterJobStatus jobStatus) {\n return JobStatusResponse.builder()\n .jobInfo(JobInfo.builder()\n .jobId(jobId)\n .masterJobStatus(jobStatus)\n .build())\n .build();\n }\n\n private String getAcl(String ownerEmail, String viewerEmail) {\n return format(\"{\\\"acl\\\":{\\\"owners\\\":[\\\"%s\\\"],\\\"viewers\\\":[\\\"%s\\\"]}}\", ownerEmail, viewerEmail);\n }\n\n private String getMetadataFileContent(String recordId) {\n return format(\"{\\\"status\\\":201,\\\"message\\\":\\\"{\\\\\\\"recordCount\\\\\\\":1,\"\n + \"\\\\\\\"recordIds\\\\\\\":[\\\\\\\"%s\\\\\\\"],\\\\\\\"skippedRecordIds\\\\\\\":[]}\\\"}\", recordId);\n }\n\n private SubmitFileObject getSubmitFileObject(SubmitFileContext fileContext, String ingestor) {\n return SubmitFileObject.builder()\n .kind(fileContext.getKind())\n .acl(getAcl(OWNER_EMAIL_1, VIEWER_EMAIL_1))\n .filePath(GCS_PROTOCOL + fileContext.getRelativeFilePath())\n .fileInput(FileInput.FILE_PATH)\n .ingestorRoutines(ingestor)\n .build();\n }\n\n}" }, { "alpha_fraction": 0.7440841197967529, "alphanum_fraction": 0.7581069469451904, "avg_line_length": 37.03333282470703, "blob_id": "14667b605b282e9386c40540c3e816b62f464ca9", "content_id": "57be271c126aa97401a83422a970af921791742d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1141, "license_type": "permissive", "max_line_length": 101, "num_lines": 30, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/logging/ILogWriter.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.logging;\n\nimport org.opengroup.osdu.core.common.model.http.Request;\n\nimport java.util.Map;\nimport java.util.logging.Level;\n\npublic interface ILogWriter extends AutoCloseable{\n\n void writeJsonEntry(String logname, Map<String, Object> json, Map<String, String> labels);\n void writeRequestEntry(String logname, String text, Request request, Map<String, String> labels);\n void writeEntry(String logname, 
Level severity, String text, Map<String, String> labels);\n}\n" }, { "alpha_fraction": 0.7063291072845459, "alphanum_fraction": 0.7215189933776855, "avg_line_length": 34.90909194946289, "blob_id": "8b109a97ff250e72c1f3be35cc15e47854674b02", "content_id": "bee01578f0375b356c2abed526c9a3e19e09ab79", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/osdu-r2/os-python-sdk/osdu_api/model/acl.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n'''\nAcl model mirroring what's found in core common\n'''\nclass Acl:\n def __init__(self, viewers: list, owners: list):\n self.viewers = viewers\n self.owners = owners\n" }, { "alpha_fraction": 0.7265892028808594, "alphanum_fraction": 0.7320573925971985, "avg_line_length": 28.85714340209961, "blob_id": "0b72d7750b08e9865b132758a2ab70fc39d4fc7f", "content_id": "ca173ede677b508c8e679cd6b87a01c70a764949", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1463, "license_type": "permissive", "max_line_length": 75, "num_lines": 49, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/provider/interfaces/FileLocationRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.interfaces;\n\nimport org.opengroup.osdu.core.common.model.file.FileListRequest;\nimport org.opengroup.osdu.core.common.model.file.FileListResponse;\nimport org.opengroup.osdu.core.common.model.file.FileLocation;\n\npublic interface FileLocationRepository {\n\n /**\n * Finds a file location by file ID in a collection.\n *\n * @param fileID file ID\n * @return file location if it's found otherwise null\n */\n FileLocation findByFileID(String fileID);\n\n /**\n * Saves a file location in a collection.\n *\n * @param fileLocation file location\n * @return saved file location with populated ID\n */\n FileLocation save(FileLocation fileLocation);\n\n /**\n * Finds a file list page by request.\n *\n * @param request request\n * @return file list page\n */\n FileListResponse findAll(FileListRequest request);\n\n}\n" }, { "alpha_fraction": 
0.7911938428878784, "alphanum_fraction": 0.7961870431900024, "avg_line_length": 37.64912414550781, "blob_id": "3943c95e153f8f5acc1ef1ac250c8967df37d3e7", "content_id": "0895779a7500d68db6e74d9136086ee748f89c4b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2203, "license_type": "permissive", "max_line_length": 98, "num_lines": 57, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/service/SubmitServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.service;\n\nimport java.util.Map;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.WorkflowType;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.ingest.model.SubmitRequest;\nimport org.opengroup.osdu.ingest.model.SubmitResponse;\nimport org.opengroup.osdu.ingest.provider.interfaces.ISubmitService;\nimport org.opengroup.osdu.ingest.provider.interfaces.IValidationService;\nimport org.opengroup.osdu.ingest.provider.interfaces.IWorkflowIntegrationService;\nimport org.opengroup.osdu.ingest.provider.interfaces.IWorkflowPayloadService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@RequiredArgsConstructor\n@Slf4j\npublic class SubmitServiceImpl implements ISubmitService {\n\n final IWorkflowIntegrationService workflowIntegrationService;\n final IValidationService validationService;\n final IWorkflowPayloadService workflowPayloadService;\n\n @Override\n public SubmitResponse submit(SubmitRequest request, DpsHeaders headers) {\n log.debug(\"Submit request with payload - {}\", request);\n\n validationService.validateSubmitRequest(request);\n\n Map<String, Object> context = workflowPayloadService.getContext(request.getFileId(), headers);\n\n String workflowId = workflowIntegrationService.submitIngestToWorkflowService(\n WorkflowType.INGEST, request.getDataType(), context, headers);\n\n SubmitResponse response = SubmitResponse.builder().workflowId(workflowId).build();\n log.debug(\"Submit response - {}\", response);\n return response;\n }\n\n}\n" }, { "alpha_fraction": 0.7632508873939514, "alphanum_fraction": 0.7679623365402222, "avg_line_length": 33.65306091308594, "blob_id": "114bead3072ee7aa237f80509bc335de092cabb2", "content_id": "32adf3e795eec92bd2ddd2f5072bb82c84be9a30", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1698, "license_type": "permissive", "max_line_length": 75, "num_lines": 49, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/provider/interfaces/IValidationService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 
(the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.interfaces;\n\nimport javax.validation.ConstraintViolationException;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowRequest;\nimport org.opengroup.osdu.workflow.model.GetStatusRequest;\nimport org.opengroup.osdu.workflow.model.UpdateStatusRequest;\n\npublic interface IValidationService {\n\n /**\n * Validates get status request using Java Bean Validation.\n *\n * @param request get status request\n * @throws ConstraintViolationException if request is invalid\n */\n void validateGetStatusRequest(GetStatusRequest request);\n\n /**\n * Validates update status request using Java Bean Validation.\n *\n * @param request update status request\n * @throws ConstraintViolationException if request is invalid\n */\n void validateUpdateStatusRequest(UpdateStatusRequest request);\n\n /**\n * Validates start workflow request using Java Bean Validation.\n *\n * @param request start workflow request\n * @throws ConstraintViolationException if request is invalid\n */\n void validateStartWorkflowRequest(StartWorkflowRequest request);\n}\n" }, { "alpha_fraction": 0.48872488737106323, "alphanum_fraction": 0.5025283694267273, "avg_line_length": 41.05172348022461, "blob_id": "a9c2e60477a57971288ef5d8b3ca3a1dc67f7d3a", "content_id": "cd5e234b85784311d85a6d2073b02aec3750108e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7319, "license_type": "permissive", "max_line_length": 132, "num_lines": 174, "path": "/compatibility-layer/docs/API/Search API.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Search API overview\n\nThe Search service finds subsurface records in the DELFI Data Ecosystem. The service accepts search terms such as \n`fulltext`, `geospatial`, `metadata`, and `lineage`, and returns detailed data about the found item.\n\nThe search request to the service must come in the OSDU format, which is then internally transformed into the search \nobject that’s compatible with the DELFI Data Ecosystem. The returned object from DELFI is mapped to the search result in\nthe OSDU format and is then returned to the client.\n\n## Mapping of OSDU and DELFI search terms\n\nThere's a divergence between the DELFI format for data and the OSDU standard in terms of how search queries are \nformatted, which is why certain OSDU search terms can't be fully mapped to a DELFI search query. 
The Search service \nignores such terms.\n\nSee the following table for the supported OSDU search terms.\n\n| OSDU search term | Description | Supported by DELFI |\n| ---------------- | --------------------------------------------------------------------------------------- | ------------------ |\n| fulltext | Single search expression | Yes |\n| geo_centroid | A list of numbers | Yes |\n| geo_location | Object with distance, type, and coordinates properties | Yes | \n| metadata | A list of string values | Yes | \n| facets | An array of facet names | Yes |\n| full_results | A boolean value that defines whether only indexed values should be returned. **Always `true`** | No |\n| sort | Object value to control sorting of search results | Yes | \n| start | The index of the first search result to be returned | Yes |\n| count | The number of search results to return for the current request | Yes | \n| map_aggregates | Boolean value. **Ignored** | No |\n| zoom_level | Integer that represents the zoom level applied to geo queries. **Ignored** | No |\n| aggregates_count | Integer used for the size of facet queries. **Ignored** | No |\n\n## Search result\n\n| Search result property | Description | Type |\n| ---------------------- | ----------------------------------------------------------------------------- | ----------------- |\n| results | A list of search result objects containing found metadata | A list of objects |\n| totalHits | The total number of found documents in the DELFI Portal | Integer |\n| facets | Contains the facet values for the facet names specified in the search request | A list of strings |\n| count | The total number of search results in the current response | Integer |\n| start | The index of the first search result in the current response | Integer |\n\n### POST /search\n\nThe request body must contain at least one of the following properties &mdash; `fulltext`, `metadata`, `geo_location`, \nor `geo_centroid`. If these search request fields are missing, then an empty search result is returned.\n\n| Characteristics | Description |\n| ------------------- | -------------------------------------------------------------------------------------------------- |\n| Authorization | The request must contain the authorization token in the header: `Authorization: \"Bearer {token}\"`. | \n| URL parameters | None |\n| Request body | Must contain at least one of the search terms: `fulltext`, `metadata`, `geo_location`, or `geo_centroid`. |\n| Content Type | `application/json` |\n| Return Content Type | `application/json` |\n\n## Search API\n\nAll the search endpoints are relative to the path **https://{project-id}.apigee.net/**.\n\n### POST /search\n\nSearch for data in DELFI. 
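\n\nBesides the fulltext example below, a geospatial query can combine the `geo_location` properties listed in the table above. A minimal sketch of such a request body follows (the `type` value, coordinate order, and distance units are illustrative assumptions, not values confirmed by this document):\n\n```json\n{\n  \"geo_location\": {\n    \"type\": \"distance\",\n    \"coordinates\": [-98.45, 35.73],\n    \"distance\": 1500\n  },\n  \"start\": 1,\n  \"count\": 10\n}\n```\n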
\n\n#### Search request body example\n\n```sh\ncurl -X POST \\\n http://{Apigee URI}/search \\\n -H 'Accept: */*' \\\n -H 'Accept-Encoding: gzip, deflate' \\\n -H 'Authorization: Bearer <your token here>' \\\n -H 'Cache-Control: no-cache' \\\n -H 'Connection: keep-alive' \\\n -H 'Content-Length: 63' \\\n -H 'Content-Type: application/json' \\\n -H 'Host: {Apigee URI}' \\\n -d '{\n \"fulltext\" : \"AKM-11 LOG\",\n \"start\": 1,\n \"count\": 1\n}'\n```\n\n#### Search response body example\n\n```json\n{\n \"results\": [\n {\n \"data\": {\n \"IndividualTypeProperties.Description\": \"Well Log\",\n \"IndividualTypeProperties.Name\": \"AKM-11 LOG\",\n \"IndividualTypeProperties.WellboreID\": \"srn:master-data/Wellbore:1013:\",\n \"IndividualTypeProperties.TopMeasuredDepth.Depth\": 2182.0004\n },\n \"kind\": \"{partition}:ingestion-test:work-product-component:1.0.0\",\n \"namespace\": \"{partition}:ingestion-test\",\n \"legal\": {\n \"legaltags\": [\n \"{partition}-public-usa-dataset-1\"\n ],\n \"otherRelevantDataCountries\": [\n \"US\"\n ],\n \"status\": \"compliant\"\n },\n \"id\": \"{partition}:doc:b8c930a1b1cc4299b0ea93f81355aa1e\",\n \"acl\": {\n \"viewers\": [\n \"data.default.viewers@{partition}.p4d.cloud.slb-ds.com\"\n ],\n \"owners\": [\n \"data.default.owners@{partition}.p4d.cloud.slb-ds.com\"\n ]\n },\n \"type\": \"work-product-component\",\n \"version\": 1573656047229419\n }\n ],\n \"totalHits\": 68,\n \"count\": 1,\n \"start\": 1\n}\n```\n\n> Note that the example response doesn't contain an actual DELFI partition ID in `results[0].id`.\n\n### Search request example without required fields\n\n```sh\ncurl -X POST \\\n http://{Apigee URI}/search \\\n -H 'Accept: */*' \\\n -H 'Accept-Encoding: gzip, deflate' \\\n -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJpYXQiOjE1NzU2NDExMzB9.RZkUSPCEReWbQTDkSVN5ztz6iWN7wji5TqF2XR7A4FQ' \\\n -H 'Cache-Control: no-cache' \\\n -H 'Connection: keep-alive' \\\n -H 'Content-Length: 63' \\\n -H 'Content-Type: application/json' \\\n -H 'Host: {Apigee URI}' \\\n -d '{\n \"start\": 1,\n \"count\": 1\n}'\n```\n\nResponse body\n\n```json\n{\n \"status\": 400,\n \"error\": \"Bad Request\"\n}\n```\n\n## Search statuses\n\nThe Search service returns the following statuses:\n\n* **401 Unauthorized**. The request did not contain a valid authorization token.\n\nResponse example if authorization fails.\n\n```json\n{\n \"timestamp\": \"2019-11-29T09:16:40.332+0000\",\n \"status\": 401,\n \"error\": \"Unauthorized\",\n \"message\": \"Missing authorization token\",\n \"path\": \"/\"\n}\n```\n\n* **200 Success**. 
The response may or may not contain data for a given request.\n" }, { "alpha_fraction": 0.6964393258094788, "alphanum_fraction": 0.7012673616409302, "avg_line_length": 33.54166793823242, "blob_id": "71908cd5759ffc244e7e949f93c843e9a118a1a2", "content_id": "4682b30d2c6c8d3b5c3fd368e9809eda6fbe9dd1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1657, "license_type": "permissive", "max_line_length": 80, "num_lines": 48, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/data/parser/JsonParser.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.core.data.parser;\n\nimport com.osdu.core.reporter.TestReporter;\nimport org.json.simple.parser.JSONParser;\nimport org.json.simple.parser.ParseException;\n\nimport java.io.FileNotFoundException;\nimport java.io.FileReader;\nimport java.io.IOException;\n\npublic class JsonParser {\n    static FileReader reader;\n    static Object parsedJson;\n\n    public static Object readJson(String filename) {\n        TestReporter.reportDebugStep(\"Try to get access to file: %s\", filename);\n        try {\n            reader = new FileReader(filename);\n        } catch (FileNotFoundException exception) {\n            TestReporter.reportErrorStep(\"File not found: %s\", exception);\n            // Return early: without a reader, the parse call below would fail\n            return null;\n        }\n\n        TestReporter.reportDebugStep(\"Successfully read file: %s\", filename);\n        JSONParser jsonParser = new JSONParser();\n        try {\n            parsedJson = jsonParser.parse(reader);\n        } catch (IOException | ParseException exception) {\n            TestReporter.reportErrorStep(\"File can't be parsed: %s\", exception);\n        }\n        return parsedJson;\n    }\n}" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7271267771720886, "avg_line_length": 25.510639190673828, "blob_id": "1749ca9e8a15304dd4c3a8407431e863cffbb878", "content_id": "8a787c167a2360d9c013809745460e028f5e4bb8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1246, "license_type": "permissive", "max_line_length": 75, "num_lines": 47, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/units/impl/ScaleOffset.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage 
org.opengroup.osdu.core.common.model.units.impl;\n\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\n\nimport javax.validation.constraints.NotNull;\n\n\n@Data\n@AllArgsConstructor\npublic class ScaleOffset extends UnitParameters {\n\n @NotNull\n @JsonProperty(\"offset\")\n private Double offset;\n @NotNull\n @JsonProperty(\"scale\")\n private Double scaleFactor;\n\n public ScaleOffset() {\n this.scaleFactor = Double.NaN;\n this.offset = Double.NaN;\n }\n\n public double scaleToSI() {\n return this.scaleFactor;\n }\n\n}\n" }, { "alpha_fraction": 0.6821670532226562, "alphanum_fraction": 0.6898419857025146, "avg_line_length": 32.05970001220703, "blob_id": "0cb6262c3b3e4cb4906f60fbb06f0e21e39ce140", "content_id": "5d84e5fa0ee58bc0c970e8ebe71926abc8c8a60f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2215, "license_type": "permissive", "max_line_length": 132, "num_lines": 67, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/QueryResponse.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport com.google.gson.ExclusionStrategy;\nimport com.google.gson.FieldAttributes;\nimport com.google.gson.Gson;\nimport com.google.gson.GsonBuilder;\nimport lombok.AllArgsConstructor;\nimport lombok.Builder;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.Map;\n\n@Data\n@NoArgsConstructor\n@AllArgsConstructor\n@Builder\npublic class QueryResponse {\n\n private List<Map<String, Object>> results = new ArrayList<>();\n private List<AggregationResponse> aggregations = new ArrayList<>();\n private long totalCount;\n\n @Override\n public String toString() {\n if (this.aggregations == null) {\n ExclusionStrategy strategy = new ExclusionStrategy() {\n @Override\n public boolean shouldSkipField(FieldAttributes field) {\n return \"aggregations\".equals(field.getName());\n }\n\n @Override\n public boolean shouldSkipClass(Class<?> clazz) {\n return false;\n }\n };\n return new GsonBuilder().addSerializationExclusionStrategy(strategy).create().toJson(this, QueryResponse.class);\n } else {\n return new Gson().toJson(this, QueryResponse.class);\n }\n }\n\n public static QueryResponse getEmptyResponse() {\n return QueryResponse.builder().results(Collections.emptyList()).aggregations(Collections.emptyList()).totalCount(0).build();\n }\n}\n" }, { "alpha_fraction": 0.7659420371055603, "alphanum_fraction": 0.7739130258560181, "avg_line_length": 34.38461685180664, "blob_id": "0ba83fc1d95f2bbaedda819238e5ace673d2a239", "content_id": "3002ca7edf39d8c4331072793748f227d90ae99a", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1380, "license_type": "permissive", "max_line_length": 92, "num_lines": 39, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/service/FileServiceImpl.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.service;\n\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.opengroup.osdu.core.common.model.file.FileRequest;\nimport org.opengroup.osdu.core.common.model.file.FileResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.delivery.provider.interfaces.FileService;\nimport org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class FileServiceImpl implements FileService {\n\n @Override\n public FileResponse getFile(FileRequest request, DpsHeaders headers) {\n log.debug(\"Request file with : {}\", request);\n log.debug(\"File result :\");\n throw new UnsupportedOperationException(\"It will be implemented in future iterations.\");\n }\n\n}\n" }, { "alpha_fraction": 0.7254955768585205, "alphanum_fraction": 0.72879958152771, "avg_line_length": 40.2613639831543, "blob_id": "5e36e811da921a285dde592f80597c41580e9088", "content_id": "9ad8fb745cd5459a8ae404cddbe8ce1af370c075", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3632, "license_type": "permissive", "max_line_length": 100, "num_lines": 88, "path": "/compatibility-layer/service/delfi-client/src/main/java/com/osdu/service/delfi/DelfiPortalService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport com.osdu.client.DelfiFileClient;\nimport com.osdu.client.DelfiStorageClient;\nimport com.osdu.exception.OsduException;\nimport com.osdu.model.Record;\nimport com.osdu.model.delfi.DelfiFile;\nimport com.osdu.model.delfi.DelfiRecord;\nimport com.osdu.model.delfi.SaveRecordsResult;\nimport com.osdu.model.property.DelfiPortalProperties;\nimport com.osdu.service.PortalService;\nimport java.util.Collections;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport 
org.springframework.stereotype.Service;\n\n@Service\n@Slf4j\n@RequiredArgsConstructor\npublic class DelfiPortalService implements PortalService {\n\n final DelfiStorageClient storageClient;\n\n final DelfiFileClient fileClient;\n\n final DelfiPortalProperties portalProperties;\n\n @Override\n public Record getRecord(String id, String authorizationToken, String partition) {\n log.debug(\"Getting record with params : {}, {}, {}\", id, authorizationToken, partition);\n if (id == null || authorizationToken == null || partition == null) {\n throw new OsduException(String.format(\"Invalid parameters passed to client :\"\n + \" id: %s, authorizationToken : %s, partition: %s\", id, authorizationToken, partition));\n }\n DelfiRecord record = storageClient.getRecord(id, authorizationToken, partition,\n portalProperties.getAppKey());\n log.debug(\"Got record: /n\" + record.toString());\n\n return record;\n }\n\n @Override\n public DelfiFile getFile(String location, String authorizationToken, String partition) {\n log.debug(\"Getting file with params : {}, {}, {}\", location, authorizationToken, partition);\n if (location == null || authorizationToken == null || partition == null) {\n throw new OsduException(String.format(\"Invalid parameters passed to client :\"\n + \" location: %s, authorizationToken : %s, partition: %s\", location,\n authorizationToken, partition));\n }\n DelfiFile delfiFile = fileClient.getSignedUrlForLocation(location, authorizationToken,\n portalProperties.getAppKey(), partition);\n log.debug(\"Got Delfi file: /n\" + delfiFile);\n return delfiFile;\n }\n\n @Override\n public Record putRecord(Record record, String authorizationToken, String partition) {\n log.debug(\"Put record with params : {}, {}, {}\", record, authorizationToken, partition);\n if (authorizationToken == null || partition == null) {\n throw new OsduException(String.format(\"Invalid parameters passed to client :\"\n + \" record: %s, authorizationToken : %s, partition: %s\", record, authorizationToken,\n partition));\n }\n SaveRecordsResult saveResult = storageClient.putRecords(Collections.singletonList(record),\n authorizationToken, partition, portalProperties.getAppKey());\n log.debug(\"Save records result: {}\", saveResult);\n Record resultRecord = getRecord(saveResult.getRecordIds().get(0), authorizationToken,\n partition);\n log.debug(\"Put record finished: \" + resultRecord);\n return resultRecord;\n }\n}\n\n" }, { "alpha_fraction": 0.6445497870445251, "alphanum_fraction": 0.6485782265663147, "avg_line_length": 33.59016418457031, "blob_id": "149f928140cf1470282170eafff2c1dc10a9fd67", "content_id": "095e84e60774ff7ba41991dea8fb71e8145f8c69", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4220, "license_type": "permissive", "max_line_length": 115, "num_lines": 122, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/http/AbstractHttpClient.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.http;\n\nimport org.apache.commons.lang3.StringUtils;\n\nimport java.io.BufferedReader;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.InputStreamReader;\nimport java.io.OutputStreamWriter;\nimport java.net.HttpURLConnection;\nimport java.net.MalformedURLException;\nimport java.net.URI;\nimport java.net.URISyntaxException;\nimport java.net.URL;\n\nabstract class AbstractHttpClient implements IHttpClient {\n\n @Override\n public HttpResponse send(HttpRequest request) {\n\n HttpResponse output = new HttpResponse();\n output.setRequest(request);\n HttpURLConnection conn = null;\n try {\n request.setUrl(encodeUrl(request.getUrl()));\n\n long start = System.currentTimeMillis();\n conn = this.createConnection(request);\n this.sendRequest(conn, request.body);\n\n output.setResponseCode(conn.getResponseCode());\n output.setContentType(conn.getContentType());\n output.setHeaders(conn.getHeaderFields());\n\n if (output.isSuccessCode()) {\n output.setBody(getBody(conn.getInputStream()).toString());\n\n } else {\n output.setBody(getBody(conn.getErrorStream()).toString());\n }\n\n output.setLatency(System.currentTimeMillis() - start);\n } catch (IOException e) {\n System.err.println(String.format(\"Unexpected error sending to URL %s METHOD %s. error %s\", request.url,\n request.httpMethod, e));\n output.setException(e);\n } catch (URISyntaxException e) {\n output.setException(e);\n } finally {\n if (conn != null)\n conn.disconnect();\n }\n\n return output;\n }\n\n private StringBuilder getBody(InputStream stream) throws IOException {\n try (BufferedReader in = new BufferedReader(new InputStreamReader(stream))) {\n String inputLine;\n StringBuilder resp = new StringBuilder();\n while ((inputLine = in.readLine()) != null) {\n resp.append(inputLine);\n }\n return resp;\n }\n }\n\n HttpURLConnection createConnection(HttpRequest request)\n throws IOException {\n\n HttpURLConnection conn = null;\n\n URL url = new URL(request.url);\n conn = (HttpURLConnection) url.openConnection();\n conn.setInstanceFollowRedirects(request.followRedirects);\n conn.setConnectTimeout(request.connectionTimeout);\n\n request.headers.forEach(conn::setRequestProperty);\n\n if (request.httpMethod.equals(HttpRequest.POST) ||\n request.httpMethod.equals(HttpRequest.PUT) ||\n request.httpMethod.equals(HttpRequest.PATCH)) {\n conn.setDoOutput(true); //only set if we have a body on request\n }\n conn.setRequestMethod(request.httpMethod);\n\n return conn;\n }\n\n private void sendRequest(HttpURLConnection connection, String body) throws IOException {\n if (!StringUtils.isBlank(body)) {\n try (OutputStreamWriter writer = new OutputStreamWriter(connection.getOutputStream())) {\n writer.write(body);\n }\n }\n }\n\n private String encodeUrl(String url) throws MalformedURLException, URISyntaxException {\n URL temp = new URL(url);\n URI uri = new URI(temp.getProtocol(), temp.getUserInfo(), temp.getHost(), temp.getPort(),\n temp.getPath(), temp.getQuery(), temp.getRef());\n return uri.toASCIIString();\n }\n\n}\n" }, { "alpha_fraction": 0.7558543086051941, "alphanum_fraction": 0.7584562301635742, "avg_line_length": 38.41880416870117, "blob_id": "b45ac5da223c162a895b849fc919bc024690c898", "content_id": "9aaa324eb8b311124b5edfdb6a67787a02ce3878", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"Java", "length_bytes": 4612, "license_type": "permissive", "max_line_length": 99, "num_lines": 117, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/exception/handler/RestExceptionHandler.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.exception.handler;\n\nimport com.fasterxml.jackson.core.JsonParseException;\nimport com.fasterxml.jackson.databind.exc.MismatchedInputException;\nimport java.util.ArrayList;\nimport java.util.List;\nimport javax.validation.ConstraintViolation;\nimport javax.validation.ConstraintViolationException;\nimport lombok.RequiredArgsConstructor;\nimport org.apache.commons.lang3.exception.ExceptionUtils;\nimport org.opengroup.osdu.core.common.logging.JaxRsDpsLog;\nimport org.opengroup.osdu.core.common.model.http.AppException;\nimport org.springframework.core.Ordered;\nimport org.springframework.core.annotation.Order;\nimport org.springframework.http.HttpHeaders;\nimport org.springframework.http.HttpStatus;\nimport org.springframework.http.MediaType;\nimport org.springframework.http.ResponseEntity;\nimport org.springframework.http.converter.HttpMessageNotReadableException;\nimport org.springframework.web.bind.annotation.ControllerAdvice;\nimport org.springframework.web.bind.annotation.ExceptionHandler;\nimport org.springframework.web.context.request.WebRequest;\nimport org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;\n\n@RequiredArgsConstructor\n@Order(Ordered.HIGHEST_PRECEDENCE)\n@ControllerAdvice\npublic class RestExceptionHandler extends ResponseEntityExceptionHandler {\n\n final JaxRsDpsLog log;\n\n @ExceptionHandler({JsonParseException.class, IllegalStateException.class,\n MismatchedInputException.class})\n protected ResponseEntity<Object> handleInvalidBody(RuntimeException ex,\n WebRequest request) {\n log.error(\"Exception during REST request: \" + request.getDescription(false), ex);\n HttpHeaders headers = new HttpHeaders();\n headers.setContentType(MediaType.APPLICATION_JSON);\n ApiError apiError = ApiError.builder()\n .status(HttpStatus.BAD_REQUEST)\n .message(ExceptionUtils.getRootCauseMessage(ex))\n .build();\n return handleExceptionInternal(ex, apiError, headers,\n HttpStatus.BAD_REQUEST, request);\n }\n\n @ExceptionHandler({ConstraintViolationException.class})\n protected ResponseEntity<Object> handle(ConstraintViolationException ex, WebRequest request) {\n List<String> errors = new ArrayList<>();\n for (ConstraintViolation<?> violation : ex.getConstraintViolations()) {\n errors.add(violation.getPropertyPath() + \": \" + violation.getMessage());\n }\n log.error(\"Constraint exception: \" + errors);\n HttpHeaders headers = new HttpHeaders();\n headers.setContentType(MediaType.APPLICATION_JSON);\n ApiError apiError = ApiError.builder()\n .status(HttpStatus.BAD_REQUEST)\n .message(ExceptionUtils.getRootCauseMessage(ex))\n 
.errors(errors)\n .build();\n return handleExceptionInternal(ex, apiError, headers, HttpStatus.BAD_REQUEST, request);\n }\n\n @ExceptionHandler(AppException.class)\n protected ResponseEntity<Object> handleAppException(AppException e) {\n return this.getErrorResponse(e);\n }\n\n @Override\n protected ResponseEntity<Object> handleHttpMessageNotReadable(HttpMessageNotReadableException ex,\n HttpHeaders headers, HttpStatus status, WebRequest request) {\n ApiError apiError = ApiError.builder()\n .status(status)\n .message(ex.getLocalizedMessage())\n .build();\n return handleExceptionInternal(ex, apiError, headers, status, request);\n }\n\n\n private ResponseEntity<Object> getErrorResponse(AppException e) {\n\n String exceptionMsg = e.getOriginalException() != null\n ? e.getOriginalException().getMessage()\n : e.getError().getMessage();\n\n if (e.getError().getCode() > 499) {\n this.log.error(exceptionMsg, e);\n } else {\n this.log.warning(exceptionMsg, e);\n }\n\n // Support for non-standard HttpStatus codes\n HttpStatus httpStatus = HttpStatus.resolve(e.getError().getCode());\n if (httpStatus == null) {\n return ResponseEntity.status(e.getError().getCode()).body(e);\n } else {\n return new ResponseEntity<>(e.getError(), httpStatus);\n }\n }\n\n}\n" }, { "alpha_fraction": 0.6983776092529297, "alphanum_fraction": 0.7072271108627319, "avg_line_length": 40.121212005615234, "blob_id": "d92c82fe2e92139b387544fb00dc54536de5ebd0", "content_id": "ff977c625d92f3a10ecf3807586537d9d701e483", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1356, "license_type": "permissive", "max_line_length": 101, "num_lines": 33, "path": "/osdu-r2/os-python-sdk/osdu_api/model/search/query_request.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom osdu_api.model.search.sort_query import SortQuery\nfrom osdu_api.model.search.spatial_filter import SpatialFilter\n\nclass QueryRequest:\n\n def __init__(self, kind: str, limit: int, query: str, return_highlighted_fields: bool, \n returned_fields: list, sort: SortQuery, query_as_owner: bool, spatial_filter: SpatialFilter, \n from_: int, aggregate_by: str):\n self.kind = kind\n self.limit = limit\n self.query = query\n self.return_highlighted_fields = return_highlighted_fields\n self.returned_fields = returned_fields\n self.sort = sort\n self.query_as_owner = query_as_owner\n self.spatial_filter = spatial_filter\n # 'from' is a reserved keyword in Python, so the parameter and attribute are named 'from_'\n self.from_ = from_\n self.aggregate_by = aggregate_by" }, { "alpha_fraction": 0.741370677947998, "alphanum_fraction": 0.7453727126121521, "avg_line_length": 31.770492553710938, "blob_id": "37eb3ffa4d3d1fd9a10100b45b47a3d06dc82c01", "content_id": "6042799814fc668a5025dad270b5900d65f3fb16", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1999, "license_type": "permissive", "max_line_length": 82, 
"num_lines": 61, "path": "/osdu-r2/os-delivery/provider/delivery-gcp-datastore/src/main/java/org/opengroup/osdu/delivery/provider/gcp/mapper/FileLocationMapper.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.mapper;\n\nimport org.mapstruct.Mapper;\nimport org.opengroup.osdu.core.common.model.file.DriverType;\nimport org.opengroup.osdu.core.common.model.file.FileLocation;\nimport org.opengroup.osdu.core.common.model.file.FileLocation.FileLocationBuilder;\nimport org.opengroup.osdu.delivery.provider.gcp.model.entity.FileLocationEntity;\n\n@Mapper\npublic abstract class FileLocationMapper {\n\n /**\n * Map file location Datastore entity to file location model.\n *\n * @param entity file location entity\n * @return file location\n */\n public FileLocation toFileLocation(FileLocationEntity entity) {\n if (entity == null) {\n return null;\n }\n\n FileLocationBuilder fileLocationBuilder = FileLocation.builder();\n\n fileLocationBuilder.fileID(entity.getFileID());\n if (entity.getDriver() != null) {\n fileLocationBuilder.driver(DriverType.valueOf(entity.getDriver()));\n }\n fileLocationBuilder.location(entity.getLocation());\n fileLocationBuilder.createdAt(entity.getCreatedAt());\n fileLocationBuilder.createdBy(entity.getCreatedBy());\n\n return fileLocationBuilder\n .build();\n }\n\n /**\n * Map file location model to file location Datastore entity.\n *\n * @param fileLocation file location\n * @return file location Datastore entity\n */\n public abstract FileLocationEntity toEntity(FileLocation fileLocation);\n\n}\n" }, { "alpha_fraction": 0.739234447479248, "alphanum_fraction": 0.7456140518188477, "avg_line_length": 33.83333206176758, "blob_id": "df4f84ffff53d1c692d76438595da1438f120cbc", "content_id": "9c197c3c85c22ef20b7af32928b1571e02c9cf88", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1254, "license_type": "permissive", "max_line_length": 76, "num_lines": 36, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/provider/interfaces/IDeliveryIntegrationService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.provider.interfaces;\n\nimport 
org.opengroup.osdu.core.common.model.file.FileLocationResponse;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.ingest.exception.ServerErrorException;\n\npublic interface IDeliveryIntegrationService {\n\n /**\n * Fetch file location info.\n *\n * @param fileId file ID\n * @param commonHeaders common headers\n * @return file location\n * @throws ServerErrorException if unable to get file location\n * or response doesn't contain file location\n */\n FileLocationResponse getFileInfo(String fileId, DpsHeaders commonHeaders);\n\n}\n" }, { "alpha_fraction": 0.745708167552948, "alphanum_fraction": 0.754291832447052, "avg_line_length": 36.279998779296875, "blob_id": "5cad4e511ceae681b765fee34f5c7267577b044e", "content_id": "67192a19d54f40176d72515f94a64b252dae2fb1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 932, "license_type": "permissive", "max_line_length": 128, "num_lines": 25, "path": "/osdu-r2/os-delivery/Dockerfile", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Use the official maven/java 8 image to create a build artifact.\n# http://hub.docker.com/_/maven\nFROM maven:3-jdk-8-alpine AS builder\n\n# Copy local code to container image.\n## Create all the needed folders\nWORKDIR /app\nCOPY pom.xml .\nCOPY .m2/ .m2/\nCOPY delivery-core/ delivery-core/\nCOPY provider/ provider/\n\n# Build a release artifact for the child project\nRUN mvn -T2 package -DskipTests -B -s .m2/settings.xml\n\n# Use the official AdoptOpenJDK for a base image.\n# https://hub.docker.com/_/openjdk\nFROM openjdk:8-slim\nWORKDIR /app\nARG PROVIDER_NAME\nENV PROVIDER_NAME $PROVIDER_NAME\n# Copy the jar to the production image from the builder stage.\nCOPY --from=builder /app/provider/delivery-${PROVIDER_NAME}/target/delivery-${PROVIDER_NAME}-*.jar delivery-${PROVIDER_NAME}.jar\n# Run the web service on container startup.\nCMD java -Djava.security.egd=file:/dev/./urandom -Dserver.port=${PORT} -jar /app/delivery-${PROVIDER_NAME}.jar\n" }, { "alpha_fraction": 0.5172200798988342, "alphanum_fraction": 0.5736679434776306, "avg_line_length": 21.0238094329834, "blob_id": "746693f4f7363d083d0ff1e375bf7ad473103bed", "content_id": "0681baa38c3fd45b1c911646ec6baa5fb9e990c6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12950, "license_type": "permissive", "max_line_length": 104, "num_lines": 588, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/CountryCodes.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal;\n\nimport java.util.HashMap;\nimport java.util.Map;\n\npublic enum CountryCodes\n{\n AD(\"Andorra\", \"AD\", 16),\n\n AE(\"United Arab Emirates\", 
\"AE\", 784),\n\n AF(\"Afghanistan\", \"AF\", 4),\n\n AG(\"Antigua and Barbuda\", \"AG\", 28),\n\n AI(\"Anguilla\", \"AI\", 660),\n\n AL(\"Albania\", \"AL\", 8),\n\n AM(\"Armenia\", \"AM\", 51),\n\n AN(\"Netherlands Antilles\", \"AN\", 530),\n\n AO(\"Angola\", \"AO\", 24),\n\n AQ(\"Antarctica\", \"AQ\", 10),\n\n AR(\"Argentina\", \"AR\", 32, RESIDENCY_RISK.NO_RESTRICTION),\n\n AS(\"American Samoa\", \"AS\", 16),\n\n AT(\"Austria\", \"AT\", 40),\n\n AU(\"Australia\", \"AU\", 36, RESIDENCY_RISK.NO_RESTRICTION),\n\n AW(\"Aruba\", \"AW\", 533),\n\n AX(\"Aland Islands\", \"AX\", 248),\n\n AZ(\"Azerbaijan\", \"AZ\", 31),\n\n BA(\"Bosnia and Herzegovina\", \"BA\", 70),\n\n BB(\"Barbados\", \"BB\", 52, RESIDENCY_RISK.NOT_ASSIGNED),\n\n BD(\"Bangladesh\", \"BD\", 50),\n\n BE(\"Belgium\", \"BE\", 56, RESIDENCY_RISK.NOT_ASSIGNED),\n\n BF(\"Burkina Faso\", \"BF\", 854),\n\n BG(\"Bulgaria\", \"BG\", 100),\n\n BH(\"Bahrain\", \"BH\", 48, RESIDENCY_RISK.NOT_ASSIGNED),\n\n BI(\"Burundi\", \"BI\", 108),\n\n BJ(\"Benin\", \"BJ\", 204, RESIDENCY_RISK.NOT_ASSIGNED),\n\n BL(\"Saint Barthelemy\", \"BL\", 652),\n\n BM(\"Bermuda\", \"BM\", 60),\n\n BN(\"Brunei Darussalam\", \"BN\", 96, RESIDENCY_RISK.NO_RESTRICTION),\n\n BO(\"Bolivia\", \"BO\", 68, RESIDENCY_RISK.NO_RESTRICTION),\n\n BR(\"Brazil\", \"BR\", 76),\n\n BS(\"Bahamas\", \"BS\", 44),\n\n BT(\"Bhutan\", \"BT\", 64),\n\n BV(\"Bouvet Island\", \"BV\", 74),\n\n BW(\"Botswana\", \"BW\", 72),\n\n BY(\"Belarus\", \"BY\", 112),\n\n BZ(\"Belize\", \"BZ\", 84, RESIDENCY_RISK.NOT_ASSIGNED),\n\n CA(\"Canada\", \"CA\", 124, RESIDENCY_RISK.NO_RESTRICTION),\n\n CC(\"Cocos Islands\", \"CC\", 166),\n\n CD(\"The Democratic Republic of the Congo\", \"CD\", 180),\n\n CF(\"Central African Republic\", \"CF\", 140),\n\n CG(\"Congo\", \"CG\", 178),\n\n CH(\"Switzerland\", \"CH\", 756, RESIDENCY_RISK.NOT_ASSIGNED),\n\n CI(\"Cote d'Ivoire\", \"CI\", 384),\n\n CK(\"Cook Islands\", \"CK\", 184),\n\n CL(\"Chile\", \"CL\", 152, RESIDENCY_RISK.NO_RESTRICTION),\n\n CM(\"Cameroon\", \"CM\", 120),\n\n CN(\"China\", \"CN\", 156),\n\n CO(\"Colombia\", \"CO\", 170),\n\n CR(\"Costa Rica\", \"CR\", 188),\n\n CU(\"Cuba\", \"CU\", 192, RESIDENCY_RISK.EMBARGOED),\n\n CV(\"Cape Verde\", \"CV\", 132),\n\n CX(\"Christmas Island\", \"CX\", 162),\n\n CY(\"Cyprus\", \"CY\", 196, RESIDENCY_RISK.NOT_ASSIGNED),\n\n CZ(\"Czech Republic\", \"CZ\", 203),\n\n DE(\"Germany\", \"DE\", 276),\n\n DJ(\"Djibouti\", \"DJ\", 262),\n\n DK(\"Denmark\", \"DK\", 208, RESIDENCY_RISK.NO_RESTRICTION),\n\n DM(\"Dominica\", \"DM\", 212),\n\n DO(\"Dominican Republic\", \"DO\", 214, RESIDENCY_RISK.NOT_ASSIGNED),\n\n DZ(\"Algeria\", \"DZ\", 12),\n\n EC(\"Ecuador\", \"EC\", 218, RESIDENCY_RISK.NO_RESTRICTION),\n\n EE(\"Estonia\", \"EE\", 233),\n\n EG(\"Egypt\", \"EG\", 818),\n\n EH(\"Western Sahara\", \"EH\", 732),\n\n ER(\"Eritrea\", \"ER\", 232),\n\n ES(\"Spain\", \"ES\", 724, RESIDENCY_RISK.NO_RESTRICTION),\n\n ET(\"Ethiopia\", \"ET\", 231, RESIDENCY_RISK.NOT_ASSIGNED),\n\n FI(\"Finland\", \"FI\", 246),\n\n FJ(\"Fiji\", \"FJ\", 242),\n\n FK(\"Falkland Islands\", \"FK\", 238),\n\n FM(\"Federated States of Micronesia\", \"FM\", 583),\n\n FO(\"Faroe Islands\", \"FO\", 234),\n\n FR(\"France\", \"FR\", 250, RESIDENCY_RISK.NO_RESTRICTION),\n\n GA(\"Gabon\", \"GA\", 266),\n\n GB(\"United Kingdom\", \"GB\", 826, RESIDENCY_RISK.NO_RESTRICTION),\n\n GD(\"Grenada\", \"GD\", 308),\n\n GE(\"Georgia\", \"GE\", 268, RESIDENCY_RISK.NOT_ASSIGNED),\n\n GF(\"French Guiana\", \"GF\", 254),\n\n GG(\"Guernsey\", \"GG\", 831),\n\n GH(\"Ghana\", 
\"GH\", 288),\n\n GI(\"Gibraltar\", \"GI\", 292),\n\n GL(\"Greenland\", \"GL\", 304, RESIDENCY_RISK.NOT_ASSIGNED),\n\n GM(\"Gambia\", \"GM\", 270),\n\n GN(\"Guinea\", \"GN\", 324),\n\n GP(\"Guadeloupe\", \"GP\", 312),\n\n GQ(\"Equatorial Guinea\", \"GQ\", 226),\n\n GR(\"Greece\", \"GR\", 300),\n\n GS(\"South Georgia and the South Sandwich Islands\", \"GS\", 239),\n\n GT(\"Guatemala\", \"GT\", 320, RESIDENCY_RISK.NOT_ASSIGNED),\n\n GU(\"Guam\", \"GU\", 316),\n\n GW(\"Guinea-Bissau\", \"GW\", 624),\n\n GY(\"Guyana\", \"GY\", 328, RESIDENCY_RISK.NOT_ASSIGNED),\n\n HK(\"Hong Kong\", \"HK\", 344),\n\n HM(\"Heard Island and McDonald Islands\", \"HM\", 334),\n\n HN(\"Honduras\", \"HN\", 340),\n\n HR(\"Croatia\", \"HR\", 191),\n\n HT(\"Haiti\", \"HT\", 332),\n\n HU(\"Hungary\", \"HU\", 348),\n\n ID(\"Indonesia\", \"ID\", 360),\n\n IE(\"Ireland\", \"IE\", 372),\n\n IL(\"Israel\", \"IL\", 376, RESIDENCY_RISK.NOT_ASSIGNED),\n\n IM(\"Isle of Man\", \"IM\", 833),\n\n IN(\"India\", \"IN\", 356),\n\n IO(\"British Indian Ocean Territory\", \"IO\", 86),\n\n IQ(\"Iraq\", \"IQ\", 368),\n\n IR(\"Islamic Republic of Iran\", \"IR\", 364, RESIDENCY_RISK.EMBARGOED),\n\n IS(\"Iceland\", \"IS\", 352),\n\n IT(\"Italy\", \"IT\", 380, RESIDENCY_RISK.NO_RESTRICTION),\n\n JE(\"Jersey\", \"JE\", 832),\n\n JM(\"Jamaica\", \"JM\", 388),\n\n JO(\"Jordan\", \"JO\", 400),\n\n JP(\"Japan\", \"JP\", 392, RESIDENCY_RISK.NOT_ASSIGNED),\n\n KE(\"Kenya\", \"KE\", 404),\n\n KG(\"Kyrgyzstan\", \"KG\", 417),\n\n KH(\"Cambodia\", \"KH\", 116),\n\n KI(\"Kiribati\", \"KI\", 296),\n\n KM(\"Comoros\", \"KM\", 174),\n\n KN(\"Saint Kitts and Nevis\", \"KN\", 659),\n\n KP(\"Democratic People's Republic of Korea\", \"KP\", 408, RESIDENCY_RISK.EMBARGOED),\n\n KR(\"Republic of Korea\", \"KR\", 410, RESIDENCY_RISK.NOT_ASSIGNED),\n\n KW(\"Kuwait\", \"KW\", 414),\n\n KY(\"Cayman Islands\", \"KY\", 136),\n\n KZ(\"Kazakhstan\", \"KZ\", 398),\n\n LA(\"Lao People's Democratic Republic\", \"LA\", 418),\n\n LB(\"Lebanon\", \"LB\", 422, RESIDENCY_RISK.NO_RESTRICTION),\n\n LC(\"Saint Lucia\", \"LC\", 662),\n\n LI(\"Liechtenstein\", \"LI\", 438),\n\n LK(\"Sri Lanka\", \"LK\", 144, RESIDENCY_RISK.NOT_ASSIGNED),\n\n LR(\"Liberia\", \"LR\", 430, RESIDENCY_RISK.NOT_ASSIGNED),\n\n LS(\"Lesotho\", \"LS\", 426),\n\n LT(\"Lithuania\", \"LT\", 440, RESIDENCY_RISK.NOT_ASSIGNED),\n\n LU(\"Luxembourg\", \"LU\", 442),\n\n LV(\"Latvia\", \"LV\", 428),\n\n LY(\"Libya\", \"LY\", 434),\n\n MA(\"Morocco\", \"MA\", 504, RESIDENCY_RISK.NOT_ASSIGNED),\n\n MC(\"Monaco\", \"MC\", 492),\n\n MD(\"Republic of Moldova\", \"MD\", 498),\n\n ME(\"Montenegro\", \"ME\", 499),\n\n MF(\"Saint Martin\", \"MF\", 663),\n\n MG(\"Madagascar\", \"MG\",450, RESIDENCY_RISK.NOT_ASSIGNED),\n\n MH(\"Marshall Islands\", \"MH\", 584),\n\n MK(\"The former Yugoslav Republic of Macedonia\", \"MK\", 807),\n\n ML(\"Mali\", \"ML\", 466),\n\n MM(\"Myanmar\", \"MM\", 104, RESIDENCY_RISK.NOT_ASSIGNED),\n\n MN(\"Mongolia\", \"MN\", 496),\n\n MO(\"Macao\", \"MO\", 446),\n\n MP(\"Northern Mariana Islands\", \"MP\",580),\n\n MQ(\"Martinique\", \"MQ\", 474),\n\n MR(\"Mauretania\", \"MR\", 478, RESIDENCY_RISK.NOT_ASSIGNED),\n\n MS(\"Montserrat\", \"MS\", 500),\n\n MT(\"Malta\", \"MT\", 470),\n\n MU(\"Mauritius\", \"MU\", 480),\n\n MV(\"Maldives\", \"MV\", 462),\n\n MW(\"Malawi\", \"MW\", 454),\n\n MX(\"Mexico\", \"MX\", 484),\n\n MY(\"Malaysia\", \"MY\", 458, RESIDENCY_RISK.CLIENT_CONSENT_REQUIRED),\n\n MZ(\"Mozambique\", \"MZ\", 508),\n\n NA(\"Namibia\", \"NA\", 516, RESIDENCY_RISK.NOT_ASSIGNED),\n\n NC(\"New 
Caledonia\", \"NC\", 540),\n\n NE(\"Niger\", \"NE\", 562),\n\n NF(\"Norfolk Island\", \"NF\", 574),\n\n NG(\"Nigeria\",\"NG\", 566),\n\n NI(\"Nicaragua\", \"NI\", 558),\n\n NL(\"Netherlands\", \"NL\", 528, RESIDENCY_RISK.NOT_ASSIGNED),\n\n NO(\"Norway\", \"NO\", 578, RESIDENCY_RISK.NO_RESTRICTION),\n\n NP(\"Nepal\", \"NP\", 524),\n\n NR(\"Nauru\", \"NR\", 520),\n\n NU(\"Niue\", \"NU\", 570),\n\n NZ(\"New Zealand\", \"NZ\", 554, RESIDENCY_RISK.NO_RESTRICTION),\n\n OM(\"Oman\", \"OM\", 512),\n\n PA(\"Panama\", \"PA\", 591),\n\n PE(\"Peru\", \"PE\", 604),\n\n PF(\"French Polynesia\", \"PF\", 258),\n\n PG(\"Papua New Guinea\", \"PG\", 598, RESIDENCY_RISK.NO_RESTRICTION),\n\n PH(\"Philippines\", \"PH\", 608, RESIDENCY_RISK.NOT_ASSIGNED),\n\n PK(\"Pakistan\", \"PK\", 586),\n\n PL(\"Poland\", \"PL\", 616, RESIDENCY_RISK.NOT_ASSIGNED),\n\n PM(\"Saint Pierre and Miquelon\", \"PM\", 666),\n\n PN(\"Pitcairn\", \"PN\", 612),\n\n PR(\"Puerto Rico\", \"PR\", 630),\n\n PS(\"Palestinian Territory\", \"PS\", 275),\n\n PT(\"Portugal\", \"PT\", 620),\n\n PW(\"Palau\", \"PW\", 585),\n\n PY(\"Paraguay\", \"PY\", 600, RESIDENCY_RISK.NOT_ASSIGNED),\n\n QA(\"Qatar\", \"QA\", 634),\n\n RE(\"Reunion\", \"RE\", 638),\n\n RO(\"Romania\", \"RO\", 642),\n\n RS(\"Serbia\", \"RS\", 688),\n\n RU(\"Russian Federation\", \"RU\", 643, RESIDENCY_RISK.EMBARGOED),\n\n RW(\"Rwanda\", \"RW\", 646),\n\n SA(\"Saudi Arabia\", \"SA\", 682),\n\n SB(\"Solomon Islands\", \"SB\", 90),\n\n SC(\"Seychelles\", \"SC\", 690),\n\n SD(\"Sudan\", \"SD\", 729, RESIDENCY_RISK.EMBARGOED),\n\n SE(\"Sweden\", \"SE\", 752),\n\n SG(\"Singapore\", \"SG\", 702, RESIDENCY_RISK.NOT_ASSIGNED),\n\n SH(\"Saint Helena\", \"SH\", 654),\n\n SI(\"Slovenia\", \"SI\", 705),\n\n SJ(\"Svalbard and Jan Mayen\", \"SJ\", 744),\n\n SK(\"Slovakia\", \"SK\", 703),\n\n SL(\"Sierra Leone\", \"SL\", 694),\n\n SM(\"San Marino\", \"SM\", 674),\n\n SN(\"Senegal\", \"SN\", 686, RESIDENCY_RISK.NOT_ASSIGNED),\n\n SO(\"Somalia\", \"SO\", 706),\n\n SR(\"Suriname\", \"SR\", 740, RESIDENCY_RISK.NOT_ASSIGNED),\n\n SS(\"South Sudan\", \"SS\", 728, RESIDENCY_RISK.EMBARGOED),\n\n ST(\"Sao Tome and Principe\", \"ST\", 678),\n\n SV(\"El Salvador\", \"SV\", 222),\n\n SY(\"Syrian Arab Republic\", \"SY\", 760, RESIDENCY_RISK.EMBARGOED),\n\n SZ(\"Swaziland\", \"SZ\", 748),\n\n TC(\"Turks and Caicos Islands\", \"TC\", 796),\n\n TD(\"Chad\", \"TD\", 148),\n\n TG(\"Togo\", \"TG\", 768, RESIDENCY_RISK.NOT_ASSIGNED),\n\n TH(\"Thailand\", \"TH\", 764, RESIDENCY_RISK.NO_RESTRICTION),\n\n TJ(\"Tajikistan\", \"TJ\", 762),\n\n TK(\"Tokelau\", \"TK\", 772),\n\n TL(\"Timor-Leste\", \"TL\", 626),\n\n TM(\"Turkmenistan\", \"TM\", 795, RESIDENCY_RISK.NOT_ASSIGNED),\n\n TN(\"Tunisia\", \"TN\", 788, RESIDENCY_RISK.NOT_ASSIGNED),\n\n TO(\"Tonga\", \"TO\", 776),\n\n TR(\"Turkey\", \"TR\", 792),\n\n TT(\"Trinidad and Tobago\", \"TT\", 780, RESIDENCY_RISK.NOT_ASSIGNED),\n\n TV(\"Tuvalu\", \"TV\", 798),\n\n TW(\"Taiwan, Province of China\", \"TW\", 158, RESIDENCY_RISK.NOT_ASSIGNED),\n\n TZ(\"United Republic of Tanzania\", \"TZ\", 834),\n\n UA(\"Ukraine\", \"UA\", 804),\n\n UG(\"Uganda\", \"UG\", 800, RESIDENCY_RISK.NOT_ASSIGNED),\n\n UM(\"United States Minor Outlying Islands\", \"UM\", 581),\n\n US(\"United States\", \"US\", 840, RESIDENCY_RISK.NO_RESTRICTION),\n\n UY(\"Uruguay\", \"UY\", 858),\n\n UZ(\"Uzbekistan\", \"UZ\", 860),\n\n VC(\"Saint Vincent and the Grenadines\", \"VC\", 670),\n\n VE(\"Venezuela\", \"VE\", 862),\n\n VG(\"British Virgin Islands\", \"VG\", 92),\n\n VI(\"Virgin Islands, U.S.\", \"VI\", 
850),\n\n VN(\"Vietnam\", \"VN\", 704),\n\n VU(\"Vanuatu\", \"VU\", 548),\n\n WF(\"Wallis and Futuna\", \"WF\", 876),\n\n WS(\"Samoa\", \"WS\", 882),\n\n YE(\"Yemen\", \"YE\", 887, RESIDENCY_RISK.NOT_ASSIGNED),\n\n ZA(\"South Africa\", \"ZA\", 710),\n\n ZM(\"Zambia\", \"ZM\", 894),\n\n ZW(\"Zimbabwe\", \"ZW\", 716),\n\n Default(\"Default\", \"XX\", 999, RESIDENCY_RISK.DEFAULT)\n ;\n\n private static final Map<String, CountryCodes> alpha2Map = new HashMap<>();\n\n static\n {\n for (CountryCodes cc : values())\n {\n alpha2Map.put(cc.getAlpha2(), cc);\n }\n }\n\n private final String name;\n private final String alpha2;\n private final int numeric;\n private final String residencyRisk;\n public static class RESIDENCY_RISK {\n public static final String NO_RESTRICTION = \"No restriction\";\n public static final String NOT_ASSIGNED = \"Not assigned\";\n public static final String EMBARGOED = \"Embargoed\";\n public static final String DEFAULT = \"Default\";\n public static final String CLIENT_CONSENT_REQUIRED = \"Client consent required\";\n }\n\n CountryCodes(String name, String alpha2, int numeric)\n {\n this(name, alpha2, numeric, RESIDENCY_RISK.DEFAULT);\n }\n CountryCodes(String name, String alpha2, int numeric, String residencyRisk)\n {\n this.name = name;\n this.alpha2 = alpha2;\n this.numeric = numeric;\n this.residencyRisk = residencyRisk;\n }\n\n public String getName()\n {\n return name;\n }\n\n public String getAlpha2()\n {\n return alpha2;\n }\n\n public int getNumeric()\n {\n return numeric;\n }\n\n public String getResidencyRisk()\n {\n return residencyRisk;\n }\n\n public boolean needsClientConsent(){\n return getResidencyRisk().equalsIgnoreCase(CountryCodes.RESIDENCY_RISK.CLIENT_CONSENT_REQUIRED);\n }\n public boolean isEmbargoed(){\n return getResidencyRisk().equalsIgnoreCase(RESIDENCY_RISK.EMBARGOED);\n }\n\n public static CountryCodes getByCode(String code)\n {\n if (code == null)\n return Default;\n\n CountryCodes country = alpha2Map.get(code);\n\n return country == null ? 
Default : country;\n }\n\n\n}\n" }, { "alpha_fraction": 0.7069330811500549, "alphanum_fraction": 0.7099606394767761, "avg_line_length": 40.822784423828125, "blob_id": "213ffc43fd08c239f92ae0f82151a7fe3d0fed2a", "content_id": "1e4bd9221582c986c57e4a63e2b64d2b0b4ddcff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3305, "license_type": "permissive", "max_line_length": 79, "num_lines": 79, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/data/provider/TestData.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.core.data.provider;\n\npublic class TestData {\n /**\n * Data keys for data provider\n */\n public static final String DRIVER = \"Driver\";\n public static final String LOCATION = \"Location\";\n public static final String USER_ID = \"UserID\";\n public static final String EMPTY_REQUEST = \"emptyRequest\";\n public static final String CONTENT_TYPE = \"contentType\";\n public static final String WORKFLOW_TYPE_INGEST = \"workflowTypeIngest\";\n public static final String WORKFLOW_TYPE_OSDU = \"workflowTypeOsdu\";\n\n /**\n * Error msg's\n */\n public static final String ERROR_TYPE_MISMATCH = \"errorMismatch\";\n public static final String EXCEPTION = \"exception\";\n public static final String ERROR_TIME_PARSING = \"errorTimeParsing\";\n public static final String ERROR_JSON_PARSING = \"errorJsonParsing\";\n public static final String ERROR_CONSTRAINT_VIOLATION = \"errorViolation\";\n public static final String ERROR_INVALID_FORMAT = \"errorInvalidFormat\";\n /**\n * Response keys for ingest\n */\n public static final String STATUS = \"Status\";\n public static final String STATUS_RUNNING = \"status_running\";\n public static final String STATUS_FINISHED = \"status_finished\";\n public static final String STATUS_FAILED = \"status_failed\";\n public static final String DATA_TYPE_LOG = \"dataTypeLog\";\n public static final String DATA_TYPE_OSDU = \"dataTypeOsdu\";\n public static final String DATA_TYPE_OPAQUE = \"dataTypeOpaque\";\n public static final String DATA_TYPE_INVALID = \"dataTypeInvalid\";\n\n /**\n * Response paths for file service\n */\n public static final String FILE_ID = \"FileID\";\n public static final String SIGNED_URL = \"Location.SignedURL\";\n public static final String GET_LOCATION_FROM_FILES = \"Content[0].Location\";\n public static final String GET_FILE_ID_FROM_FILES = \"Content[0].FileID\";\n public static final String GET_CREATION_TIME_FROM_FILES = \"CreatedAt\";\n public static final String GET_CREATOR_FROM_FILES = \"CreatedBy\";\n public static final String CONTENT = \"Content\";\n public static final String MESSAGE = \"message\";\n\n /**\n * Response paths for airflow\n */\n public static final String AIRFLOW_ITEMS = \"items\";\n public static final String AIRFLOW_DAG_ID = \"dag_id\";\n public static final String AIRFLOW_DAG_ID_DEFAULT = 
\"dag_id_default\";\n public static final String AIRFLOW_DAG_ID_OSDU = \"dag_id_osdu\";\n public static final String AIRFLOW_DAG_RUN_URL = \"dag_run_url\";\n public static final String AIRFLOW_EXECUTION_TIME = \"execution_date\";\n\n\n /**\n * Response paths for ingest service\n */\n public static final String WORKFLOW_ID = \"WorkflowID\";\n}" }, { "alpha_fraction": 0.7496751546859741, "alphanum_fraction": 0.7587700486183167, "avg_line_length": 36.24193572998047, "blob_id": "aa94e4a4945a42d8ebd470c576985faf8b906890", "content_id": "ebd5092ce455603e5cb7b2d6eb8d20e96d0837cb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2309, "license_type": "permissive", "max_line_length": 118, "num_lines": 62, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/CcsQueryRequest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search;\n\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\nimport org.hibernate.validator.constraints.NotBlank;\nimport org.opengroup.osdu.core.common.SwaggerDoc;\nimport org.opengroup.osdu.core.common.model.search.validation.CcsValidOffset;\nimport org.opengroup.osdu.core.common.model.search.validation.ValidMultiKind;\nimport org.springframework.validation.annotation.Validated;\n\nimport javax.validation.constraints.Min;\n\n@Data\n@Validated\n@NoArgsConstructor\n@CcsValidOffset\npublic class CcsQueryRequest {\n\n @Min(value = 0, message = SwaggerDoc.OFFSET_VALIDATION_MIN_MSG)\n @JsonProperty(\"offset\")\n @ApiModelProperty(value = SwaggerDoc.OFFSET_DESCRIPTION, dataType = \"java.lang.Integer\", example = \"0\")\n private int from;\n\n @NotBlank(message = SwaggerDoc.KIND_VALIDATION_CAN_NOT_BE_NULL_OR_EMPTY)\n @ApiModelProperty(value = SwaggerDoc.KIND_REQUEST_DESCRIPTION, required = true, example = SwaggerDoc.KIND_EXAMPLE)\n @ValidMultiKind\n private String kind;\n\n @Min(value = 0, message = SwaggerDoc.LIMIT_VALIDATION_MIN_MSG)\n @ApiModelProperty(value = SwaggerDoc.LIMIT_DESCRIPTION, dataType = \"java.lang.Integer\", example = \"30\")\n private int limit;\n\n @ApiModelProperty(value = SwaggerDoc.QUERY_DESCRIPTION)\n private String query = \"\";\n\n @ApiModelProperty(value = SwaggerDoc.QUERYASOWNER_DESCRIPTION, dataType = \"java.lang.Boolean\", example = \"false\")\n private boolean queryAsOwner;\n\n @Override\n public String toString(){\n return new com.google.gson.Gson().toJson(this);\n }\n}\n" }, { "alpha_fraction": 0.6256410479545593, "alphanum_fraction": 0.6287449598312378, "avg_line_length": 39.0594596862793, "blob_id": "60a9d8bde33eab6d9e47aec4dfad5c1d41fcf585", "content_id": "26baf51230e56e823b9c7fbefd6e3cada75387d1", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7410, "license_type": "permissive", "max_line_length": 124, "num_lines": 185, "path": "/osdu-r2/os-qa/src/test/java/com/osdu/ingest/e2e/IngestWithManifestAnyCloudTests.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.ingest.e2e;\n\nimport com.osdu.core.data.provider.DataProviders;\nimport com.osdu.core.endpoints.factories.FactoriesHolder;\nimport com.osdu.core.reporter.TestReporter;\nimport io.qameta.allure.Description;\nimport io.qameta.allure.restassured.AllureRestAssured;\nimport io.restassured.response.Response;\nimport org.apache.commons.lang3.StringUtils;\nimport org.awaitility.Awaitility;\nimport org.hamcrest.Matchers;\nimport org.testng.annotations.Ignore;\nimport org.testng.annotations.Test;\n\nimport java.util.*;\nimport java.util.concurrent.TimeUnit;\n\nimport static com.osdu.common.FilesKeeper.*;\nimport static com.osdu.core.data.parser.JsonParser.readJson;\nimport static com.osdu.core.data.provider.TestData.*;\nimport static io.restassured.RestAssured.given;\nimport static java.util.concurrent.TimeUnit.MINUTES;\nimport static org.apache.http.HttpStatus.*;\nimport static org.awaitility.Awaitility.await;\n\npublic class IngestWithManifestAnyCloudTests extends BaseIngestService { //todo:::: add creds to env variable!!!!!!!\n FactoriesHolder factoriesHolder = new FactoriesHolder();\n\n /**\n * Services paths\n */\n String submitFunction = factoriesHolder.remoteFactoryCreator().getIngest(\"submitWithManifest\");\n\n String getWorkflowStatus = factoriesHolder.remoteFactoryCreator().getWorkflowService(\"getStatus\");\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Valid flow send request with all required fields for well log data type and with auth token\")\n public void i1_checkIngestWithManifest(Map<String, String> data) {\n String bodyRequest = readJson(requestForIngestWithManifest).toString();\n\n Response ingestResponse = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequest)\n .when()\n .post(submitFunction);\n\n ingestResponse\n .then()\n .statusCode(SC_OK)\n .and()\n .assertThat().body(WORKFLOW_ID, Matchers.notNullValue())\n .log()\n .all();\n\n String workflowId = ingestResponse.then()\n .extract()\n .path(WORKFLOW_ID);\n\n String requestForIngestStatus = String.format(readJson(requestForWorkflowStatusTemplate).toString(), workflowId);\n\n Awaitility.setDefaultPollDelay(15, TimeUnit.SECONDS);\n await()\n .atMost(1, MINUTES)\n .with()\n .pollInterval(10, TimeUnit.SECONDS)\n .until(() -> given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(requestForIngestStatus)\n .log()\n .method()\n .when()\n .post(getWorkflowStatus).jsonPath().get(STATUS), //TODO :: status should be finished\n s 
-> s.equals(data.get(STATUS)));\n\n TestReporter.reportStep(\"Job status is completed\");\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with all required fields and without auth tokens\")\n public void i2_checkIngestSubmitWithoutHeaders(Map<String, String> data) {\n String bodyRequest = readJson(requestForIngestWithManifest).toString();\n\n Response ingestFunction = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(new HashMap<>()))\n .body(bodyRequest)\n .when()\n .post(submitFunction);\n\n ingestFunction\n .then()\n .statusCode(SC_UNAUTHORIZED)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request without FileID field and with auth token\")\n public void i3_checkIngestSubmitWithoutWithoutOnOfTheRequiredFields(Map<String, String> data) {\n Response ingestFunction = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(readJson(requestForIngestWithInvalidManifest))\n .when()\n .post(submitFunction);\n\n ingestFunction\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request without wpc and without auth token\")\n public void i4_checkIngestSubmitWithoutOnOfTheRequiredFieldsAndTokens(Map<String, String> data) {\n Response ingestFunction = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(new HashMap<>()))\n .body(readJson(requestForIngestWithInvalidManifest))\n .when()\n .post(submitFunction);\n\n ingestFunction\n .then()\n .statusCode(SC_UNAUTHORIZED)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with empty body and with auth token\")\n public void i5_checkIngestSubmitWithEmptyBody(Map<String, String> data) {\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(StringUtils.EMPTY)\n .when()\n .post(submitFunction)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .log()\n .all();\n }\n\n @Ignore //todo: there is no validation for the body\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with the empty values with auth token\")\n public void i6_checkIngestSubmitWithEmptyValues(Map<String, String> data) {\n }\n\n @Ignore //todo: there is no validation for the body\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with mismatched types and with auth token\")\n public void i7_checkIngestWithMismatchedTypes(Map<String, String> data) {\n }\n\n @Ignore //todo: there is no validation for the body\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request without srn and with auth token\")\n public void i8_checkIngestWithoutSrn(Map<String, String> data) {\n }\n}" }, { "alpha_fraction": 0.7633734345436096, "alphanum_fraction": 0.7651158571243286, "avg_line_length": 38.85416793823242, "blob_id": "96f6a5aed6ca02f5e5ffc0db2bc28baeb0b350ba", "content_id": "07a7190239f1e043d4320168e4dde8051f7209e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5739, "license_type": "permissive", "max_line_length": 99, "num_lines": 144, "path": 
"/osdu-r2/os-ingest/ingest-core/src/test/java/org/opengroup/osdu/ingest/service/WorkflowIntegrationServiceTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.service;\n\nimport static org.assertj.core.api.Assertions.catchThrowable;\nimport static org.assertj.core.api.BDDAssertions.then;\nimport static org.mockito.ArgumentMatchers.any;\nimport static org.mockito.ArgumentMatchers.anyString;\nimport static org.mockito.ArgumentMatchers.eq;\nimport static org.mockito.BDDMockito.given;\nimport static org.mockito.Mockito.verify;\nimport static org.opengroup.osdu.ingest.TestUtils.getFeignRequest;\n\nimport com.fasterxml.jackson.core.JsonProcessingException;\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport feign.Response;\nimport java.nio.charset.StandardCharsets;\nimport java.util.HashMap;\nimport java.util.Map;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayNameGeneration;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.ArgumentCaptor;\nimport org.mockito.Captor;\nimport org.mockito.Mock;\nimport org.mockito.junit.jupiter.MockitoExtension;\nimport org.opengroup.osdu.core.common.model.WorkflowType;\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowRequest;\nimport org.opengroup.osdu.core.common.model.workflow.StartWorkflowResponse;\nimport org.opengroup.osdu.ingest.ReplaceCamelCase;\nimport org.opengroup.osdu.ingest.client.IWorkflowServiceClient;\nimport org.opengroup.osdu.ingest.exception.ServerErrorException;\nimport org.opengroup.osdu.ingest.provider.interfaces.IWorkflowIntegrationService;\nimport org.springframework.http.HttpStatus;\n\n@ExtendWith(MockitoExtension.class)\n@DisplayNameGeneration(ReplaceCamelCase.class)\nclass WorkflowIntegrationServiceTest {\n\n private static final String WORKFLOW_SERVICE_URL = \"http://workflowServiceUrl\";\n private static final String TEST_AUTH_TOKEN = \"test-auth-token\";\n private static final String TEST_PARTITION = \"test-partition\";\n private static final String WORKFLOW_ID = \"workflow-id\";\n private ObjectMapper mapper = new ObjectMapper();\n\n @Mock\n private IWorkflowServiceClient workflowServiceClient;\n\n @Captor\n ArgumentCaptor<StartWorkflowRequest> workflowRequestCaptor;\n\n IWorkflowIntegrationService workflowIntegrationService;\n\n @BeforeEach\n void setUp() {\n workflowIntegrationService = new WorkflowIntegrationServiceImpl(workflowServiceClient, mapper);\n }\n\n @Test\n void shouldSubmitIngestToWorkflowService() throws JsonProcessingException {\n\n // given\n Map<String, String> headersMap = new HashMap<>();\n headersMap.put(DpsHeaders.AUTHORIZATION, TEST_AUTH_TOKEN);\n headersMap.put(DpsHeaders.DATA_PARTITION_ID, TEST_PARTITION);\n DpsHeaders requestHeaders = 
DpsHeaders.createFromMap(headersMap);\n\n StartWorkflowResponse startWorkflowResponse = StartWorkflowResponse.builder()\n .workflowId(WORKFLOW_ID).build();\n\n Response response = Response.builder()\n .body(mapper.writeValueAsString(startWorkflowResponse), StandardCharsets.UTF_8)\n .request(getFeignRequest())\n .status(HttpStatus.OK.value()).build();\n\n Map<String, Object> context = new HashMap<>();\n context.put(\"key\", \"value\");\n given(workflowServiceClient.startWorkflow(eq(TEST_AUTH_TOKEN), eq(TEST_PARTITION), any()))\n .willReturn(response);\n\n // when\n String workflowId = workflowIntegrationService\n .submitIngestToWorkflowService(WorkflowType.INGEST, \"WELL_LOG\", context,\n requestHeaders);\n\n // then\n then(workflowId).isEqualTo(WORKFLOW_ID);\n verify(workflowServiceClient)\n .startWorkflow(anyString(), anyString(), workflowRequestCaptor.capture());\n then(workflowRequestCaptor.getValue()).satisfies(request -> {\n then(request.getContext()).containsEntry(\"key\", \"value\");\n then(request.getDataType()).isEqualTo(\"WELL_LOG\");\n then(request.getWorkflowType()).isEqualTo(WorkflowType.INGEST);\n });\n }\n\n @Test\n void shouldThrowExceptionIfResponseIsEmpty() throws JsonProcessingException {\n\n // given\n Map<String, String> headersMap = new HashMap<>();\n headersMap.put(DpsHeaders.AUTHORIZATION, TEST_AUTH_TOKEN);\n headersMap.put(DpsHeaders.DATA_PARTITION_ID, TEST_PARTITION);\n DpsHeaders requestHeaders = DpsHeaders.createFromMap(headersMap);\n\n StartWorkflowResponse startWorkflowResponse = StartWorkflowResponse.builder().workflowId(null)\n .build();\n\n Response response = Response.builder()\n .body(mapper.writeValueAsString(startWorkflowResponse), StandardCharsets.UTF_8)\n .request(getFeignRequest())\n .status(HttpStatus.OK.value()).build();\n\n Map<String, Object> context = new HashMap<>();\n context.put(\"key\", \"value\");\n given(workflowServiceClient.startWorkflow(eq(TEST_AUTH_TOKEN), eq(TEST_PARTITION), any()))\n .willReturn(response);\n\n // when\n Throwable thrown = catchThrowable(() -> workflowIntegrationService\n .submitIngestToWorkflowService(WorkflowType.INGEST, \"WELL_LOG\", context,\n requestHeaders));\n\n // then\n then(thrown).isInstanceOf(ServerErrorException.class);\n then(thrown.getMessage()).isEqualTo(\"No workflow id in workflow service response\");\n }\n}\n" }, { "alpha_fraction": 0.6964285969734192, "alphanum_fraction": 0.7074176073074341, "avg_line_length": 32.86046600341797, "blob_id": "6a81161d412a3c079718f9e500f3e5af09f38d80", "content_id": "7548bd72079d2cf449c05a530a83da1c46c39d3f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1456, "license_type": "permissive", "max_line_length": 88, "num_lines": 43, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/entitlements/EntitlementsFactory.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language 
governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.entitlements;\n\nimport org.opengroup.osdu.core.common.model.http.DpsHeaders;\nimport org.opengroup.osdu.core.common.http.HttpClient;\n\npublic class EntitlementsFactory implements IEntitlementsFactory {\n\n private final EntitlementsAPIConfig config;\n\n public EntitlementsFactory(EntitlementsAPIConfig config) {\n if (config == null) {\n throw new IllegalArgumentException(\"EntitlementsAPIConfig cannot be empty\");\n }\n this.config = config;\n }\n\n @Override\n public IEntitlementsService create(DpsHeaders headers) {\n if (headers == null) {\n throw new NullPointerException(\"headers cannot be null\");\n }\n return new EntitlementsService(this.config,\n new HttpClient(),\n headers);\n }\n}\n" }, { "alpha_fraction": 0.6636478900909424, "alphanum_fraction": 0.6769939661026001, "avg_line_length": 41.81632614135742, "blob_id": "6e3b7c4a1f92c8f772681bb43f15b1e663eac8ac", "content_id": "e211224768f6bd3872414bde8a1d81affed419ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6294, "license_type": "permissive", "max_line_length": 97, "num_lines": 147, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/search/ConfigTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.search;\n\nimport org.junit.Test;\nimport org.opengroup.osdu.core.common.model.search.DeploymentEnvironment;\n\nimport static org.junit.Assert.*;\n\npublic class ConfigTest {\n\n @Test\n public void checkConfig() {\n assertNotNull(new Config());\n\n System.setProperty(\"DEPLOYMENT_ENVIRONMENT\", \"CLOUD\");\n System.setProperty(\"ELASTIC_DATASTORE_KIND\", \"BBB\");\n System.setProperty(\"ELASTIC_DATASTORE_ID\", \"CCC\");\n System.setProperty(\"ELASTIC_HOST\", \"DDD\");\n System.setProperty(\"ELASTIC_CLUSTER_NAME\", \"FFF\");\n System.setProperty(\"GOOGLE_CLOUD_PROJECT\", \"GGG\");\n System.setProperty(\"STORAGE_SCHEMA_HOST\", \"KKK\");\n System.setProperty(\"STORAGE_QUERY_RECORD_HOST\", \"LLL\");\n System.setProperty(\"STORAGE_QUERY_RECORD_FOR_CONVERSION_HOST\", \"For\");\n System.setProperty(\"SEARCH_HOST\", \"MMM\");\n System.setProperty(\"INDEXER_HOST\", \"OOO\");\n System.setProperty(\"ENTITLEMENTS_HOST\", \"PPP\");\n System.setProperty(\"ENTITLEMENT_TARGET_AUDIENCE\", \"RRR\");\n System.setProperty(\"GAE_SERVICE\", \"SSS\");\n System.setProperty(\"GAE_VERSION\", \"TTT\");\n System.setProperty(\"KEY_RING\", \"UUU\");\n System.setProperty(\"KMS_KEY\", \"VVV\");\n System.setProperty(\"STORAGE_RECORDS_BATCH_SIZE\", \"50\");\n System.setProperty(\"ELASTIC_CACHE_EXPIRATION\", \"14\");\n System.setProperty(\"ELASTIC_CACHE_SIZE\", \"15\");\n System.setProperty(\"SCHEMA_CACHE_EXPIRATION\", \"14\");\n 
System.setProperty(\"SCHEMA_CACHE_SIZE\", \"15\");\n System.setProperty(\"CURSOR_CACHE_EXPIRATION\", \"15\");\n System.setProperty(\"CRON_INDEX_CLEANUP_THRESHOLD_DAYS\", \"1\");\n System.setProperty(\"CRON_EMPTY_INDEX_CLEANUP_THRESHOLD_DAYS\", \"2\");\n\n System.setProperty(\"INDEXER_QUEUE_HOST\", \"evd\");\n System.setProperty(\"REDIS_SEARCH_HOST\", \"10.5.0.10\");\n System.setProperty(\"REDIS_SEARCH_PORT\", \"6379\");\n System.setProperty(\"INDEX_CACHE_EXPIRATION\", \"10\");\n\n System.setProperty(\"GOOGLE_AUDIENCES\", \"google-aud\");\n\n assertEquals(DeploymentEnvironment.CLOUD, Config.getDeploymentEnvironment());\n assertEquals(\"BBB\", Config.getElasticCredentialsDatastoreKind());\n assertEquals(\"CCC\", Config.getElasticCredentialsDatastoreId());\n assertEquals(\"DDD\", Config.getElasticServerAddress());\n assertEquals(\"FFF\", Config.getElasticClusterName());\n assertEquals(\"GGG\", Config.getGoogleCloudProjectId());\n assertEquals(\"KKK\", Config.getStorageSchemaHostUrl());\n assertEquals(\"LLL\", Config.getStorageQueryRecordHostUrl());\n assertEquals(\"For\", Config.getStorageQueryRecordFoRConversionHostUrl());\n assertEquals(\"MMM\", Config.getSearchHostUrl());\n\n assertEquals(\"PPP\", Config.getEntitlementsHostUrl());\n assertEquals(\"RRR\", Config.getEntitlementTargetAudience());\n\n assertEquals(\"TTT\", Config.getDeployedVersionId());\n assertEquals(\"UUU\", Config.getKmsRing());\n assertEquals(\"VVV\", Config.getKmsKey());\n assertEquals(50, Config.getStorageRecordsBatchSize());\n assertEquals(14, Config.getSchemaCacheExpiration());\n assertEquals(14, Config.getElasticCacheExpiration());\n assertEquals(15, Config.getCursorCacheExpiration());\n assertEquals(1, Config.getIndexCleanupThresholdDays());\n assertEquals(2, Config.getEmptyIndexCleanupThresholdDays());\n\n assertEquals(\"evd\", Config.getIndexerQueueHost());\n assertEquals(10, Config.getIndexCacheExpiration());\n assertEquals(6379, Config.getSearchRedisPort());\n assertEquals(\"10.5.0.10\", Config.getSearchRedisHost());\n\n assertEquals(\"google-aud\", Config.getGoogleAudiences());\n\n assertEquals(2 * 24 * 60, Config.getKindsCacheExpiration());\n assertEquals(1, Config.getKindsRedisDataBase());\n assertEquals(2 * 24 * 60, Config.getAttributesCacheExpiration());\n }\n\n @Test\n public void try_get_invalid_pattern() {\n System.setProperty(\"CRON_INDEX_CLEANUP_PATTERN\", \"\");\n String[] result = Config.getIndexCleanupPattern();\n assertEquals(result.length, 0);\n\n System.setProperty(\"CRON_INDEX_CLEANUP_PATTERN\", \"blah;fdjo\");\n result = Config.getIndexCleanupPattern();\n assertEquals(result.length, 1);\n\n System.setProperty(\"CRON_INDEX_CLEANUP_PATTERN\", \"-testindex*,-testquery*,-testcursor*\");\n result = Config.getIndexCleanupPattern();\n assertEquals(result.length, 3);\n }\n\n @Test\n public void try_get_indexCleanUp_tenants() {\n System.setProperty(\"CRON_INDEX_CLEANUP_TENANTS\", \"\");\n String[] result = Config.getIndexCleanupTenants();\n assertEquals(result.length, 0);\n\n System.setProperty(\"CRON_INDEX_CLEANUP_TENANTS\", \"tenant1;fdjo\");\n result = Config.getIndexCleanupTenants();\n assertEquals(result.length, 1);\n\n System.setProperty(\"CRON_INDEX_CLEANUP_TENANTS\", \"tenant1,common\");\n result = Config.getIndexCleanupTenants();\n assertEquals(result.length, 2);\n }\n\n @Test\n public void get_env() {\n System.setProperty(\"ENVIRONMENT\", \"evd\");\n assertTrue(Config.isPreDemo());\n assertTrue(Config.isPreP4d());\n assertFalse(Config.isLocalEnvironment());\n }\n\n // TODO: Remove this 
temporary implementation when ECE CCS is utilized\n @Test\n public void try_get_ccsStatus() {\n System.setProperty(\"SMART_SEARCH_CCS_DISABLED\", \"true\");\n assertTrue(Config.isSmartSearchCcsDisabled());\n\n System.setProperty(\"SMART_SEARCH_CCS_DISABLED\", \"\");\n assertFalse(Config.isSmartSearchCcsDisabled());\n }\n}\n" }, { "alpha_fraction": 0.6737653017044067, "alphanum_fraction": 0.6819211840629578, "avg_line_length": 30.98550796508789, "blob_id": "034a580fad31f3a0fc8a53689bb37640a330cc4f", "content_id": "109fd6db41df39a4478d057baa58770402971685", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": true, "language": "Java", "length_bytes": 2207, "license_type": "permissive", "max_line_length": 112, "num_lines": 69, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/cache/JsonCodec.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.cache;\n\nimport com.google.gson.Gson;\nimport com.lambdaworks.redis.codec.RedisCodec;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStreamReader;\nimport java.nio.ByteBuffer;\n\nclass JsonCodec<K, V> implements RedisCodec<K, V> {\n\n private final Class<K> classOfK;\n private final Class<V> classOfV;\n private final Gson gson = new Gson();\n\n public JsonCodec(Class<K> classOfK, Class<V> classOfV) {\n this.classOfK = classOfK;\n this.classOfV = classOfV;\n }\n\n @Override\n public K decodeKey(ByteBuffer bytes) {\n return decode(bytes, classOfK);\n }\n\n @Override\n public V decodeValue(ByteBuffer bytes) {\n return decode(bytes, classOfV);\n }\n\n @Override\n public ByteBuffer encodeKey(K k) {\n return k == null ? ByteBuffer.wrap(new byte[0]) : ByteBuffer.wrap(gson.toJson(k).getBytes());\n }\n\n @Override\n public ByteBuffer encodeValue(V v) {\n return v == null ? 
ByteBuffer.wrap(new byte[0]) : ByteBuffer.wrap(gson.toJson(v).getBytes());\n }\n\n private <T> T decode(ByteBuffer bytes, Class<T> classOf) {\n byte[] array = new byte[bytes.remaining()];\n bytes.get(array);\n try (InputStreamReader stream = new InputStreamReader(new ByteArrayInputStream(array))) {\n return gson.fromJson(stream, classOf);\n } catch (IOException e) {\n System.err.println(String.format(\"Unexpected error decoding from redis cache: %s\", e.getMessage()));\n return null;\n }\n }\n}\n" }, { "alpha_fraction": 0.719937801361084, "alphanum_fraction": 0.7309005260467529, "avg_line_length": 52.802040100097656, "blob_id": "f3e4e4953fd4f3f1efd66a77fd05b06e6e4c8729", "content_id": "993fcc0fe2e8fe77c66d0088626fd3a0ae630153", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26366, "license_type": "permissive", "max_line_length": 242, "num_lines": 490, "path": "/osdu-r2/README.md", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# OSDU Release 2 Prototype\n\n## Contents\n\n* [Introduction](#introduction)\n* [Terms and definitions](#terms-and-definitions)\n* [Intention](#intention)\n* [Technology stack](#technology-stack)\n* [Auditing and logging](#auditing-and-logging)\n* [OSDU R2 services and components](#osdu-r2-services-and-components)\n* [Service definitions](#services-definitions)\n * [Ingestion service](#ingestion-service)\n * [Workflow service](#workflow-service)\n * [Delivery service](#delivery-service)\n * [Storage service](#storage-service)\n* [Workflow Engine (Apache Airflow)](#workflow-engine-apache-airflow)\n * [Opaque Ingestion DAG](#opaque-ingestion-dag)\n * [Manifest Ingestion DAG](#manifest-ingestion-dag)\n * [Workflow Status Operator](#workflow-status-operator)\n * [Stale Jobs Scheduler](#stale-jobs-scheduler)\n* [Google Cloud Platform implementation](#osdu-r2-google-cloud-platform-implementation)\n * [Cloud Firestore collections](#cloud-firestore-collections)\n\n## Introduction\n\nThe OSDU Release 2 Prototype is an implementation of the OSDU standard and is designed to ingest and deliver upstream\noil and gas data.\n\nThe OSDU R2 is cloud-agnostic &mdash; it provides common implementations that can be deployed and orchestrated on any\ncloud platform.\n\n## Terms and definitions\n\nThe following table defines the terms introduced or used in OSDU R2.\n\n| Property | Description |\n| ------------ | ------------------------------------------------------------------------------------------------------- |\n| SRN | Subsurface Data Universe Resource Number, an identifier of OSDU types that has the form `srn:namespace:type:unique_key:version`. |\n| Manifest | The JSON data that includes an OSDU Work Product and associated Work Product Components and Files. The manifest is validated with the OSDU WorkProductLoadManifestStagedFiles schema. |\n| Work Product | A package of data items prepared by an application for upload to OSDU. A Work Product consists of Work Product metadata and one or more Work Product Components. |\n| Work Product Component | A typed, smallest, independently usable unit of business data content transferred to OSDU as part of a Work Product. Each Work Product Component consists of one or more data content units known as OSDU Files. |\n| File | A concrete file uploaded to the system. Files in the OSDU manifest are associated with a Work Product Component. |\n| Landing zone | Location in a cloud platform where files for ingestion are loaded. 
Consists of the Driver and Location. |\n| Driver | Description of where a file was loaded by the user. Example: \"GCS\" (Google Cloud Storage). |\n| Location | Direct URI to file in cloud platform storage, such as a GCS bucket. |\n| Workflow | Unique business process to be carried out by the system. In OSDU R2, ingestion is one such process. |\n| Opaque data | Opaque is used when referring to file types that are ingested by the system. If the system doesn't receive \"well_log\" as the data type during ingestion, the data is treated as \"opaque\". That is, \"opaque\" means any data type. |\n| Airflow | An orchestration platform to author, schedule, and monitor workflows. |\n| DAG | Directed Acyclic Graph, a set of tasks that Apache Airflow runs to perform workflow steps. |\n\n> **Note**: The Driver and Location are used to give the internal OSDU R2 services direct access to the file.\n> **Note**: The Location doesn't necessarily store the signed URL by which the user uploads their file to the system.\n\nAdditional terms introduced by OpenDES.\n\n| Property | Description |\n| -------- | ------------------------------------------------------------------------------------------------------ |\n| ODES | OpenDES, the open source version of the DELFI Data Ecosystem. Developed and supported by Schlumberger. |\n| ACL | A group of users that have access to the DELFI record. Part of the ODES record structure. |\n| Legal | Consists of a list of legal tags associated with the record and a list of relevant data countries. Part of the ODES record structure. |\n\n## Intention\n\nThe OSDU R2 Prototype focuses on the implementation of the OSDU-compatible ingestion process. More specifically, the\nintent of the OSDU R2 Prototype is to:\n\n* Provide a unified ingestion flow based on the ingestion flows from OSDU Release 1 and the DELFI Data Ecosystem\n* Refactor the orchestration implementation of the DELFI Data Ecosystem\n* Develop an orchestration basis for different kinds of OSDU workflows, including the interactions with OSDU, storage\nand indexing workflows, and domain-specific ingestion workflows for different file types\n\nThe orchestration implementation is based on Apache Airflow, which allows for:\n\n* Validation, refinement, and decision making on the required ingestion workflow characteristics and methods\n* Provisioning of the basis for design documentation for the broader use cases, such as domain-specific workflows\n\nApache Airflow is an open source solution for workflow orchestration. The choice of Airflow for orchestrating OSDU R2\nservices is dictated by a set of functional and operational requirements listed below:\n\n* **Functional requirements**\n  * Support for both sequential and parallel execution of tasks\n  * Support for both synchronous and asynchronous operations\n  * Implementation of error handling features\n  * State persistence\n* **Operational requirements**\n  * Available admin dashboard for viewing and handling tasks\n  * Possibility to resume failed workflow jobs\n \n## Technology stack\n\n> OSDU R2 uses Maven's BOM to handle the versions of dependencies, which is why some libraries in the list are specified\n> without versions. 
These dependencies are resolved according to the Spring Boot 2.2.5 version.\n\nThe OSDU GCP R2 is based on the following technology stack:\n\n* [Java 8]\n* [Python 3.6+], used for the Apache Airflow DAGs and the Python SDK\n* [Google Cloud SDK]\n* [Terraform 0.12.8+]\n* [Spring Boot 2.2.5]\n* [Spring Boot Cloud Hoxton.SR3]\n* [Project Lombok]\n* [Jackson]\n* [JavaX Inject 1]\n* [Guava 28.2-jre]\n* [GSON 2.8.5]\n* [Lettuce 4.5.0.Final]\n* [Swagger Core JAX RS Project Setup 1.5.X]\n* [Google HTTP Client 1.31.0]\n* [Auth0 Java JWT 3.8.1]\n* [JSON Web Token 0.9.1]\n* [Elasticsearch 6.6.2]\n* [Elasticsearch REST Client 6.6.2]\n* [Elasticsearch REST High Level Client 6.6.2]\n* [JUnit]\n* [Mockito 2.0.2-beta]\n* [Powermock 2.0.2]\n* [MapStruct 1.3.1 Final]\n\nJava build dependencies:\n\n* [Maven Checkstyle Plugin 3.1.0]\n* [Maven PMD Plugin 3.12.0]\n* [JaCoCo Maven Plugin 0.8.4]\n* [Spotbugs Maven Plugin 3.1.12]\n* [Maven Surefire Plugin]\n\nOSDU GCP R2 extensively uses the following Google Cloud Platform services:\n\n* [Cloud Run]\n* [Cloud Storage]\n* [Cloud Firestore]\n* [Cloud Datastore], used only for compatibility reasons; Datastore is to be replaced by Firestore\n* [Cloud SQL]\n* [Cloud Functions]\n* [Compute Engine]\n* [App Engine]\n* [Google Cloud Operations Suite] (Stackdriver)\n\n## Auditing and logging\n\nOSDU R2 uses Spring Boot default logging to record the application state and errors. Google Cloud Platform collects the\ngenerated logs from the running application and publishes them to [Google Cloud Operations Suite] (formerly\nStackdriver). \n\n## OSDU R2 services and components\n\nThe OSDU R2 Prototype introduces the following services:\n\n* [Delivery](#delivery-service)\n* [Ingestion](#ingestion-service)\n* [Workflow](#workflow-service)\n\nBesides the core services necessary for ingestion, the OSDU R2 Prototype also introduces changes to the DELFI Storage\nservice in order to support the new ingestion flow.\n\nThe OSDU R2 Prototype orchestration implementation is available as a Workflow Engine, which encompasses the following\ncomponents:\n\n* [Manifest Ingestion DAG](#manifest-ingestion-dag)\n* [Opaque Ingestion DAG](#opaque-ingestion-dag)\n* [Stale Jobs Scheduler](#stale-jobs-scheduler)\n* [Workflow Status Operator](#workflow-status-operator)\n* [Finished Workflow Sensor Operator](#finished-workflow-sensor-operator)\n\n## Services definitions\n\nThe following sections discuss the implementation details of the services developed for the OSDU ingestion flow. The\ngeneral considerations of OSDU R2 services implementation are:\n\n* Most services provide both external and internal API endpoints. The third-party applications can query only the\nexternal API endpoints, while the internal API endpoints can only be queried by OSDU services.\n* Each service's internal and external API endpoints need to receive a JSON Web Token (JWT). 
The future implementations\nof the services might be based on token exchange as part of the security model.\n* Each service's internal and external APIs need to receive the DELFI partition ID to which the user has access.\n\n### Delivery service\n\nThe Delivery service provides internal and external API endpoints to let OSDU services and third-party applications\nquery file location or request documents from the system.\n\nThe OSDU R2 Delivery service is based on the OSDU R1 Delivery with new endpoints for requesting file location data and a\nreworked API for delivery of documents.\n\n**Implementation**: [os-delivery](./os-delivery) <br>\n**Description and workflow**: [OSDU R2 Delivery Service](./os-delivery/README.md)\n\n#### Delivery API\n\n**POST /delivery**\n\nThe `/delivery` API endpoint returns a list of OSDU data per SRN. If an SRN wasn’t found in the system, it’s returned in\nthe list of Unprocessed SRNs. The endpoint is open for external requests.\n\n**POST /getLocation**\n\nThe `/getLocation` API endpoint returns a signed URL to the third-party application. The signed URL is the path to the\nfile to be uploaded to the landing zone. This endpoint is open for external requests.\n\n**POST /getFileLocation**\n\nThe `/getFileLocation` API endpoint returns the file location information, which includes the Driver and Location. This\nendpoint is closed for external requests.\n\n**POST /getFileList**\n\nThe `/getFileList` API endpoint returns the paginated results of the file records from the database to let OSDU services\nknow whether a file was uploaded by the user or not. This endpoint is closed for external requests.\n\n> **Note**: The `getFileList` endpoint isn't used in the OSDU R2 Prototype.\n\n### Ingestion service\n\nThe OSDU R2 Prototype introduces two ingestion workflow types.\n\n* **Manifest Ingestion workflow**. This OSDU R2 Ingestion service is partly based on the implementation of the OSDU R1\nIngestion service and targets processing multiple files with metadata added as OSDU Work Product and Work Product\nComponents to the ingestion manifest. The Manifest Ingestion flow requires an OSDU manifest added to the request body\nwith WP and WPCs. The Ingestion service only validates this manifest against the schema stored in the database.\n* **Default (Opaque) Ingestion workflow**. This ingestion process aims to process a single file per request. The\nOSDU-compatible metadata (the OSDU WorkProduct manifest) isn't added to the request.\n\nThe Ingestion service provides two endpoints for submitting files for ingestion. This API carries out necessary\noperations depending on the file type and then submits an ingestion workflow to the OSDU Workflow service.\n\n**Implementation**: [ingest](./os-ingest) <br>\n**Detailed information**: [OSDU R2 Ingestion Service](./os-ingest/README.md)\n\n#### Ingestion API\n\nThe Ingestion service provides two endpoints for submitting files for ingestion.\n\n**POST /submit**\n\nThe `/submit` API endpoint starts ingestion of a file, and then returns the workflow job ID to the user. This endpoint\nis open for external requests.\n\n**POST /submitWithManifest**\n\nThe `/submitWithManifest` API endpoint starts ingestion of multiple files with metadata added as a Work Product and Work\nProduct Component in the manifest. 
This endpoint is open for external requests.\n\n### Workflow service\n\nThe Workflow service determines and configures any business workflow to be run by the Workflow Engine (Apache Airflow).\n\nIn the OSDU R2 Prototype, the Workflow service queries Apache Airflow to start specific ingestion flows depending on the\nworkflow type and data type.\n\n**Implementation**: [os-workflow](./os-workflow) <br>\n**Description and workflow**: [OSDU R2 Workflow Service](./os-workflow/README.md)\n\n#### Workflow API\n\n**POST /startWorkflow**\n\nThe `/startWorkflow` API endpoint starts a new workflow of the specific type depending on the data added to the request.\nThis endpoint is closed for external requests.\n\n**POST /getStatus**\n\nThe `/getStatus` API endpoint returns the current status of the workflow job stored in the database. This endpoint is\nopen for external requests.\n\n**POST /updateWorkflowStatus**\n\nThe `/updateWorkflowStatus` API endpoint receives a workflow ID and the current workflow status, and then updates it in\nthe database. This endpoint is closed for external requests.\n\n### Storage service\n\n> The OSDU R2 Prototype doesn't change the Storage service implementation. The changes that this section talks through\n> will be implemented in the future releases of OSDU.\n\nThe OSDU R2 Prototype Storage service is an extension of the DELFI Storage service and is designed to store extra\nnon-indexed metadata with key-value string parameters with each record.\n\nIn the OSDU R2 Prototype implementation, the Storage service's `/CreateRecord` endpoint adds the workflow and file IDs\nto the file records in the database.\n\n#### Storage API\n\n**POST /CreateRecord**\n\nThe `/CreateRecord` API endpoint creates a record in the database for each uploaded file. This is an existing endpoint\nand is updated to store extra non-indexing metadata fields with the records. This endpoint is closed for external\nrequests.\n\n**POST /listRecords**\n\nThe `/listRecords` API endpoint is new and searches the existing records by metadata. This endpoint is closed for\nexternal requests.\n\n### Workflow Engine (Apache Airflow)\n\nThe OSDU R2 Workflow Engine is an implementation of Apache Airflow that handles pipeline processing in OSDU R2.\n\n#### Manifest Ingestion DAG\n\nThe Manifest Ingestion DAG is partly based on the implementation of the [OSDU R1 Ingestion service].\nThe DAG carries out ingestion of OSDU Files with Work Product and Work Product Components metadata, all provided in the\nmanifest. The OSDU R2 Ingestion service performs manifest validation, and then the DAG runs the necessary tasks to\ncreate new records (ingest) for files, WPCs, and WP.\n\n#### Opaque Ingestion DAG\n\nThe Opaque Ingestion DAG carries out ingestion of the opaque data type. The DAG receives files for ingestion and creates\nrecords for them in the database. The OSDU ingestion process, which may include extraction, categorization, enrichment,\nquality assessment, and artifact generation, doesn't happen.\n\n#### Stale Jobs Scheduler\n\nThe Stale Jobs Scheduler is an operator designed to run at an N-minute interval to verify the current status of the\nsubmitted workflow. For the workflows that have a **submitted** or **running** status in the database but that have\nfailed during execution, the Stale Jobs Scheduler sets their status to **failed**.\n\n#### Workflow Status Operator\n\nThe Workflow Status Operator is a custom Airflow operator that updates the status of the submitted workflows. 
This\noperator queries the Workflow service to update the status.\n\n#### Finished Workflow Sensor Operator\n\nThe Finished Workflow Sensor operator is a custom Airflow operator that notifies the DAG that the current ingestion\nprocess for a file has completed. The DAG starts ingestion of the next file in the list.\n\n## OSDU R2 Prototype Ingestion workflow\n\nThe OSDU R2 Prototype implementation introduces two ingestion workflow types that both consist of the following phases:\n\n1. File uploading\n2. Ingestion preparation\n3. Pipeline processing with Apache Airflow\n\n### 1. File uploading\n\nThe first phase of OSDU R2 ingestion is uploading a file to the system. The user needs to obtain a signed URL from the\nOSDU Delivery Service. The user fully controls file upload. In OSDU R2 Prototype, services do not verify whether a file\nwas uploaded to the landing zone or not.\n\nFile uploading workflow:\n\n1. The user or application sends an HTTP request to the Delivery Service to get a file location.\n2. The Delivery service creates a signed URL for the file.\n   > In the GCP implementation, the Delivery service queries Google Cloud Storage to generate a signed URL.\n3. The Delivery service creates a file upload record in the database.\n4. The Delivery service returns a signed URL to the client.\n5. The user or application uploads a file to the landing zone by the signed URL.\n   > The Delivery Service does not verify whether the file was uploaded or not.\n\n### 2. Ingestion\n\nThe ingestion phase consists of the following steps:\n\n1. The user or application submits a request for ingestion to the OSDU R2 Ingestion Service.\n   > **Note**: The ingestion request may contain a manifest with a list of files and the metadata added as OSDU Work\n   > Product and Work Product Components. For the OSDU Ingestion workflow, Files are presented as a list of file IDs.\n2. In the Opaque ingestion flow, the Ingestion service queries the Delivery service to obtain the files by the signed \nURLs. In the Manifest ingestion flow, the Ingestion service validates the manifest against the schema stored in the\nproject's database.\n   * In the Opaque ingestion flow, the Delivery service returns the file location data &mdash; Driver and Location\n     &mdash; to the Ingestion service.\n3. The Ingestion service submits a new ingestion job with the context to the Workflow service. The context includes:\n   * File location\n   * Workflow ingestion type &mdash; \"ingest\" or \"osdu\"\n   * Manifest if the workflow type is \"osdu\"\n4. The Workflow service queries the database to understand what Airflow DAG should be started.\n5. The Workflow service submits a new workflow job with the DAG type, file type, and context.\n6. The Workflow service stores the workflow job ID in the database, and then returns the workflow job ID to the\nIngestion service.\n7. The Ingestion service returns the workflow job ID to the user. The user or application can eventually submit the\nworkflow job ID to the Workflow service to learn the status of the current workflow.\n\n### 3. Pipeline processing with Apache Airflow\n\nDuring this step, Apache Airflow starts running the necessary DAGs. Depending on the workflow, user, and data type,\ndifferent DAGs perform different actions.\n \n* During the Opaque ingestion flow, the Opaque Ingestion DAG only creates a record. 
\n* During the Manifest ingestion flow, the Manifest Ingestion DAG creates records for the Work Product, Work Product\nComponents, and Files.\n  * A list of Work Product Components is retrieved from the manifest. For each Work Product Component, a list of\n  associated files is ingested.\n  * Once all Work Product Components are ingested, the DAG creates a record for the Work Product.\n\n## OSDU R2 Google Cloud Platform implementation\n\nThe OSDU R2 Prototype uses the following Google Cloud Platform services:\n\n* Cloud Firestore\n* Cloud Datastore\n  > **Note**: The OSDU GCP project uses Cloud Datastore for compatibility reasons to work with the ODES services. The\n  current OSDU implementation includes code to work with Firestore or Datastore. However, in the future releases of\n  OSDU, the Cloud Datastore implementation will be removed. Cloud Datastore creates the same collections and indexes\n  as Cloud Firestore.\n* Cloud Composer\n* Cloud Storage (GCS)\n\n### Cloud Firestore collections\n\nCloud Firestore creates the following collections in the database to store various data in OSDU R2 Prototype.\n\n#### `file-locations`\n\nThe `file-locations` collection stores the file documents by file ID. Each document stores the following information.\n\n| Property | Type | Description |\n| --------- | -------- | ----------------------------------------------- |\n| FileID | `String` | Unique file ID used as key to store file data |\n| Driver | `String` | Description of the place where files are loaded |\n| Location | `String` | Direct URI to the file |\n| CreatedAt | `String` | Timestamp when the record was created |\n| CreatedBy | `String` | User ID, not supported in the OSDU R2 Prototype |\n\n#### `dag-selection`\n\nThe `dag-selection` collection stores the data necessary to determine the Apache Airflow DAG to run for a specific\nworkflow.\n\n| Property | Type | Description |\n| ------------ | -------- | -------------------------------------------------------------------------------- |\n| WorkflowType | `String` | Supported workflow types. In OSDU R2 Prototype these are \"ingest\" or \"osdu\" |\n| DataType | `String` | Supported data types. In OSDU R2 Prototype these are \"well_log\" or \"opaque\" |\n| UserID | `String` | ID of the user that requested file ingestion. Not supported in OSDU R2 Prototype |\n| DAGName | `String` | The name of an Airflow DAG to run |\n\n#### `workflow-status`\n\nThe `workflow-status` collection stores the current status and some additional properties of a started workflow job.\n\n| Property | Type | Description |\n| ------------ | -------- | ---------------------------------------------------------------------------- |\n| WorkflowID | `String` | ID of the started workflow |\n| AirflowRunID | `String` | ID of the Airflow process |\n| Status | `String` | Current status of a workflow &mdash; submitted, running, finished, or failed |\n| SubmittedAt | `String` | Timestamp when the workflow was submitted to Airflow |\n| SubmittedBy | `String` | User ID, isn't supported in the OSDU R2 Prototype |\n\n#### `schema-data`\n\nThe `schema-data` collection stores the OSDU manifest validation schemas. In OSDU R2, the collection stores only the\nOSDU WorkProductLoadManifestStagedFiles JSON schema.\n\n| Property | Type | Description |\n| --------- | ------- | ---------------------------------------------------------- |\n| CreatedAt | `String` | The timestamp when the record was created. |\n| Schema | `String` | The OSDU [WorkProductLoadManifestStagedFiles] JSON schema. 
|\n| Title | `String` | The name of the manifest validation schema. |\n\n[Java 8]: https://java.com/en/download/faq/java8.xml\n[Python 3.6+]: https://www.python.org/downloads/release/python-360/ \n[Google Cloud SDK]: https://cloud.google.com/sdk/install\n[Terraform 0.12.8+]: https://www.terraform.io/downloads.html\n[Spring Boot 2.2.5]: https://spring.io/blog/2020/02/27/spring-boot-2-2-5-released\n[Spring Boot Cloud Hoxton.SR3]: https://spring.io/blog/2020/03/05/spring-cloud-hoxton-service-release-3-sr3-is-available\n[Project Lombok]: https://projectlombok.org/\n[Jackson]: https://github.com/FasterXML/jackson\n[JavaX Inject 1]: https://mvnrepository.com/artifact/javax.inject/javax.inject/1\n[Guava 28.2-jre]: https://github.com/google/guava\n[GSON 2.8.5]: https://github.com/google/gson\n[Lettuce 4.5.0.Final]: https://lettuce.io/\n[Swagger Core JAX RS Project Setup 1.5.X]: https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-JAX-RS-Project-Setup-1.5.X\n[Google HTTP Client 1.31.0]: https://mvnrepository.com/artifact/com.google.http-client/google-http-client/1.31.0\n[Auth0 Java JWT 3.8.1]: https://mvnrepository.com/artifact/com.auth0/java-jwt/3.8.1\n[JSON Web Token 0.9.1]: https://mvnrepository.com/artifact/io.jsonwebtoken/jjwt/0.9.1\n[Elasticsearch 6.6.2]: https://www.elastic.co/guide/en/elasticsearch/reference/6.6/release-notes-6.6.2.html\n[Elasticsearch REST Client 6.6.2]: https://mvnrepository.com/artifact/org.elasticsearch.client/elasticsearch-rest-client/6.6.2\n[Elasticsearch REST High Level Client 6.6.2]: https://mvnrepository.com/artifact/org.elasticsearch.client/elasticsearch-rest-high-level-client/6.6.2\n[JUnit]: https://junit.org/junit5/\n[Mockito 2.0.2-beta]: https://mvnrepository.com/artifact/org.mockito/mockito-all/2.0.2-beta\n[Powermock 2.0.2]: https://mvnrepository.com/artifact/org.powermock/powermock-core/2.0.2\n[MapStruct 1.3.1 Final]: https://mapstruct.org/news/2019-09-29-mapstruct-1_3_1_Final-bug-fix-released/\n[Maven Checkstyle Plugin 3.1.0]: https://blogs.apache.org/maven/entry/apache-maven-checkstyle-plugin-version\n[Maven PMD Plugin 3.12.0]: https://maven.apache.org/plugins/maven-pmd-plugin/index.html\n[JaCoCo Maven Plugin 0.8.4]: https://mvnrepository.com/artifact/org.jacoco/jacoco-maven-plugin/0.8.4\n[Spotbugs Maven Plugin 3.1.12]: https://spotbugs.github.io/spotbugs-maven-plugin/\n[Maven Surefire Plugin]: https://maven.apache.org/surefire/maven-surefire-plugin/\n[Cloud Run]: https://cloud.google.com/run\n[Cloud Storage]: https://cloud.google.com/storage\n[Cloud Firestore]: https://cloud.google.com/firestore\n[Cloud Datastore]: https://cloud.google.com/datastore\n[Cloud SQL]: https://cloud.google.com/sql\n[Cloud Functions]: https://cloud.google.com/functions/\n[Compute Engine]: https://cloud.google.com/compute\n[App Engine]: https://cloud.google.com/appengine\n[Google Cloud Operations Suite]: https://cloud.google.com/products/operations\n[Spring Boot default logging]: https://github.com/spring-projects/spring-boot/blob/2.2.x/spring-boot-project/spring-boot/src/main/resources/org/springframework/boot/logging/logback/defaults.xml#L11\n[Open Group Community Wiki]: https://community.opengroup.org/osdu/documentation/-/wikis/OSDU-(C)/Design-and-Implementation/Ingestion-and-Enrichment-Detail/R2-Ingestion-Workflow-Orchestration-Spike\n[OSDU R1 Ingestion service]: ../compatibility-layer/docs/OSDU%20Compatibility%20Layer%20Services.md\n[WorkProductLoadManifestStagedFiles]: 
https://gitlab.opengroup.org/osdu/json-schemas/-/blob/master/WorkProductLoadManifestStagedFiles.json" }, { "alpha_fraction": 0.7586393356323242, "alphanum_fraction": 0.7616090774536133, "avg_line_length": 34.27619171142578, "blob_id": "f2bcdcff4d9e3a7d3513eb237efafb611e913acc", "content_id": "11540b400b430b78db57c1689affea683f8b6634", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3704, "license_type": "permissive", "max_line_length": 99, "num_lines": 105, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/delfi/DelfiInitialIngestService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.delfi;\n\nimport com.osdu.exception.OsduBadRequestException;\nimport com.osdu.mapper.IngestHeadersMapper;\nimport com.osdu.messaging.IngestPubSubGateway;\nimport com.osdu.model.IngestHeaders;\nimport com.osdu.model.IngestResult;\nimport com.osdu.model.job.IngestMessage;\nimport com.osdu.model.type.manifest.LoadManifest;\nimport com.osdu.request.OsduHeader;\nimport com.osdu.service.AuthenticationService;\nimport com.osdu.service.InitialIngestService;\nimport com.osdu.service.JobStatusService;\nimport com.osdu.service.validation.LoadManifestValidationService;\nimport javax.inject.Named;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.messaging.MessageHeaders;\nimport org.springframework.stereotype.Service;\n\n@Service\n@RequiredArgsConstructor\n@Slf4j\npublic class DelfiInitialIngestService implements InitialIngestService {\n\n final JobStatusService jobStatusService;\n final LoadManifestValidationService loadManifestValidationService;\n final IngestPubSubGateway ingestGateway;\n final AuthenticationService authenticationService;\n\n @Named\n final IngestHeadersMapper ingestHeadersMapper;\n\n @Override\n public IngestResult ingestManifest(LoadManifest loadManifest,\n MessageHeaders headers) {\n log.debug(\"Request to ingest file with following parameters: {}, and headers : {}\",\n loadManifest, headers);\n\n checkPreconditions(headers);\n\n IngestHeaders ingestHeaders = ingestHeadersMapper.toIngestHeaders(headers);\n log.debug(\"Parse ingest headers. Headers: {}\", ingestHeaders);\n\n authenticationService\n .checkAuthentication(ingestHeaders.getAuthorizationToken(), ingestHeaders.getPartition());\n\n loadManifestValidationService.validateManifest(loadManifest);\n\n String jobId = jobStatusService.initInjectJob();\n\n IngestMessage ingestMessage = IngestMessage.builder()\n .ingestJobId(jobId)\n .loadManifest(loadManifest)\n .headers(ingestHeaders)\n .build();\n log.debug(\"Send ingest message for processing. 
Message: {}\", ingestMessage);\n ingestGateway.sendIngestToPubSub(ingestMessage);\n\n log.debug(\"Request to ingest with parameters : {}, init the injection jobId: {}\", loadManifest,\n jobId);\n return IngestResult.builder()\n .jobId(jobId)\n .build();\n }\n\n private void checkPreconditions(MessageHeaders headers) {\n if (!headers.containsKey(OsduHeader.AUTHORIZATION)) {\n throw new OsduBadRequestException(\"Missing authorization token\");\n }\n\n if (!headers.containsKey(OsduHeader.PARTITION)) {\n throw new OsduBadRequestException(\"Missing partition\");\n }\n\n if (!headers.containsKey(OsduHeader.LEGAL_TAGS)) {\n throw new OsduBadRequestException(\"Missing \\\"legal-tags\\\" header\");\n }\n\n if (!headers.containsKey(OsduHeader.RESOURCE_HOME_REGION_ID)) {\n throw new OsduBadRequestException(\"Missing \\\"resource-home-region-id\\\" header\");\n }\n\n if (!headers.containsKey(OsduHeader.RESOURCE_HOST_REGION_IDS)) {\n throw new OsduBadRequestException(\"Missing \\\"resource-host-region-ids\\\" header\");\n }\n }\n\n}\n" }, { "alpha_fraction": 0.7710109353065491, "alphanum_fraction": 0.7758830785751343, "avg_line_length": 39.04878234863281, "blob_id": "06fbeed2a90d256630652a096553f62c4012f252", "content_id": "1103815ca9aa1434c66acf8eb9bcca3f12bcff90", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1642, "license_type": "permissive", "max_line_length": 97, "num_lines": 41, "path": "/osdu-r2/os-delivery/provider/delivery-gcp-datastore/src/main/java/org/opengroup/osdu/delivery/provider/gcp/repository/FileLocationEntityRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.provider.gcp.repository;\n\nimport java.util.Date;\nimport org.opengroup.osdu.delivery.provider.gcp.model.entity.FileLocationEntity;\nimport org.springframework.cloud.gcp.data.datastore.repository.DatastoreRepository;\nimport org.springframework.cloud.gcp.data.datastore.repository.query.Query;\nimport org.springframework.data.domain.Page;\nimport org.springframework.data.domain.Pageable;\nimport org.springframework.data.repository.query.Param;\nimport org.springframework.lang.Nullable;\nimport org.springframework.stereotype.Repository;\n\n@Repository\npublic interface FileLocationEntityRepository\n extends DatastoreRepository<FileLocationEntity, Long> {\n\n @Nullable\n FileLocationEntity findByFileID(String fileID);\n\n @Query(\"SELECT * FROM `file-locations`\"\n + \" WHERE CreatedAt >= @time_from AND CreatedAt <= @time_to AND CreatedBy = @user_id\")\n Page<FileLocationEntity> findFileList(@Param(\"time_from\") Date from, @Param(\"time_to\") Date to,\n @Param(\"user_id\") String userID, Pageable pageable);\n\n}\n" }, { "alpha_fraction": 0.5965318083763123, "alphanum_fraction": 0.6527662873268127, "avg_line_length": 47.83064651489258, "blob_id": 
"231725a30f7da612e01c29d357ee1437bf4e368a", "content_id": "be38cff5be121b5a6ee2135ecb5972dc30cf7263", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12110, "license_type": "permissive", "max_line_length": 251, "num_lines": 248, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/model/units/UnitParseTests.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.units;\n\nimport org.junit.Test;\n\nimport static junit.framework.TestCase.assertTrue;\nimport static org.junit.Assert.*;\n\npublic class UnitParseTests {\n private static final String DEG_C_S_1 = \"%7B%22ScaleOffset%22%3A%7B%22Scale%22%3A1.0%2C%22Offset%22%3A-273.15%7D%2C%22Symbol%22%3A%22degC%22%2C%22BaseMeasurement%22%3A%22%257B%2522Ancestry%2522%253A%2522Temperature%2522%257D%22%7D\";\n private static final String DEG_C_E_1 = \"%7B%22ABCD%22%3A%7B%22A%22%3A273.15%2C%22B%22%3A1.0%2C%22C%22%3A1.0%2C%22D%22%3A0.0%7D%2C%22Symbol%22%3A%22degC%22%2C%22BaseMeasurement%22%3A%22%257B%2522Ancestry%2522%253A%2522K%2522%257D%22%7D\";\n private static final String DEG_F_S_1 = \"%7B%22ScaleOffset%22%3A%7B%22Scale%22%3A0.5555555555555556%2C%22Offset%22%3A-459.67%7D%2C%22Symbol%22%3A%22degF%22%2C%22BaseMeasurement%22%3A%22%257B%2522Ancestry%2522%253A%2522Temperature%2522%257D%22%7D\";\n private static final String DEG_F_E_1 = \"%7B%22ABCD%22%3A%7B%22A%22%3A2298.35%2C%22B%22%3A5.0%2C%22C%22%3A9.0%2C%22D%22%3A0.0%7D%2C%22Symbol%22%3A%22degF%22%2C%22BaseMeasurement%22%3A%22%257B%2522Ancestry%2522%253A%2522K%2522%257D%22%7D\";\n\n private static final String DEG_F_S_2 = \"{\\\"scaleOffset\\\":{\\\"scale\\\":0.5555555555555556,\\\"offset\\\":-459.67},\\\"symbol\\\":\\\"degF\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"Temperature\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"USO\\\"}\";\n private static final String DEG_F_E_2 = \"{\\\"abcd\\\":{\\\"a\\\":2298.35,\\\"b\\\":5.0,\\\"c\\\":9.0,\\\"d\\\":0.0},\\\"symbol\\\":\\\"degF\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"K\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"UAD\\\"}\";\n private static final String DEG_C_S_2 = \"{\\\"scaleOffset\\\":{\\\"scale\\\":1.0,\\\"offset\\\":-273.15},\\\"symbol\\\":\\\"degC\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"Temperature\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"USO\\\"}\";\n private static final String DEG_C_E_2 = \"{\\\"abcd\\\":{\\\"a\\\":273.15,\\\"b\\\":1.0,\\\"c\\\":1.0,\\\"d\\\":0.0},\\\"symbol\\\":\\\"degC\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"K\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"UAD\\\"}\";\n\n private static final String DEG_K_S_2 = 
\"{\\\"scaleOffset\\\":{\\\"scale\\\":1.0,\\\"offset\\\":0.0},\\\"symbol\\\":\\\"K\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"Temperature\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"USO\\\"}\";\n private static final String M = \"{\\\"scaleOffset\\\":{\\\"scale\\\":1.0,\\\"offset\\\":0},\\\"symbol\\\":\\\"m\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"Length\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"USO\\\"}\";\n\n private static final String CUSTOM_1 = \"{\\\"scaleOffset\\\":{\\\"scale\\\":1000000.0,\\\"offset\\\":0.0},\\\"symbol\\\":\\\"MMUSD\\\",\\\"baseMeasurement\\\":{\\\"ancestry\\\":\\\"CurrencyUSD\\\",\\\"type\\\":\\\"UM\\\"},\\\"type\\\":\\\"USO\\\"}\";\n\n private static final double[] DEG_FS = {32.0, 131.0};\n private static final double[] DEG_CS = {0.0, 55.0};\n\n private static IUnit corruptedUnit() {\n return ReferenceConverter.parseUnitReference(M.replace(\"{\", \"\"));\n }\n\n @Test\n public void testV1PersistableRepresentation() {\n IUnit unit1 = ReferenceConverter.parseUnitReference(DEG_C_E_1);\n assertNotNull(unit1);\n assertTrue(unit1.isValid());\n IUnit unit2 = ReferenceConverter.parseUnitReference(DEG_C_S_1);\n assertNotNull(unit2);\n assertTrue(unit2.isValid());\n }\n\n @Test\n public void testV2PersistableRepresentation() {\n IUnit unit1 = ReferenceConverter.parseUnitReference(DEG_C_E_2);\n assertNotNull(unit1);\n assertTrue(unit1.isValid());\n IUnit unit2 = ReferenceConverter.parseUnitReference(DEG_C_S_2);\n assertNotNull(unit2);\n assertTrue(unit2.isValid());\n }\n\n @Test\n public void testV1V2PersistableRepresentations() {\n IUnit unit1 = ReferenceConverter.parseUnitReference(DEG_C_E_2);\n assertNotNull(unit1);\n assertTrue(unit1.isValid());\n IUnit unit2 = ReferenceConverter.parseUnitReference(DEG_C_S_1);\n assertNotNull(unit2);\n assertTrue(unit2.isValid());\n }\n\n @Test\n public void testUnitToUnitConversion() {\n IUnit unit1 = ReferenceConverter.parseUnitReference(DEG_C_E_1);\n IUnit unit2 = ReferenceConverter.parseUnitReference(DEG_F_E_2);\n\n double toValue = unit1.convertToUnit(unit2, DEG_CS[1]);\n assertEquals(DEG_FS[1], toValue, 1.0e-10);\n double[] deg_cs = DEG_CS.clone();\n double[] deg_fs = unit1.convertToUnit(unit2, deg_cs); // from DEG_C to DEG F\n for (int i = 0; i < deg_fs.length; i++) assertEquals(DEG_FS[i], deg_fs[i], 1.0e-10);\n unit2 = ReferenceConverter.parseUnitReference(DEG_F_S_2);\n toValue = unit1.convertToUnit(unit2, DEG_CS[1]);\n assertEquals(DEG_FS[1], toValue, 1.0e-10);\n deg_cs = DEG_CS.clone();\n deg_fs = unit1.convertToUnit(unit2, deg_cs); // from DEG_C to DEG F\n for (int i = 0; i < deg_fs.length; i++) assertEquals(DEG_FS[i], deg_fs[i], 1.0e-10);\n unit2 = ReferenceConverter.parseUnitReference(M);\n toValue = unit1.convertToUnit(unit2, DEG_CS[1]);\n assertEquals(Double.NaN, toValue, 1.0e-10);\n deg_cs = DEG_CS.clone();\n deg_fs = unit1.convertToUnit(unit2, deg_cs); // from DEG_C to M - must fail\n for (double deg_f : deg_fs) assertEquals(Double.NaN, deg_f, 1.0e-10);\n unit1 = ReferenceConverter.parseUnitReference(DEG_C_E_1.replace(\"K\", \"MeasurementDoesNotExist\"));\n toValue = unit1.convertToUnit(unit2, DEG_CS[1]);\n assertEquals(Double.NaN, toValue, 1.0e-10);\n }\n\n @Test\n public void testToSIConversion() {\n double[] deg_fs = {32.0, 131.0};\n double[] deg_cs = {0.0, 55.0};\n double[] ks = {273.15, 328.15}; // expected SI\n\n IUnit unit = ReferenceConverter.parseUnitReference(DEG_F_S_1);\n assertConvertToAndFromSI(unit, deg_fs, ks);\n assertConvertArrayToAndFromSI(unit, deg_fs, ks);\n unit = 
ReferenceConverter.parseUnitReference(DEG_F_E_1);\n assertConvertToAndFromSI(unit, deg_fs, ks);\n assertConvertArrayToAndFromSI(unit, deg_fs, ks);\n unit = ReferenceConverter.parseUnitReference(DEG_C_S_1);\n assertConvertToAndFromSI(unit, deg_cs, ks);\n assertConvertArrayToAndFromSI(unit, deg_cs, ks);\n unit = ReferenceConverter.parseUnitReference(DEG_C_E_1);\n assertConvertToAndFromSI(unit, deg_cs, ks);\n assertConvertArrayToAndFromSI(unit, deg_cs, ks);\n }\n\n private void assertConvertToAndFromSI(IUnit unit, double[] degs, double[] sis) {\n assertNotNull(unit);\n assertTrue(unit.isValid());\n double v = degs[0];\n double si = unit.convertToSI(v);\n assertEquals(sis[0], si, 1.0e-10);\n double non_si = unit.convertFromSI(si);\n assertEquals(degs[0], non_si, 1.0e-10);\n }\n\n private void assertConvertArrayToAndFromSI(IUnit unit, double[] degs, double[] sis) {\n assertNotNull(unit);\n assertTrue(unit.isValid());\n double[] si = degs.clone();\n unit.convertToSI(si);\n for (int i = 0; i < sis.length; i++) assertEquals(sis[i], si[i], 1.0e-10);\n unit.convertFromSI(si);\n for (int i = 0; i < sis.length; i++) assertEquals(degs[i], si[i], 1.0e-10);\n }\n\n @Test\n public void testCreatePersistableReference() {\n IUnit unit = ReferenceConverter.parseUnitReference(DEG_F_E_1);\n assertRoundTripViaPersistableReference(unit);\n unit = ReferenceConverter.parseUnitReference(DEG_F_E_2);\n assertRoundTripViaPersistableReference(unit);\n unit = ReferenceConverter.parseUnitReference(DEG_C_E_1);\n assertRoundTripViaPersistableReference(unit);\n unit = ReferenceConverter.parseUnitReference(DEG_C_E_2);\n assertRoundTripViaPersistableReference(unit);\n }\n\n private void assertRoundTripViaPersistableReference(IUnit unit1) {\n assertNotNull(unit1);\n assertTrue(unit1.isValid());\n String pr = unit1.createPersistableReference();\n IUnit unit2 = ReferenceConverter.parseUnitReference(pr);\n assertNotNull(unit2);\n assertTrue(unit2.isValid());\n assertEquals(unit1.getScale(), unit2.getScale(), 1.0e-10);\n assertEquals(unit1.getOffset(), unit2.getOffset(), 1.0e-10);\n assertEquals(unit1.getSymbol(), unit2.getSymbol());\n assertEquals(unit1.getAncestry(), unit2.getAncestry());\n }\n\n @Test\n public void testInvalidPersistableReferences() {\n String pr = DEG_F_E_1.replace(\"%7B%22\", \"\");\n assertFailingUnit(pr);\n IUnit unit;\n pr = DEG_F_E_2.replace(\"{\", \"\");\n assertFailingUnit(pr);\n }\n\n private void assertFailingUnit(String pr) {\n IUnit unit = ReferenceConverter.parseUnitReference(pr);\n IUnit si = ReferenceConverter.parseUnitReference(DEG_K_S_2);\n assertNotNull(unit);\n assertFalse(unit.isValid());\n assertNull(unit.createPersistableReference());\n assertEquals(Double.NaN, unit.convertFromSI(0.0), 1.0e-10);\n assertEquals(Double.NaN, unit.convertToSI(0.0), 1.0e-10);\n assertEquals(Double.NaN, unit.convertToUnit(si, 0.0), 1.0e-10);\n double[] values = new double[]{0.0};\n unit.convertFromSI(values);\n assertEquals(Double.NaN, values[0], 1.0e-10);\n values[0] = 0.0;\n unit.convertToSI(values);\n assertEquals(Double.NaN, values[0], 1.0e-10);\n double[] deg_fs = DEG_FS.clone();\n double[] deg_cs = unit.convertToUnit(si, deg_fs);\n for (double deg_c : deg_cs) assertEquals(Double.NaN, deg_c, 1.0e-10);\n }\n\n @Test\n public void testEqualInBehavior() {\n IUnit degF1 = ReferenceConverter.parseUnitReference(DEG_F_S_2);\n IUnit degF2 = ReferenceConverter.parseUnitReference(DEG_F_S_2);\n IUnit kelvin = ReferenceConverter.parseUnitReference(DEG_K_S_2);\n IUnit meter = ReferenceConverter.parseUnitReference(M);\n 
IUnit corrupted = corruptedUnit();\n assertTrue(degF1.isEqualInBehavior(degF2));\n assertTrue(degF2.isEqualInBehavior(degF1));\n assertFalse(degF1.isEqualInBehavior(kelvin));\n assertFalse(kelvin.isEqualInBehavior(degF1));\n assertFalse(degF1.isEqualInBehavior(meter));\n assertFalse(meter.isEqualInBehavior(degF1));\n assertFalse(degF1.isEqualInBehavior(corrupted));\n assertFalse(corrupted.isEqualInBehavior(degF1));\n }\n\n @Test\n public void testIsConvertible() {\n IUnit degF = ReferenceConverter.parseUnitReference(DEG_F_S_2);\n IUnit kelvin = ReferenceConverter.parseUnitReference(DEG_K_S_2);\n IUnit meter = ReferenceConverter.parseUnitReference(M);\n IUnit corrupted = corruptedUnit();\n assertTrue(kelvin.isConvertible(degF));\n assertTrue(degF.isConvertible(kelvin));\n assertFalse(degF.isConvertible(meter));\n assertFalse(meter.isConvertible(degF));\n assertFalse(degF.isConvertible(corrupted));\n assertFalse(corrupted.isConvertible(degF));\n }\n\n @Test\n public void testGetBaseUnit() {\n IUnit degF = ReferenceConverter.parseUnitReference(DEG_F_S_2);\n String bu = degF.getBaseUnit();\n assertNotNull(bu);\n assertEquals(bu, DEG_K_S_2);\n IUnit corrupted = corruptedUnit();\n bu = corrupted.getBaseUnit();\n assertNull(bu);\n\n IUnit custom = ReferenceConverter.parseUnitReference(CUSTOM_1);\n bu = custom.getBaseUnit();\n assertNotNull(bu);\n IUnit custom_bu = ReferenceConverter.parseUnitReference(bu);\n double si_v = custom.convertToSI(1234.5);\n double cu_v = custom_bu.convertToUnit(custom, si_v);\n assertEquals(1234.5, cu_v, 1.0e-10);\n }\n}\n" }, { "alpha_fraction": 0.7061049342155457, "alphanum_fraction": 0.7093844413757324, "avg_line_length": 35.703704833984375, "blob_id": "d726b6f0f06d8e7353f48cc0658e8c1372122c28", "content_id": "fdca3f448ad73087939512dd755b385e6d802b2b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3964, "license_type": "permissive", "max_line_length": 99, "num_lines": 108, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/repository/google/GcpIngestJobRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.repository.google;\n\nimport com.google.api.core.ApiFuture;\nimport com.google.cloud.firestore.Firestore;\nimport com.google.cloud.firestore.FirestoreOptions;\nimport com.google.cloud.firestore.QueryDocumentSnapshot;\nimport com.google.cloud.firestore.QuerySnapshot;\nimport com.google.cloud.firestore.SetOptions;\nimport com.google.cloud.firestore.WriteResult;\nimport com.osdu.exception.IngestJobException;\nimport com.osdu.exception.SrnMappingException;\nimport com.osdu.model.job.IngestJob;\nimport com.osdu.repository.IngestJobRepository;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Optional;\nimport java.util.concurrent.ExecutionException;\nimport lombok.extern.slf4j.Slf4j;\nimport 
org.springframework.stereotype.Repository;\n\n@Repository\n@Slf4j\npublic class GcpIngestJobRepository implements IngestJobRepository {\n\n private static final String COLLECTION_NAME = \"ingestJob\";\n private static final String ID_FIELD_NAME = \"id\";\n\n final Firestore firestore;\n\n public GcpIngestJobRepository() {\n this.firestore = FirestoreOptions.getDefaultInstance().getService();\n }\n\n @Override\n public Optional<IngestJob> findById(String id) {\n log.debug(\"Requesting ingest job id : {}\", id);\n\n final ApiFuture<QuerySnapshot> query = firestore.collection(COLLECTION_NAME)\n .whereEqualTo(ID_FIELD_NAME, id).get();\n\n final QuerySnapshot querySnapshot;\n try {\n querySnapshot = query.get();\n } catch (InterruptedException | ExecutionException e) {\n Thread.currentThread().interrupt();\n throw new SrnMappingException(String.format(\"Failed to Ingest job for id %s\", id), e);\n }\n final List<QueryDocumentSnapshot> documents = querySnapshot.getDocuments();\n\n if (documents.size() > 1) {\n throw new IngestJobException(String\n .format(\"Find by Id returned %s document(s), expected 1, query id : %s\",\n documents.size(), id));\n }\n\n log.debug(\"Ingest job request resulted with : {}\", documents);\n return documents.stream()\n .findFirst()\n .map(snap -> snap.toObject(IngestJob.class));\n }\n\n @Override\n public void save(IngestJob ingestJob) {\n log.debug(\"Request to save ingest job : {}\", ingestJob);\n try {\n final WriteResult writeResult = firestore.collection(COLLECTION_NAME)\n .document(ingestJob.getId())\n .set(ingestJob, SetOptions.merge()).get();\n log.debug(\"Ingest job : {} saved on : {}\", ingestJob, writeResult.getUpdateTime());\n } catch (InterruptedException | ExecutionException e) {\n Thread.currentThread().interrupt();\n throw new IngestJobException(\n String.format(\"Exception during saving of ingest job : %s\", ingestJob), e);\n }\n }\n\n @Override\n public void updateFields(String id, Map<String, Object> fields) {\n log.debug(\"Request to update ingest job. Id: {}, fields: {}\", id, fields);\n try {\n WriteResult writeResult = firestore.collection(COLLECTION_NAME)\n .document(id)\n .update(fields).get();\n log.debug(\"Ingest job is updated. Id = {}, saved on: {}\", id, writeResult.getUpdateTime());\n } catch (InterruptedException | ExecutionException e) {\n Thread.currentThread().interrupt();\n throw new IngestJobException(\n String.format(\"Exception during updating of ingest job. 
Id: %s, fields: %s\", id, fields),\n e);\n }\n }\n}\n" }, { "alpha_fraction": 0.745658814907074, "alphanum_fraction": 0.7502553462982178, "avg_line_length": 32.186439514160156, "blob_id": "e4db5f74e1b3725aceb09dcedf50182b16cacbb0", "content_id": "d33fdfd6e986f7447ec23f441a23ea7a344f5110", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1958, "license_type": "permissive", "max_line_length": 98, "num_lines": 59, "path": "/osdu-r2/os-delivery/delivery-core/src/main/java/org/opengroup/osdu/delivery/validation/CommonFileLocationRequestValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.delivery.validation;\n\nimport javax.validation.ConstraintValidatorContext;\nimport lombok.RequiredArgsConstructor;\nimport org.apache.commons.lang3.StringUtils;\nimport org.opengroup.osdu.core.common.model.file.FileLocationRequest;\nimport org.springframework.stereotype.Component;\n\n@Component\n@RequiredArgsConstructor\npublic class CommonFileLocationRequestValidator implements FileLocationRequestValidator {\n\n private static final String INVALID_FILE_ID = \"Invalid FileID\";\n protected static final String FILE_ID_FIELD = \"FileID\";\n\n final FileIdValidator fileIdValidator;\n\n @Override\n public boolean isValid(FileLocationRequest request, ConstraintValidatorContext context) {\n String fileID = request.getFileID();\n\n if (StringUtils.isBlank(fileID)) {\n context.disableDefaultConstraintViolation();\n context\n .buildConstraintViolationWithTemplate(\"{javax.validation.constraints.NotBlank.message}\")\n .addPropertyNode(FILE_ID_FIELD)\n .addConstraintViolation();\n return false;\n }\n\n if (!fileIdValidator.checkFileID(fileID)) {\n context.disableDefaultConstraintViolation();\n context\n .buildConstraintViolationWithTemplate(INVALID_FILE_ID)\n .addPropertyNode(FILE_ID_FIELD)\n .addConstraintViolation();\n return false;\n }\n\n return true;\n }\n\n}\n" }, { "alpha_fraction": 0.6536796689033508, "alphanum_fraction": 0.6587725877761841, "avg_line_length": 43.12359619140625, "blob_id": "b7792c9004fd18af4ee9cf953425b7c6fb2f292b", "content_id": "cc2dbe3c0ef9bddf71f8e78ca4ba66f17f9037e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3927, "license_type": "permissive", "max_line_length": 139, "num_lines": 89, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/search/validation/SpatialFilterValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * 
https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.search.validation;\n\nimport org.opengroup.osdu.core.common.model.search.Coordinate;\nimport org.opengroup.osdu.core.common.model.search.SpatialFilter;\n\nimport javax.validation.ConstraintValidator;\nimport javax.validation.ConstraintValidatorContext;\nimport java.util.List;\n\npublic class SpatialFilterValidator implements ConstraintValidator<ValidSpatialFilter, SpatialFilter> {\n\n @Override\n public void initialize(ValidSpatialFilter validSpatialFilter) {\n\n }\n\n @Override\n public boolean isValid(SpatialFilter spatialFilter, ConstraintValidatorContext context) {\n\n // spatial filter can be null\n if (spatialFilter == null) {\n return true;\n }\n\n // only one of the spatial criteria should be used\n if ((spatialFilter.getByBoundingBox() != null && spatialFilter.getByDistance() != null) ||\n (spatialFilter.getByBoundingBox() != null && spatialFilter.getByGeoPolygon() != null) ||\n (spatialFilter.getByDistance() != null && spatialFilter.getByGeoPolygon() != null)) {\n return getViolation(context, \"only one criteria can be used with SpatialFilter\");\n }\n\n // validate bounding box\n if (spatialFilter.getByBoundingBox() != null) {\n double top = spatialFilter.getByBoundingBox().getTopLeft().getLatitude();\n double left = spatialFilter.getByBoundingBox().getTopLeft().getLongitude();\n double bottom = spatialFilter.getByBoundingBox().getBottomRight().getLatitude();\n double right = spatialFilter.getByBoundingBox().getBottomRight().getLongitude();\n\n if (top < bottom) {\n return getViolation(context, String.format(\"top corner is below bottom corner: %s vs. %s\", top, bottom));\n } else if (left > right) {\n return getViolation(context, String.format(\"left corner and right corner are flipped: %s vs. 
%s\", left, right));\n } else if (top == bottom) {\n return getViolation(context, String.format(\"top latitude cannot be the same as bottom latitude: %s == %s\", top, bottom));\n } else if (left == right) {\n return getViolation(context, String.format(\"left longitude cannot be the same as right longitude: %s == %s\", left, right));\n }\n }\n\n if (spatialFilter.getByGeoPolygon() != null) {\n List<Coordinate> coordinates = spatialFilter.getByGeoPolygon().getCoordinates();\n Coordinate start = coordinates.get(0);\n if (start.equals(coordinates.get(coordinates.size() - 1))) {\n if (coordinates.size() < 4) {\n return getViolation(context, \"too few coordinates defined for geo polygon query\");\n }\n } else {\n if (coordinates.size() < 3) {\n return getViolation(context, \"too few coordinates defined for geo polygon query\");\n }\n }\n }\n\n return true;\n }\n\n private boolean getViolation(ConstraintValidatorContext context, String message) {\n context.disableDefaultConstraintViolation();\n context.buildConstraintViolationWithTemplate(message).addConstraintViolation();\n return false;\n }\n}\n" }, { "alpha_fraction": 0.7058318853378296, "alphanum_fraction": 0.7126929759979248, "avg_line_length": 28.174999237060547, "blob_id": "76ac300742e51e13a265e9510e6f7d727ebcaae8", "content_id": "f854e3d02e407b5ea6c4e89bea11d48e9ebe72e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1166, "license_type": "permissive", "max_line_length": 75, "num_lines": 40, "path": "/osdu-r2/os-qa/src/main/java/com/osdu/core/data/provider/DataProviders.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.core.data.provider;\n\nimport org.testng.annotations.DataProvider;\n\nimport java.util.Iterator;\n\n/**\n * Supply tests with the required data\n */\npublic class DataProviders {\n String path = \"src/test/resources/data/TestedData.json\";\n String dataBlockName = \"TestedData\";\n\n /**\n * Specified data provider for the additional block\n *\n * @return iterator\n */\n @DataProvider\n public Iterator<Object[]> testedData() {\n DataProviderImpl.fillInList(path, dataBlockName);\n return DataProviderImpl.mainIterator();\n }\n}" }, { "alpha_fraction": 0.6650887727737427, "alphanum_fraction": 0.6681656837463379, "avg_line_length": 38.12963104248047, "blob_id": "3376638315d2672842c7848974f9a23a688b79a1", "content_id": "80bc05faf735a38266aad15a6dd28987e3d8f160", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4225, "license_type": "permissive", "max_line_length": 125, "num_lines": 108, "path": "/osdu-r2/os-qa/src/test/java/com/osdu/workflow/e2e/WorkflowStatusAnyCloudTests.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 
(the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.workflow.e2e;\n\nimport com.osdu.core.data.provider.DataProviders;\nimport com.osdu.core.endpoints.factories.FactoriesHolder;\nimport com.osdu.core.reporter.TestReporter;\nimport io.qameta.allure.Description;\nimport io.qameta.allure.restassured.AllureRestAssured;\nimport io.restassured.response.Response;\nimport org.apache.commons.lang3.StringUtils;\nimport org.testng.annotations.Test;\n\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.UUID;\n\nimport static com.osdu.common.FilesKeeper.requestForWorkflowStatusTemplate;\nimport static com.osdu.core.data.parser.JsonParser.readJson;\nimport static io.restassured.RestAssured.given;\nimport static org.apache.http.HttpStatus.*;\n\npublic class WorkflowStatusAnyCloudTests extends BaseWorkflowService {\n FactoriesHolder factoriesHolder = new FactoriesHolder();\n\n /**\n * Services paths\n */\n String getWorkflowStatus = factoriesHolder.remoteFactoryCreator().getWorkflowService(\"getStatus\");\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with all required fields and without auth token\")\n public void i1_checkWorkflowStatusWithoutHeaders(Map<String, String> data) {\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestForWorkflowStatusTemplate).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n Response ingestResponse = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(new HashMap<>()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n .post(getWorkflowStatus);\n\n ingestResponse\n .then()\n .statusCode(SC_UNAUTHORIZED)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with empty body and with auth token\")\n public void i2_checkWorkflowStatusWithEmptyBody(Map<String, String> data) {\n given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(StringUtils.EMPTY)\n .when()\n .post(getWorkflowStatus)\n .then()\n .statusCode(SC_BAD_REQUEST)\n .and()\n .log()\n .all();\n }\n\n @Test(dataProvider = \"testedData\", dataProviderClass = DataProviders.class)\n @Description(\"Send request with not existed id and with auth token\")\n public void i3_checkWorkflowStatusWithNotExistedFileId(Map<String, String> data) {\n String uniqueID = UUID.randomUUID().toString();\n TestReporter.reportStep(\"Create unique id %s\", uniqueID);\n\n String bodyRequestWithTheUniqueId = String.format((readJson(requestForWorkflowStatusTemplate).toString()), uniqueID);\n TestReporter.reportStep(\"Insert unique id into request %s\", bodyRequestWithTheUniqueId);\n\n Response ingestResponse = given()\n .filter(new AllureRestAssured())\n .spec(baseRequestSpec(specifiedHeadersSet()))\n .body(bodyRequestWithTheUniqueId)\n .when()\n 
.post(getWorkflowStatus);\n\n ingestResponse\n .then()\n .statusCode(SC_NOT_FOUND)\n .and()\n .log()\n .all();\n }\n}" }, { "alpha_fraction": 0.7786651253700256, "alphanum_fraction": 0.7849401235580444, "avg_line_length": 37.9555549621582, "blob_id": "fe3fa9e704dabd1b317ff848901e71028a62dae9", "content_id": "3a6be83b8932527e8f956318429486ca51c2999a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1753, "license_type": "permissive", "max_line_length": 98, "num_lines": 45, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/function/JobStatusFunction.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.function;\n\nimport com.osdu.model.job.IngestJobStatusDto;\nimport com.osdu.model.job.IngestJobStatusRequestPayload;\nimport com.osdu.service.JobStatusService;\nimport java.util.function.Function;\nimport lombok.RequiredArgsConstructor;\nimport lombok.extern.slf4j.Slf4j;\nimport org.springframework.messaging.Message;\nimport org.springframework.messaging.support.GenericMessage;\nimport org.springframework.stereotype.Component;\n\n@Component\n@Slf4j\n@RequiredArgsConstructor\npublic class JobStatusFunction implements\n Function<Message<IngestJobStatusRequestPayload>, Message<IngestJobStatusDto>> {\n\n final JobStatusService jobStatusService;\n\n @Override\n public Message<IngestJobStatusDto> apply(Message<IngestJobStatusRequestPayload> objectMessage) {\n log.debug(\"Ingest job status request received, with following parameters: {}\", objectMessage);\n final IngestJobStatusDto jobStatus = jobStatusService\n .getStatus(objectMessage.getPayload().getJobId(), objectMessage.getHeaders());\n log.debug(\"Ingest job status result ready, request: {}, result:{}\", objectMessage, jobStatus);\n return new GenericMessage<>(jobStatus);\n }\n}\n" }, { "alpha_fraction": 0.7007355690002441, "alphanum_fraction": 0.7092528343200684, "avg_line_length": 34.875, "blob_id": "17135e9b9e083141028ff4c3df4b1e35ec39981c", "content_id": "40095e41ae4f5f044f876b51c7f88c785189dc7d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2583, "license_type": "permissive", "max_line_length": 96, "num_lines": 72, "path": "/osdu-r2/os-core-common/src/test/java/org/opengroup/osdu/core/common/model/http/AppExceptionTest.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an 
\"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.http;\n\nimport org.apache.http.HttpStatus;\nimport org.junit.Test;\nimport org.opengroup.osdu.core.common.model.http.AppError;\nimport org.opengroup.osdu.core.common.model.http.AppException;\n\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertNotNull;\n\npublic class AppExceptionTest {\n\n @Test\n public void constructorTest() {\n AppException exception = new AppException(200, \"unknown error\", \"this error occurred:\");\n assertNotNull(exception);\n\n AppError error = exception.getError();\n assertNotNull(error);\n\n assertEquals(200, error.getCode());\n assertEquals(\"unknown error\", error.getReason());\n assertEquals(\"this error occurred:\", error.getMessage());\n }\n\n @Test\n public void testForbidden() {\n String debuggingInfo = \"dummy debuggingInfo\";\n AppException exception = AppException.createForbidden(debuggingInfo);\n assertNotNull(exception);\n\n AppError error = exception.getError();\n assertNotNull(error);\n\n assertEquals(HttpStatus.SC_FORBIDDEN, error.getCode());\n assertEquals(\"Access denied\", error.getReason());\n assertEquals(\"The user is not authorized to perform this action\", error.getMessage());\n assertEquals(debuggingInfo, error.getDebuggingInfo());\n }\n\n @Test\n public void testUnauthorized() {\n String debuggingInfo = \"dummy debuggingInfo\";\n AppException exception = AppException.createUnauthorized(debuggingInfo);\n assertNotNull(exception);\n\n AppError error = exception.getError();\n assertNotNull(error);\n\n assertEquals(HttpStatus.SC_UNAUTHORIZED, error.getCode());\n assertEquals(\"Unauthorized\", error.getReason());\n assertEquals(\"The user is not authorized to perform this action\", error.getMessage());\n assertEquals(debuggingInfo, error.getDebuggingInfo());\n }\n}\n" }, { "alpha_fraction": 0.7057557702064514, "alphanum_fraction": 0.7127487659454346, "avg_line_length": 31.05172348022461, "blob_id": "e6d008726edc5fc146ef9ba4a5acab93e0f49453", "content_id": "0dbc20acf1a7698d1a20dff1d25006f6cd8db67a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1859, "license_type": "permissive", "max_line_length": 75, "num_lines": 58, "path": "/compatibility-layer/service/ingest/src/main/java/com/osdu/service/helper/IngestionHelper.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.osdu.service.helper;\n\nimport com.osdu.model.ResourceTypeId;\nimport com.osdu.model.delfi.Acl;\nimport java.util.Map;\nimport java.util.UUID;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class IngestionHelper {\n\n /**\n * Generate SRN like 
\"srn:{type_id}:{uuid}:{1 as version}\".\n * @param resourceTypeId resource type id\n * @return generated SRN with 1st version\n */\n public static String generateSrn(ResourceTypeId resourceTypeId) {\n String uuid = UUID.randomUUID().toString().replace(\"-\", \"\");\n return String.format(\"srn:%s:%s:1\", resourceTypeId.getType(), uuid);\n }\n\n /**\n * Put 1st version if type ID has no version.\n * @param resourceTypeId resource type ID\n * @return prepared resource type ID with version\n */\n public static String prepareTypeId(String resourceTypeId) {\n ResourceTypeId typeId = new ResourceTypeId(resourceTypeId);\n return typeId.hasVersion() ? resourceTypeId : resourceTypeId + \"1\";\n }\n\n /**\n * Build Acl from group email map.\n */\n public static Acl getAcl(Map<String, String> groupEmailByName) {\n return Acl.builder()\n .owner(groupEmailByName.get(\"data.default.owners\"))\n .viewer(groupEmailByName.get(\"data.default.viewers\"))\n .build();\n }\n\n}\n" }, { "alpha_fraction": 0.7737798094749451, "alphanum_fraction": 0.779836118221283, "avg_line_length": 30.897727966308594, "blob_id": "94fcacb034043c4b6e734d8980a941e1222c4bfa", "content_id": "173f2e4e79ad4929acf369b722b805fd27b12230", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2807, "license_type": "permissive", "max_line_length": 92, "num_lines": 88, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/storage/Record.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.storage;\n\nimport java.util.Map;\nimport java.util.UUID;\n\nimport javax.validation.Valid;\nimport javax.validation.constraints.NotEmpty;\nimport javax.validation.constraints.NotNull;\nimport javax.validation.constraints.Pattern;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonInclude.Include;\nimport org.opengroup.osdu.core.common.model.entitlements.Acl;\nimport org.opengroup.osdu.core.common.model.legal.Legal;\nimport org.opengroup.osdu.core.common.model.entitlements.validation.ValidAcl;\nimport org.opengroup.osdu.core.common.model.storage.validation.ValidKind;\nimport org.opengroup.osdu.core.common.model.legal.validation.ValidLegal;\nimport org.opengroup.osdu.core.common.model.storage.validation.ValidationDoc;\n\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.AllArgsConstructor;\nimport lombok.Data;\nimport lombok.NoArgsConstructor;\n\n@Data\n@AllArgsConstructor\n@NoArgsConstructor\n@ValidLegal\npublic class Record {\n\n\tprivate static final String DATALAKE_RECORD_PREFIX = \"doc\";\n\n\t@Pattern(regexp = ValidationDoc.RECORD_ID_REGEX, message = ValidationDoc.INVALID_RECORD_ID)\n\t@ApiModelProperty(value = SwaggerDoc.RECORD_ID_DESCRIPTION,\n\t\t\trequired = true,\n\t\t\texample = 
SwaggerDoc.RECORD_ID_EXAMPLE)\n\tprivate String id;\n\n\tprivate Long version;\n\n\t@ValidKind\n\t@ApiModelProperty(value = SwaggerDoc.SCHEMA_REQUEST_KIND,\n\t\t\trequired = true,\n\t\t\texample = SwaggerDoc.RECORD_KIND_EXAMPLE)\n\tprivate String kind;\n\n\t@NotNull(message = ValidationDoc.RECORD_ACL_NOT_EMPTY)\n\t@ValidAcl\n\tprivate Acl acl;\n\n\t@Valid\n\tprivate Legal legal;\n\n\t@NotEmpty(message = ValidationDoc.RECORD_PAYLOAD_NOT_EMPTY)\n\t@JsonInclude(Include.ALWAYS)\n\tprivate Map<String, Object> data;\n\n\tprivate RecordAncestry ancestry;\n\n\tprivate Map<String, Object>[] meta;\n\n\tpublic void createNewRecordId(String tenant) {\n\t\tString uuid = UUID.randomUUID().toString().replace(\"-\", \"\");\n\t\tString dlId = String.format(\"%s:%s:%s\", tenant, DATALAKE_RECORD_PREFIX, uuid);\n\t\tthis.setId(dlId);\n\t}\n\n\tpublic static boolean isRecordIdValid(String recordId, String tenant) {\n\t\treturn recordId.split(\":\")[0].equalsIgnoreCase(tenant);\n\t}\n}\n" }, { "alpha_fraction": 0.7709090709686279, "alphanum_fraction": 0.7770909070968628, "avg_line_length": 35.18421173095703, "blob_id": "d3ffa7f5c4d638ca1b2a82256d5882a1206804aa", "content_id": "f8f43575cb9b805f706c6fe1d35a9de66b61f36f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2750, "license_type": "permissive", "max_line_length": 106, "num_lines": 76, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/legal/jobs/LegalTagConsistencyValidator.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.legal.jobs;\n\nimport org.opengroup.osdu.core.common.model.legal.InvalidTagWithReason;\nimport org.opengroup.osdu.core.common.logging.JaxRsDpsLog;\nimport org.opengroup.osdu.core.common.legal.ILegalService;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.context.annotation.RequestScope;\n\nimport java.util.ArrayList;\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Set;\n\n@Component\n@RequestScope\npublic class LegalTagConsistencyValidator {\n\n\t@Autowired\n\tprivate ILegalService legalService;\n\n\t@Autowired\n\tprivate JaxRsDpsLog logger;\n\n\tpublic LegalTagChangedCollection checkLegalTagStatusWithLegalService(LegalTagChangedCollection dto) {\n\t\tList<LegalTagChanged> statusChangedTags = dto.getStatusChangedTags();\n\n\t\tSet<String> requestedLegalTagNames = new HashSet<>();\n\t\tfor (LegalTagChanged lt : statusChangedTags) {\n\t\t\trequestedLegalTagNames.add(lt.getChangedTagName());\n\t\t}\n\n\t\tInvalidTagWithReason[] invalidLegalTags = this.legalService.getInvalidLegalTags(requestedLegalTagNames);\n\n\t\tList<String> invalidLegalTagsNames = new ArrayList<>();\n\t\tfor (InvalidTagWithReason 
legaltag : invalidLegalTags) {\n\t\t\tinvalidLegalTagsNames.add(legaltag.getName());\n\t\t}\n\n\t\tfor (int i = 0; i < statusChangedTags.size(); i++) {\n\t\t\tLegalTagChanged lt = statusChangedTags.get(i);\n\t\t\tif (lt.getChangedTagStatus().equalsIgnoreCase(\"incompliant\")\n\t\t\t\t\t&& !invalidLegalTagsNames.contains(lt.getChangedTagName())) {\n\t\t\t\tthis.logger.warning(\"Inconsistency between pubsub message and legal: \" + lt.getChangedTagName());\n\t\t\t\tstatusChangedTags.remove(lt);\n\t\t\t}\n\t\t\tif (lt.getChangedTagStatus().equalsIgnoreCase(\"compliant\")\n\t\t\t\t\t&& invalidLegalTagsNames.contains(lt.getChangedTagName())) {\n\t\t\t\tthis.logger.warning(\"Inconsistency between pubsub message and legal: \" + lt.getChangedTagName());\n\t\t\t\tstatusChangedTags.remove(lt);\n\t\t\t}\n\t\t}\n\n\t\tLegalTagChangedCollection validOutput = new LegalTagChangedCollection();\n\t\tvalidOutput.setStatusChangedTags(statusChangedTags);\n\n\t\treturn validOutput;\n\t}\n}\n" }, { "alpha_fraction": 0.7538330554962158, "alphanum_fraction": 0.7606473565101624, "avg_line_length": 33.52941131591797, "blob_id": "8b972b19edc21fb40fee9cc25aa25b44cf65a14d", "content_id": "12e341c1d18bd0f7aef0ecf52ed32fb3ae542cd7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1174, "license_type": "permissive", "max_line_length": 98, "num_lines": 34, "path": "/osdu-r2/os-ingest/ingest-core/src/main/java/org/opengroup/osdu/ingest/validation/schema/ILoadManifestValidationService.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.ingest.validation.schema;\n\nimport com.networknt.schema.ValidationMessage;\nimport java.util.Set;\nimport org.opengroup.osdu.ingest.model.WorkProductLoadManifest;\n\npublic interface ILoadManifestValidationService {\n\n /**\n * Load Manifests no matter what information is in them have to all be validated against general\n * manifest schema.\n *\n * @param loadManifest manifest received with the request\n * @return {@link Set} of validation messages as errors\n */\n Set<ValidationMessage> validateManifest(WorkProductLoadManifest loadManifest);\n\n}\n" }, { "alpha_fraction": 0.7284626960754395, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 28.33035659790039, "blob_id": "58fe378acaca90c385206c2e94ac2abb90e247b4", "content_id": "a432025e19ad8a31d342e5129eef5081b086d95f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3285, "license_type": "permissive", "max_line_length": 75, "num_lines": 112, "path": "/osdu-r2/os-core-common/src/main/java/org/opengroup/osdu/core/common/model/crs/CrsPropertySet.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n * Copyright 2017-2019, Schlumberger\n *\n * Licensed under the Apache License, 
Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.core.common.model.crs;\n\nimport lombok.Data;\nimport org.springframework.stereotype.Component;\n\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.Map;\nimport java.util.Set;\n\n@Data\n@Component\npublic class CrsPropertySet {\n\tprivate Set<String> xPropertyNames;\n\tprivate Set<String> yPropertyNames;\n\tprivate Set<String> zPropertyNames;\n\tprivate Set<String> nestedPropertyNames;\n\tprivate Map<String, String> propertyPairing;\n\n\tpublic Set<String> getxPropertyNames() {\n\t\tif (this.xPropertyNames == null) {\n\t\t\tthis.xPropertyNames = new HashSet<>();\n\t\t}\n\n\t\tthis.xPropertyNames.add(\"X\");\n\t\tthis.xPropertyNames.add(\"LON\");\n\t\tthis.xPropertyNames.add(\"Longitude\");\n\t\tthis.xPropertyNames.add(\"wlbEwUtm\");\n\t\tthis.xPropertyNames.add(\"wlbEwDesDeg\");\n\t\tthis.xPropertyNames.add(\"TOPHOLEXNG\");\n\t\tthis.xPropertyNames.add(\"TOPHOLEXDD\");\n\t\tthis.xPropertyNames.add(\"BHLongitude\");\n\t\tthis.xPropertyNames.add(\"Utm_X\");\n\n\t\treturn this.xPropertyNames;\n\t}\n\n\tpublic Set<String> getyPropertyNames() {\n\t\tif (this.yPropertyNames == null) {\n\t\t\tthis.yPropertyNames = new HashSet<>();\n\t\t}\n\n\t\tthis.yPropertyNames.add(\"Y\");\n\t\tthis.yPropertyNames.add(\"LAT\");\n\t\tthis.yPropertyNames.add(\"Latitude\");\n\t\tthis.yPropertyNames.add(\"wlbNsUtm\");\n\t\tthis.yPropertyNames.add(\"wlbNsDecDeg\");\n\t\tthis.yPropertyNames.add(\"TOPHOLEYNG\");\n\t\tthis.yPropertyNames.add(\"TOPHOLEYDD\");\n\t\tthis.yPropertyNames.add(\"BHLatitude\");\n\t\tthis.yPropertyNames.add(\"Utm_Y\");\n\n\t\treturn this.yPropertyNames;\n\t}\n\n\tpublic Set<String> getzPropertyNames() {\n\t\tif (this.zPropertyNames == null) {\n\t\t\tthis.zPropertyNames = new HashSet<>();\n\t\t}\n\n\t\tthis.zPropertyNames.add(\"Z\");\n\n\t\treturn this.zPropertyNames;\n\t}\n\n\tpublic Set<String> getNestedPropertyNames() {\n\t\tif (this.nestedPropertyNames == null) {\n\t\t\tthis.nestedPropertyNames = new HashSet<>();\n\t\t}\n\n\t\tthis.nestedPropertyNames.add(\"projectOutlineLocalGeographic\");\n\t\tthis.nestedPropertyNames.add(\"projectOutlineProjected\");\n\n\t\treturn this.nestedPropertyNames;\n\t}\n\n\tpublic Map<String, String> getPropertyPairing() {\n\t\tif (this.propertyPairing == null) {\n\t\t\tthis.propertyPairing = new HashMap<>();\n\t\t}\n\n\t\tthis.propertyPairing.put(\"x\", \"y\");\n\t\tthis.propertyPairing.put(\"lon\", \"lat\");\n\t\tthis.propertyPairing.put(\"long\", \"lat\");\n\t\tthis.propertyPairing.put(\"longitude\", \"latitude\");\n\t\tthis.propertyPairing.put(\"wlbewutm\", \"wlbnsutm\");\n\t\tthis.propertyPairing.put(\"wlbewdesdeg\", \"wlbnsdecdeg\");\n\t\tthis.propertyPairing.put(\"topholexng\", \"topholeyng\");\n\t\tthis.propertyPairing.put(\"topholexdd\", \"topholeydd\");\n\t\tthis.propertyPairing.put(\"bhlongitude\", \"bhlatitude\");\n\t\tthis.propertyPairing.put(\"utm_x\", \"utm_y\");\n\n\t\treturn this.propertyPairing;\n\t}\n}\n" }, { "alpha_fraction": 0.7382199168205261, "alphanum_fraction": 
0.7452006936073303, "avg_line_length": 31.742856979370117, "blob_id": "fbeb27c25229f44b4d68689687581c8b762070da", "content_id": "43600021b77d4039adaf799ca9554d1ee8f2a91c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1146, "license_type": "permissive", "max_line_length": 85, "num_lines": 35, "path": "/osdu-r2/os-workflow/workflow-core/src/main/java/org/opengroup/osdu/workflow/provider/interfaces/IIngestionStrategyRepository.java", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.opengroup.osdu.workflow.provider.interfaces;\n\nimport org.opengroup.osdu.core.common.model.WorkflowType;\nimport org.opengroup.osdu.workflow.model.IngestionStrategy;\n\npublic interface IIngestionStrategyRepository {\n\n /**\n * Finds a dag based on parameters.\n *\n * @param workflowType type of workflow\n * @param dataType data type\n * @param userId user id\n * @return ingestion strategy\n */\n IngestionStrategy findByWorkflowTypeAndDataTypeAndUserId(WorkflowType workflowType,\n String dataType, String userId);\n\n}\n" }, { "alpha_fraction": 0.5729429125785828, "alphanum_fraction": 0.5827035903930664, "avg_line_length": 42.7064208984375, "blob_id": "e749473d5a5fdf14e7291ed2994714bee137f081", "content_id": "36a1689cc7b0396a44d10d8658dbd23d0cc848f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9528, "license_type": "permissive", "max_line_length": 117, "num_lines": 218, "path": "/osdu-r2/os-python-sdk/osdu_api/test/test_record_client.py", "repo_name": "google/framework-for-osdu", "src_encoding": "UTF-8", "text": "# Copyright 2020 Google LLC\n# Copyright 2020 Amazon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport mock\nimport json\nimport unittest\nimport types\nfrom osdu_api.storage.record_client import RecordClient\nfrom osdu_api.base_client import BaseClient\nfrom osdu_api.model.http_method import HttpMethod\nfrom osdu_api.model.record import Record\nfrom osdu_api.model.legal_compliance import LegalCompliance\nfrom osdu_api.model.acl import Acl\nfrom osdu_api.model.legal import Legal\nfrom osdu_api.model.record_ancestry import RecordAncestry\n\n\nclass TestRecordClient(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs) \n self.test_record_dict = {\n 'acl': 
{\n 'owners':[\n '[email protected]'\n ],\n 'viewers':[\n '[email protected]'\n ]\n },\n 'ancestry':{\n 'parents':[]\n },\n 'data':{'id':'test'},\n 'id':'opendes:welldb:123456',\n 'kind':'opendes:welldb:wellbore:1.0.0',\n 'legal':{\n 'legaltags':['opendes-storage-1579034803194'],\n 'otherRelevantDataCountries':['US'],\n 'status':'compliant'\n },\n 'meta':[\n {}\n ],\n 'version':0\n }\n\n self.test_record_str = \"\"\"{\n \"acl\": {\n \"owners\":[\n \"[email protected]\"\n ],\n \"viewers\":[\n \"[email protected]\"\n ]\n },\n \"ancestry\":{\n \"parents\":[]\n },\n \"data\":{\"id\":\"test\"},\n \"id\":\"opendes:welldb:123456\",\n \"kind\":\"opendes:welldb:wellbore:1.0.0\",\n \"legal\":{\n \"legaltags\":[\"opendes-storage-1579034803194\"],\n \"otherRelevantDataCountries\":[\"US\"],\n \"status\":\"compliant\"\n },\n \"meta\":[\n {}\n ],\n \"version\":0\n }\"\"\"\n\n acl = Acl(['[email protected]'], ['[email protected]'])\n legal = Legal(['opendes-storage-1579034803194'], ['US'], LegalCompliance.compliant)\n ancestry = RecordAncestry([])\n id = 'opendes:welldb:123456'\n kind = 'opendes:welldb:wellbore:1.0.0'\n meta = [{}]\n version = 0\n data = {'id': 'test'}\n self.test_record = Record(id, version, kind, acl, legal, data, ancestry, meta)\n\n @mock.patch.object(BaseClient, '_get_bearer_token', return_value=\"stubbed\")\n @mock.patch.object(BaseClient, 'make_request', return_value=\"response\")\n @mock.patch.object(BaseClient, '_read_variables', return_value=\"stubbed\")\n def test_create_update_records(self, get_bearer_token_mock, make_request_mock, parse_config_mock):\n # Arrange\n record_client = RecordClient()\n record_client.storage_url = 'stubbed url'\n record_client.headers = {}\n records = [\n self.test_record_dict\n ]\n headers = {'test': 'test-value'}\n\n # Act\n response = record_client.create_update_records_from_dict(records, headers=headers)\n\n # Assert\n make_request_mock.assert_called_with(data=json.dumps(records), method=HttpMethod.PUT, url='stubbed url',\n add_headers=headers)\n\n @mock.patch.object(BaseClient, '_get_bearer_token', return_value=\"stubbed\")\n @mock.patch.object(BaseClient, 'make_request', return_value=\"response\")\n @mock.patch.object(BaseClient, '_read_variables', return_value=\"stubbed\")\n def test_create_update_records_model_record(self, get_bearer_token_mock, make_request_mock, parse_config_mock):\n # Arrange\n record_client = RecordClient()\n record_client.storage_url = 'stubbed url'\n record_client.headers = {}\n\n make_request_mock.return_value = 'called'\n headers = {}\n\n # Act\n response = record_client.create_update_records([self.test_record], headers=headers)\n\n # Assert\n assert response == make_request_mock.return_value\n\n @mock.patch.object(BaseClient, '_get_bearer_token', return_value=\"stubbed\")\n @mock.patch.object(BaseClient, 'make_request', return_value=\"response\")\n @mock.patch.object(BaseClient, '_read_variables', return_value=\"stubbed\")\n def test_get_latest_record_version(self, get_bearer_token_mock, make_request_mock, parse_config_mock):\n # Arrange\n record_client = RecordClient()\n record_client.storage_url = 'stubbed url'\n record_client.headers = {}\n record_id = 'test'\n make_request_mock.return_value = types.SimpleNamespace()\n make_request_mock.return_value.content = self.test_record_str\n request_params = {'attribute': []}\n headers = {'test': 'test-value'}\n\n # Act\n record = record_client.get_latest_record(record_id, headers=headers)\n\n # Assert\n make_request_mock.assert_called_with(url=record_client.storage_url + 
'/test', params=request_params,\n method=HttpMethod.GET, add_headers=headers)\n assert record.acl.owners == self.test_record.acl.owners\n assert record.acl.viewers == self.test_record.acl.viewers\n assert record.id == self.test_record.id\n assert record.kind == self.test_record.kind\n assert record.legal.status == self.test_record.legal.status\n assert record.legal.legaltags == self.test_record.legal.legaltags\n assert record.legal.other_relevant_data_countries == self.test_record.legal.other_relevant_data_countries\n assert record.meta == self.test_record.meta\n assert record.version == self.test_record.version\n assert record.ancestry.parents == self.test_record.ancestry.parents\n\n @mock.patch.object(BaseClient, '_get_bearer_token', return_value=\"stubbed\")\n @mock.patch.object(BaseClient, 'make_request', return_value=\"response\")\n @mock.patch.object(BaseClient, '_read_variables', return_value=\"stubbed\")\n def test_get_specific_record_version(self, get_bearer_token_mock, make_request_mock, parse_config_mock):\n # Arrange\n record_client = RecordClient()\n record_client.storage_url = 'stubbed url'\n record_client.headers = {}\n record_id = 'test'\n make_request_mock.return_value = types.SimpleNamespace()\n make_request_mock.return_value.content = self.test_record_str\n request_params = {'attribute': []}\n version = 123\n headers = {'test': 'test-value'}\n\n # Act\n record = record_client.get_specific_record(record_id, version, headers)\n\n # Assert\n make_request_mock.assert_called_with(url=record_client.storage_url + '/test/123', params=request_params,\n method=HttpMethod.GET, add_headers=headers)\n assert record.acl.owners == self.test_record.acl.owners\n assert record.acl.viewers == self.test_record.acl.viewers\n assert record.id == self.test_record.id\n assert record.kind == self.test_record.kind\n assert record.legal.status == self.test_record.legal.status\n assert record.legal.legaltags == self.test_record.legal.legaltags\n assert record.legal.other_relevant_data_countries == self.test_record.legal.other_relevant_data_countries\n assert record.meta == self.test_record.meta\n assert record.version == self.test_record.version\n assert record.ancestry.parents == self.test_record.ancestry.parents\n\n @mock.patch.object(BaseClient, '_get_bearer_token', return_value=\"stubbed\")\n @mock.patch.object(BaseClient, 'make_request', return_value=\"response\")\n @mock.patch.object(BaseClient, '_read_variables', return_value=\"stubbed\")\n def test_get_record_versions(self, get_bearer_token_mock, make_request_mock, parse_config_mock):\n # Arrange\n record_client = RecordClient()\n record_client.storage_url = 'stubbed url'\n record_client.headers = {}\n record_id = 'test'\n make_request_mock.return_value = types.SimpleNamespace()\n make_request_mock.return_value.content = b'{\"versions\": [123]}'\n request_params = {'attribute': []}\n headers = {'test': 'test-value'}\n\n # Act\n versions = record_client.get_record_versions(record_id, headers)\n\n # Assert\n make_request_mock.assert_called_with(url=record_client.storage_url + '/versions/test', method=HttpMethod.GET,\n add_headers=headers)\n assert versions == [123]\n" } ]
213
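A minimal usage sketch for the RecordClient exercised in test_record_client.py above. This is an illustration only: it assumes the storage URL and credentials are already supplied through whatever configuration BaseClient._read_variables reads, and the record id is the one used in the tests.

    from osdu_api.storage.record_client import RecordClient

    # fetch the newest version of a record by id, then inspect a few fields
    client = RecordClient()
    record = client.get_latest_record("opendes:welldb:123456", headers={})
    print(record.kind, record.version, record.acl.owners)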
huangpengling/proj_demo
https://github.com/huangpengling/proj_demo
5c92c713f419aed1cb692a4fc0459d4b85c1c1f6
29f256555c1f1422c6da0891872be0ee587d93c2
4274147562a43d5593493c41422ddae20adaded8
refs/heads/master
2021-05-14T00:09:22.647926
2018-01-07T03:46:24
2018-01-07T03:46:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.800000011920929, "avg_line_length": 6.818181991577148, "blob_id": "1497ba22aa86d87314b631988fb8910b62f08cdc", "content_id": "d4d6b60608e0704ecd4a75b536765dd0d80b9e5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 195, "license_type": "no_license", "max_line_length": 16, "num_lines": 11, "path": "/README.md", "repo_name": "huangpengling/proj_demo", "src_encoding": "UTF-8", "text": "项目迭代一的内容\n演示的例子是bugfree。\n\n知识点:\n1.项目组织结构\n2.ddt的数据驱动\n3.生成html报告\n\n\n自动部署:\n例如把驱动自动拷贝到指定的目录。" }, { "alpha_fraction": 0.7439024448394775, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 19.625, "blob_id": "e5a7ae27b271125b5899a95e39247ed525dff303", "content_id": "33e706ff73c301383129f6d34e62afb4ad11172a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 60, "num_lines": 8, "path": "/lib/utils.py", "repo_name": "huangpengling/proj_demo", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# author: samren\nimport shutil\nimport os\n\nshutil.copyfile(\"HTMLTestRunner.py\", \"c:\\HTMLTestRunner.py\")\n\nos.system(\"del c:\\\\HTMLTestRunner.py\")" } ]
2
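The README above lists ddt-based data-driven tests and HTML reports among the iteration's topics; a minimal ddt sketch of that pattern follows (the LoginTest case and its credential pairs are hypothetical, for illustration only):

    import unittest
    from ddt import ddt, data, unpack

    @ddt
    class LoginTest(unittest.TestCase):
        # each tuple is unpacked into (user, password) and run as a separate test
        @data(("admin", "admin123"), ("guest", "guest123"))
        @unpack
        def test_login(self, user, password):
            self.assertTrue(user and password)

    if __name__ == "__main__":
        unittest.main()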
bojanvujatovic/kaggle-MLSP
https://github.com/bojanvujatovic/kaggle-MLSP
3630c9fd1650c32457fdfa0eb7d6239575766268
b0ee41fc5d6a9f6cfd03e863899ec39baace762c
99d9fac755694b0f5cd61fa0ad960e0814d979d5
refs/heads/master
2021-01-17T06:26:57.979731
2014-04-03T11:49:04
2014-04-03T11:49:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4568965435028076, "alphanum_fraction": 0.48706895112991333, "avg_line_length": 16.923076629638672, "blob_id": "6fea8a66774f842fed4366dd16f0261d8f5a42fd", "content_id": "8d30f5c2b3ec0de084b9d32382ec22ec8f21a74e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/histFeatures/a.py", "repo_name": "bojanvujatovic/kaggle-MLSP", "src_encoding": "UTF-8", "text": "import sys\nimport time\n \ndef main():\n print \"dfsdf\"\n for i in range(399):\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\"%3d/399\" % i)\n sys.stdout.flush()\n \n \nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5196227431297302, "alphanum_fraction": 0.5372680425643921, "avg_line_length": 28.89090919494629, "blob_id": "42e226f18d29780c9111d58212cf067fe72fa055", "content_id": "75cfadff7c696196724af713f552d125f8f829f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3287, "license_type": "no_license", "max_line_length": 136, "num_lines": 110, "path": "/histFeatures/histFeatures.py", "repo_name": "bojanvujatovic/kaggle-MLSP", "src_encoding": "UTF-8", "text": "from string import split\nfrom sklearn.metrics import auc_score\nfrom numpy import array\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.grid_search import GridSearchCV\nfrom numpy import array\nfrom sklearn import cross_validation, metrics\n\n\n\ndef processAndGetData():\n hist_file = open('C:\\Users\\Bojan\\Desktop\\ML\\Kaggle\\Birds\\mlsp_contest_dataseta\\supplemental_data\\\\histogram_of_segments.txt', 'r')\n labels_file = open('C:\\Users\\Bojan\\Desktop\\ML\\Kaggle\\Birds\\mlsp_contest_dataseta\\essential_data\\\\rec_labels_test_hidden.txt', 'r')\n \n Xtrain = []\n idtrain = []\n Xtest = []\n idtest = []\n ytrain = [[] for i in range(19)]\n \n hist_line = hist_file.readline().split(',')\n label_line = labels_file.readline().split(',')\n \n label_line = labels_file.readline().split(',')\n hist_line = hist_file.readline().split(',')[1:]\n \n while len(label_line[0]) > 0:\n \n features = [float(hist_line[i]) for i in range(len(hist_line))]\n id = int(label_line[0])\n \n if len(label_line) > 1 and '?' 
in label_line[1]:\n idtest.append(id)\n Xtest.append(features)\n else:\n labels = [int(label_line[i]) for i in range(1, len(label_line))]\n \n idtrain.append(id)\n Xtrain.append(features)\n \n for i in range(19):\n ytrain[i].append(int(i in labels))\n \n label_line = labels_file.readline().split(',')\n hist_line = hist_file.readline().split(',')[1:]\n \n hist_file.close()\n labels_file.close()\n \n return (Xtrain, ytrain, Xtest, idtrain, idtest)\n \ndef calcAUCScoreCV(X, y, c):\n ypred = []\n \n for i in range(len(X)):\n Xtrain = X[0:i] + X[i+1:]\n ytrain = y[0:i] + y[i+1:]\n \n XCV = X[i]\n yCV = y[i]\n \n clf = LogisticRegression(penalty='l2', dual=False, C=c, class_weight='auto')\n clf.fit(Xtrain, ytrain)\n ypred.append(int(clf.predict_proba(XCV)[0][1]>0.5))\n \n fpr, tpr, thresholds = metrics.roc_curve(y, ypred, pos_label=1)\n auc = metrics.auc(fpr,tpr)\n return auc\n\ndef trainOneClass(X, y):\n Cs = [0.00005*(10**i) for i in range(6)] + [0.00001*(10**i) for i in range(7)]\n scores = []\n scoremax = -1\n \n for C in Cs:\n score = calcAUCScoreCV(X, y, C)\n \n scores.append(scores)\n \n if score > scoremax:\n Cmax = C\n scoremax = score\n \n #print C, score\n \n return Cmax\n \ndef main():\n (Xtrain, ytrain, Xtest, idtrain, idtest) = processAndGetData()\n\n n = 250\n for k in range(19):\n # Learning only class 0\n c = trainOneClass(Xtrain[0:n], ytrain[k][0:n])\n \n clf = LogisticRegression(penalty='l2', dual=False, C=c, class_weight='auto')\n \n clf.fit(Xtrain[0:n], ytrain[k][0:n])\n \n y = clf.predict_proba(Xtrain[n:])\n y1 = [int(y[i][1]>0.5) for i in range(len(y))]\n \n \n fpr, tpr, thresholds = metrics.roc_curve(ytrain[k][n:], y1, pos_label=1)\n auc = metrics.auc(fpr,tpr)\n print auc\n \n \nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.698924720287323, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 22.25, "blob_id": "e8c6cff1babd5cfa2a08067b78033f3cff8eac36", "content_id": "c370213d73e801f56e51468207016471b5d224ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 93, "license_type": "no_license", "max_line_length": 67, "num_lines": 4, "path": "/README.md", "repo_name": "bojanvujatovic/kaggle-MLSP", "src_encoding": "UTF-8", "text": "kaggle-MLSP\n===========\n\nCode for Kaggle MLSP 2013 Bird Classification Challenge competition\n" }, { "alpha_fraction": 0.4474368095397949, "alphanum_fraction": 0.4617292582988739, "avg_line_length": 28.196552276611328, "blob_id": "d0ba1bc4e6755d73904e779027c9894731bc442e", "content_id": "cfdbe5cdbdb0668e0119bc68314e81acae028e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8466, "license_type": "no_license", "max_line_length": 134, "num_lines": 290, "path": "/MIML/miml-knn.py", "repo_name": "bojanvujatovic/kaggle-MLSP", "src_encoding": "UTF-8", "text": "from string import split\nfrom sklearn.metrics import auc_score\nfrom sklearn import svm\nfrom numpy import array\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.grid_search import GridSearchCV\nfrom numpy import array\nfrom sklearn import cross_validation, metrics\nfrom sklearn.ensemble import RandomForestClassifier \nfrom scipy.spatial.distance import euclidean\nfrom random import random\nfrom math import sqrt\n\nimport sys\n\n\n\ndef processAndGetData():\n seg_file = open('C:\\Users\\Bojan\\Desktop\\ML\\Kaggle\\Birds\\mlsp_contest_dataseta\\supplemental_data\\\\segment_features.txt', 'r')\n labels_file = 
open('C:\\Users\\Bojan\\Desktop\\ML\\Kaggle\\Birds\\mlsp_contest_dataseta\\essential_data\\\\rec_labels_test_hidden.txt', 'r')\n \n \n \n Xtrain = []\n idtrain = []\n Xtest = []\n idtest = []\n ytrain = [[] for i in range(19)]\n labelstrain = []\n \n seg_line = seg_file.readline().split(',')\n label_line = labels_file.readline().split(',')\n \n label_line = labels_file.readline().split(',')\n seg_line = seg_file.readline().split(',')\n id_seg = int(seg_line[0])\n \n n_sum = len(seg_line) - 2\n n_feat = 0\n sum_feat = [0] * n_sum\n sumsq_feat = [0] * n_sum\n \n \n while len(label_line[0]) > 0:\n id_label = int(label_line[0])\n \n if len(label_line) > 1 and '?' in label_line[1]:\n idtest.append(id_label)\n Xtest.append([])\n \n while id_seg == id_label:\n features = [float(seg_line[i]) for i in range(2, len(seg_line))]\n \n sum_feat = [sum_feat[i] +features[i] for i in range(len(sum_feat))]\n sumsq_feat = [sumsq_feat[i]+features[i]**2 for i in range(len(sum_feat))]\n n_feat += 1\n \n Xtest[-1].append(features)\n \n seg_line = seg_file.readline().split(',')\n if len(seg_line) > 1:\n id_seg = int(seg_line[0])\n else:\n id_seg = -1\n \n else:\n labels = [int(label_line[i]) for i in range(1, len(label_line))]\n \n idtrain.append(id_label)\n labelstrain.append(labels)\n Xtrain.append([])\n \n while id_seg == id_label:\n features = [float(seg_line[i]) for i in range(2, len(seg_line))]\n \n sum_feat = [sum_feat[i] +features[i] for i in range(len(sum_feat))]\n sumsq_feat = [sumsq_feat[i]+features[i]**2 for i in range(len(sum_feat))]\n n_feat += 1\n \n Xtrain[-1].append(features)\n \n seg_line = seg_file.readline().split(',')\n if len(seg_line) > 1:\n id_seg = int(seg_line[0])\n else:\n id_seg = -1\n \n for i in range(19):\n ytrain[i].append(int(i in labels))\n\n label_line = labels_file.readline().split(',')\n \n seg_file.close()\n labels_file.close()\n \n sum_feat = [sum_feat[i]/n_feat for i in range(len(sum_feat))]\n sumsq_feat = [sqrt(sumsq_feat[i]/n_feat - sum_feat[i]**2) for i in range(len(sum_feat))]\n \n print sum_feat\n print sumsq_feat\n \n # print idtrain\n return (Xtrain, ytrain, Xtest, idtrain, idtest, labelstrain, sum_feat, sumsq_feat)\n \ndef normalizeList(l, mean, std):\n return [(l[i]-mean[i])/std[i] for i in range(len(l))]\n \n \ndef calcHausDist(bag1, bag2, mean, std):\n n1 = len(bag1)\n n2 = len(bag2)\n # if n1 == 0 and n2 == 0:\n # return 0.0\n if n1 == 0 or n2 == 0:\n return float(\"+inf\")\n \n total_dist = 0.0\n \n for a in bag1:\n minDist = float(\"+inf\")\n for b in bag2:\n dist = euclidean(normalizeList(a, mean, std), normalizeList(b, mean, std))\n if dist < minDist:\n minDist = dist\n total_dist += minDist\n \n for b in bag2:\n minDist = float(\"+inf\")\n for a in bag1:\n dist = euclidean(normalizeList(b, mean, std), normalizeList(a, mean, std))\n if dist < minDist:\n minDist = dist\n total_dist += minDist\n \n return total_dist / (n1+n2)\n\ndef sortBags(dist, idx):\n n = len(dist)\n \n for i in range(n-1):\n for j in range(i+1, n):\n if dist[i] > dist[j] or (dist[i] == dist[j] and random() < 0.5):\n t = dist[i]\n dist[i] = dist[j]\n dist[j] = t\n t = idx[i]\n idx[i] = idx[j]\n idx[j] = t\n return idx \n\ndef Ztransform(X, labels, ntrain, k, c, mean, std):\n n = len(X)\n Z = [[0]*19 for i in range(n)]\n \n for i in range(n):\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\" Run: %3d/%d\" % (i, n))\n sys.stdout.flush()\n Haus_dist = []\n bag_num = []\n for j in range(n):\n if i != j:\n dist = calcHausDist(X[i], X[j], mean, std)\n if dist < float(\"+inf\"):\n 
Haus_dist.append(dist)\n bag_num.append(j)\n \n bag_num = sortBags(Haus_dist, bag_num)\n \n count = 0\n idx = 0\n while count < k:\n if len(bag_num) <= idx:\n break\n bag = bag_num[idx]\n if bag < ntrain:\n for l in labels[bag]:\n Z[i][l] += 1\n count += 1\n idx += 1\n \n \n count = 0\n idx = 0\n if i < ntrain:\n while count < c:\n if len(bag_num) <= idx:\n break\n bag = bag_num[idx]\n if bag < ntrain:\n for l in labels[i]:\n Z[bag][l] += 1\n count += 1\n idx += 1\n \n # print2DZ(Ztrain, \"Z\")\n return (Z[0:ntrain], Z[ntrain:])\n\ndef calcAUCScoreCV(X, y, n, d):\n ypred = []\n \n for i in range(len(X)):\n Xtrain = X[0:i] + X[i+1:]\n ytrain = y[0:i] + y[i+1:]\n \n XCV = X[i]\n yCV = y[i]\n \n clf = RandomForestClassifier(n_estimators = n, max_depth = d)\n clf.fit(Xtrain, ytrain)\n # print clf.predict(XCV)\n ypred.append(int(clf.predict(XCV)[0]))\n \n # print \" \", y\n # print \" \", ypred\n fpr, tpr, thresholds = metrics.roc_curve(y, ypred, pos_label=1)\n auc = metrics.auc(fpr,tpr)\n return auc\n\ndef trainOneClass(X, y):\n # return (100, 5)\n \n # ns = [40, 100, 200, 300]\n # ds = [10, 20, 40, 60]\n \n ns = [100]\n ds = [ 20]\n \n scores = []\n scoremax = -1\n \n for n in ns:\n for d in ds:\n \n score = calcAUCScoreCV(X, y, n, d)\n print \" Now testing: \", n, d, score\n scores.append(scores)\n \n if score > scoremax:\n nmax = n\n dmax = d\n scoremax = score\n \n return (nmax, dmax)\n\ndef printY(y, ids, name):\n file = open(name + \".csv\", \"w\")\n file.write(\"Id,Probability\\n\")\n \n for i in range(len(ids)):\n for k in range(len(y)):\n file.write(str(ids[i]*100+k)+ \",\" + str(y[k][i]) + \"\\n\")\n \n file.close()\n \ndef print2DZ(array, name):\n file = open(name + \".txt\", \"w\")\n \n for i in range(len(array)):\n for k in range(len(array[i])):\n file.write(str(array[i][k])+ \" \")\n file.write(\"\\n\")\n \n file.close()\n \ndef load2DZ(name):\n ret = []\n file = open(name + \".txt\")\n \n for line in file:\n if len(line) > 1:\n s = line.strip('\\n').strip().split(\" \")\n ret.append([int(ss) for ss in s])\n \n file.close()\n return ret\n \ndef main():\n (Xtrain, ytrain, Xtest, idtrain, idtest, labels, mean, std) = processAndGetData()\n \n (Ztrain, Ztest) = Ztransform(Xtrain+Xtest, labels, len(Xtrain), 8, 7, mean, std)\n print2DZ(Ztrain, \"Z6train\")\n print2DZ(Ztest , \"Z6test\")\n \n \n \n # Ztest = ZtransformTest(Xtest, Xtrain)\n \nif __name__ == \"__main__\":\n main()" } ]
4
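In trainOneClass above (the same line appears in both histFeatures.py and miml-knn.py), scores.append(scores) appends the list to itself; appending score was presumably intended. The hand-rolled leave-one-out AUC loop can also be written with scikit-learn helpers, sketched below as an alternative, not the repo's original evaluation: module paths assume sklearn >= 0.18, newer than the sklearn.cross_validation import used in the repo, and class_weight="balanced" stands in for the since-removed "auto".

    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import roc_auc_score
    from sklearn.model_selection import LeaveOneOut, cross_val_predict

    def loo_auc(X, y, c):
        # held-out probability of the positive class for every sample
        clf = LogisticRegression(penalty="l2", C=c, class_weight="balanced")
        proba = cross_val_predict(clf, X, y, cv=LeaveOneOut(), method="predict_proba")[:, 1]
        return roc_auc_score(y, proba)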
CommitJr/roboto_counter_open
https://github.com/CommitJr/roboto_counter_open
f1868e2f8331c71f71f01d3f1bacff87427c69ed
6d816e4e1cecc528190583a9d45ed3a9303198a2
cb7f7e94406dce2a90260c75005ba6efd85da1d1
refs/heads/master
2022-07-03T12:10:23.684678
2020-05-13T12:32:57
2020-05-13T12:32:57
263,404,968
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6455424427986145, "alphanum_fraction": 0.703544557094574, "avg_line_length": 45.54999923706055, "blob_id": "b61f75b2c7743e011ae06e21ff613430c62e59b9", "content_id": "332a6f9e83242e021222a2e21426716889b05a95", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "permissive", "max_line_length": 83, "num_lines": 20, "path": "/src/controls.py", "repo_name": "CommitJr/roboto_counter_open", "src_encoding": "UTF-8", "text": "import cv2\nfrom src import consts\n\n\ndef set_controls():\n cv2.namedWindow('Controls')\n cv2.resizeWindow('Controls', 640, 320)\n cv2.createTrackbar('area ret min', 'Controls', 3000, 10000, lambda empty: None)\n cv2.createTrackbar('side ret max', 'Controls', 200, 1000, lambda empty: None)\n cv2.createTrackbar('jump X value', 'Controls', 300, 1000, lambda empty: None)\n cv2.createTrackbar('line position', 'Controls', 150, 1000, lambda empty: None)\n cv2.createTrackbar('line offset', 'Controls', 30, 100, lambda empty: None)\n\n\ndef update_controls_values():\n consts.area_ret_min = cv2.getTrackbarPos('area ret min', 'Controls')\n consts.side_ret_max = cv2.getTrackbarPos('side ret max', 'Controls')\n consts.jump_on_x_value = cv2.getTrackbarPos('jump X value', 'Controls')\n consts.pos_line = cv2.getTrackbarPos('line position', 'Controls')\n consts.offset = cv2.getTrackbarPos('line offset', 'Controls')\n" }, { "alpha_fraction": 0.7433264851570129, "alphanum_fraction": 0.7505133748054504, "avg_line_length": 31.433332443237305, "blob_id": "fadf383967070e7f9d9adc59cc88a5b2add394dc", "content_id": "3bd95b5d60e93ac6dcd4a03a675bc0fd78f6302e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 974, "license_type": "permissive", "max_line_length": 218, "num_lines": 30, "path": "/README.md", "repo_name": "CommitJr/roboto_counter_open", "src_encoding": "UTF-8", "text": "![roboto_counter_open Logo](https://i.imgur.com/VV25qv0.png)\n\n# About Roboto Counter Open\nRoboto Counter Open is an open source software that integrates movement recognition and counter as a flask API, it's originaly created to help stores controlling the amount of people inside due to Coronavirus Crisis. 
\nRoboto Counter Open is maintained by [Commit Jr](https://www.commitjrdev.com/) members, from CEFET-MG.\n\n## Dependencies\n\n- OpenCV $ `pip install opencv-python`\n- Numpy $ `pip install numpy`\n- Flask $ `pip install flask`\n- Threading\n\n## How To Contribute\n\n- Fork it\n- Code your changes\n- Create a Pull Request\n\n## Goals\n\n- Haar Cascade checking\n- Thermographic camera compatibility\n- Multiple cameras system\n\n## Authority & Credits\n\n- Created by [Alexandre Martins](https://github.com/Agronault) and [Pierre Vieira](https://github.com/PierreVieira)\n- Inspired by: [Xopvision](http://xopvision.pt/) and [João Reis](https://www.youtube.com/channel/UCkZ0WqdJ7igUqBfDZZ1bc1w)\n- MIT License \n" }, { "alpha_fraction": 0.5128805637359619, "alphanum_fraction": 0.646369993686676, "avg_line_length": 29.5, "blob_id": "8412d7fe8c4512f142e9b06afa6bb3ab45dc7a30", "content_id": "773ee480218d460bc75576328dca5bedd3fcf111", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "permissive", "max_line_length": 56, "num_lines": 14, "path": "/src/consts.py", "repo_name": "CommitJr/roboto_counter_open", "src_encoding": "UTF-8", "text": "import cv2\n\ncapture_path = '../media/pedestres.mp4' # 0 to webcam\nBLUE, GREEN, RED = (255, 0, 0), (0, 255, 0), (0, 0, 255)\nCYAN, YELLOW = (255, 255, 0), (0, 255, 255)\npos_line, offset = 150, 30\nxy1, xy2 = (20, pos_line), (300, pos_line)\narea_ret_min = 3000\nside_ret_max = 200\njump_on_x_value = 50\ncache_detects = []\ntotal = ppl_out = ppl_in = 0\ncap = cv2.VideoCapture(capture_path)\nfgbg = cv2.createBackgroundSubtractorMOG2()\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20, "blob_id": "f31e62445556265c7e6c5c00cacc08c828b56f2e", "content_id": "8a79af534800328c1b33eacee3dddc6237bbb05a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "permissive", "max_line_length": 47, "num_lines": 20, "path": "/src/api_interface.py", "repo_name": "CommitJr/roboto_counter_open", "src_encoding": "UTF-8", "text": "import threading\nfrom flask import Flask, jsonify\nfrom src import consts\nfrom src.cv_recogniser import run_cv_recogniser\n\napp = Flask(__name__)\n\n\[email protected]('/get', methods=['GET'])\ndef get_counter():\n return jsonify({\n 'total': consts.total,\n 'out': consts.ppl_out,\n 'in': consts.ppl_in\n })\n\n\nth = threading.Thread(target=run_cv_recogniser)\nth.start()\napp.run(debug=True, use_reloader=False)\n" }, { "alpha_fraction": 0.5684032440185547, "alphanum_fraction": 0.5944514870643616, "avg_line_length": 35.32307815551758, "blob_id": "d3569a91ba4bf492cb920ee450107709ef88af66", "content_id": "598169fc9fc52cd070ab8b1ad139ffc5f05a6bb8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4722, "license_type": "permissive", "max_line_length": 119, "num_lines": 130, "path": "/src/cv_recogniser.py", "repo_name": "CommitJr/roboto_counter_open", "src_encoding": "UTF-8", "text": "from src import consts, controls\nimport cv2\n\n\ndef center(x, y, w, h):\n return x + w // 2, y + h // 2\n\n\ndef make_offset_lines(frame):\n cv2.line(frame, (consts.xy1[0], consts.pos_line - consts.offset), (consts.xy2[0], consts.pos_line - consts.offset),\n consts.CYAN)\n cv2.line(frame, (consts.xy1[0], consts.pos_line + consts.offset), (consts.xy2[0], consts.pos_line + consts.offset),\n 
consts.CYAN)\n\n\ndef make_center_line(frame):\n cv2.line(frame, consts.xy1, consts.xy2, consts.BLUE, 3)\n\n\ndef make_lines(frame):\n make_center_line(frame)\n make_offset_lines(frame)\n\n\ndef people_common_area(area):\n return int(area) > consts.area_ret_min\n\n\ndef two_people_rect(side):\n return int(side) < consts.side_ret_max\n\n\ndef make_contours(x, y, sqr_width, sqr_height, frame, sqr_center, i):\n cv2.putText(frame, str(i), (x + 5, y + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, consts.YELLOW, 2)\n cv2.circle(frame, sqr_center, 4, consts.RED, -1)\n cv2.rectangle(frame, (x, y), (x + sqr_width, y + sqr_height), consts.GREEN, 2)\n\n\ndef infos_text(frame):\n cv2.putText(frame, f'TOTAL: {consts.total}', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, consts.YELLOW, 2)\n cv2.putText(frame, f'OUT: {consts.ppl_out}', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, consts.BLUE, 2)\n cv2.putText(frame, f'IN: {consts.ppl_in}', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, consts.RED, 2)\n\n\ndef jump_on_x_detector(detect, c, l):\n return abs(detect[c - 1][0] - l[0]) > consts.jump_on_x_value\n\n\ndef make_count(frame, closing):\n contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n amount = []\n i = 0\n for contour in contours:\n x, y, sqr_width, sqr_height = cv2.boundingRect(contour)\n area = cv2.contourArea(contour)\n if people_common_area(area):\n sqr_center = center(x, y, sqr_width, sqr_height)\n make_contours(x, y, sqr_width, sqr_height, frame, sqr_center, i)\n if len(amount) <= i:\n amount.append(1)\n if len(consts.cache_detects) <= i: # There's more people in the room\n consts.cache_detects.append([]) # Creates a new slot in the list\n if consts.pos_line - consts.offset < sqr_center[1] < consts.pos_line + consts.offset:\n consts.cache_detects[i].append(sqr_center)\n amount[i] = 1 if sqr_width < consts.side_ret_max else 2\n else:\n consts.cache_detects[i].clear()\n i += 1\n\n if i == 0 or len(contours) == 0:\n consts.cache_detects.clear()\n\n else:\n i = 0\n for detect in consts.cache_detects:\n for (c, l) in enumerate(detect):\n if detect[c - 1][1] < consts.pos_line < l[1] and not jump_on_x_detector(detect, c, l): # Out\n detect.clear()\n consts.ppl_out += amount[i]\n consts.total = consts.total - amount[i] if consts.total >= amount[i] else 0\n cv2.line(frame, consts.xy1, consts.xy2, consts.GREEN, 5)\n elif detect[c - 1][1] > consts.pos_line > l[1] and not jump_on_x_detector(detect, c, l): # In\n detect.clear()\n consts.ppl_in += amount[i]\n consts.total += amount[i]\n cv2.line(frame, consts.xy1, consts.xy2, consts.RED, 5)\n elif c > 0:\n cv2.line(frame, detect[c - 1], l, consts.RED, 1)\n i += 1\n infos_text(frame)\n\n\ndef show(dict_frames):\n make_lines(dict_frames['frame'])\n make_count(dict_frames['frame'], dict_frames['closing'])\n for key, value in dict_frames.items():\n cv2.imshow(key, value)\n\n\ndef logical_frame():\n status, frame = consts.cap.read()\n if not status:\n return False\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n fgmask = consts.fgbg.apply(gray)\n bool_val, threshold = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n opening = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=2)\n dilatation = cv2.dilate(opening, kernel, iterations=8)\n closing = cv2.morphologyEx(dilatation, cv2.MORPH_CLOSE, kernel, iterations=8)\n show({'frame': frame, 'closing': closing})\n return True\n\n\ndef run_cv_recogniser():\n controls.set_controls()\n quit_process = lambda: 
cv2.waitKey(30) & 0xFF == ord('q')\n while True:\n controls.update_controls_values()\n status = logical_frame()\n if not status:\n break\n elif quit_process():\n break\n consts.cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n run_cv_recogniser()\n" } ]
5
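A small client-side sketch for the /get counter endpoint defined in api_interface.py above, assuming the API is running locally on Flask's default port 5000 and the requests package is installed:

    import requests

    # the endpoint returns {"total": ..., "out": ..., "in": ...}
    counts = requests.get("http://127.0.0.1:5000/get").json()
    print("inside:", counts["total"], "entered:", counts["in"], "left:", counts["out"])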
Mosley38/SimpsonRule
https://github.com/Mosley38/SimpsonRule
d026345997537a817670aab58a81ca232e5a65bb
0aa8e0bebc4162feec638196ad426d01eaecf9ca
0e584c5563ab3db7a458daa45aba4a39f0e1e46f
refs/heads/master
2020-03-11T16:54:28.313655
2018-04-19T15:30:17
2018-04-19T15:30:17
130,131,729
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8070175647735596, "alphanum_fraction": 0.8070175647735596, "avg_line_length": 56, "blob_id": "1dd887927cd476e97ffb04ec1c7f3722b0401a39", "content_id": "20f4fa2aaf7fa671291b5690c1081591e59c6e9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 342, "license_type": "no_license", "max_line_length": 91, "num_lines": 6, "path": "/README.md", "repo_name": "Mosley38/SimpsonRule", "src_encoding": "UTF-8", "text": "# SimpsonRule\nJust a basic code to calc Simpson rule \nThis was an assignment in Comp Physics for the simpson rule\nYou basically just plug in the boundaries and the integral with n being number of intervals\nWe had to graph and shade after it was calculated.\nIf you can give any tips for improving the calculation it would be much appreciated.\n" }, { "alpha_fraction": 0.5516542792320251, "alphanum_fraction": 0.5887913703918457, "avg_line_length": 22.09375, "blob_id": "2d95ea0b9f4210348a7d72a9c987896de1a47b38", "content_id": "6658880003c697853389eb290459c016c111d173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1481, "license_type": "no_license", "max_line_length": 83, "num_lines": 64, "path": "/simpsonRule.py", "repo_name": "Mosley38/SimpsonRule", "src_encoding": "UTF-8", "text": "#Exercise 5.2\n#By: Major Mosley\n\n\n\n#import pdb\n#pdb.set_trace()\ndef g(x): #We create our function that we wish to integrate\n return x**4 - 2*x + 1\ndef G(x): \n return x**5/5 - x**2 + x\n\ndef simpson(f,a,b,n):\n h = (b-a)/n\n\n v_o = 0\n for i in range(1,n//2 - 1):\n v_o += f(a + 2*i*h)\n\n v_1 = 0\n for i in range(1,n//2): #use division operator \n\n v_1 += f(a +(2*i-1)*h)\n\n integral = h/3 * (f(a) + f(b) + 4*v_o + 2*v_1)\n return integral\n\n\na = 0\nb = 2\nn = 100000\nexact = G(b) - G(a) #get actual value for the integral\nanswr = simpson(g,a,b,n)\nerror = (abs(answr - exact)/exact)*100 #here i am checking for the percentage error\nprint('There is an error of: {0:.2f}'.format(error))\nprint('The exact calculation gives: ',exact)\nprint('Simpson rule gives: {0:.3f}'.format(answr))\n\n###########Graphing part of program begins #########################\n\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#here we have 250 iterations from a to b\nx0 = np.linspace(a,b, 250)\nplt.plot(x0, G(x0), '-', label = 'Function')\n\n#creating x axis and steps to be taken which is 10\nx = np.linspace(a, b, n+1)\nplt.plot(x, G(x), 'o', label = 'Simpson Rule')\n\nverts = [(a, 0)] + list(zip(x, G(x))) + [(b, 0)]\nplt.vlines(x, 0, G(x), colors = 'black')\n\n#fill in area under curve with black \nplt.fill_between(x, G(x), color = 'black')\nplt.title('Simpson Rule') \nplt.xlabel(r'$x$')\nplt.ylabel(r'$Exact Integral$')\nplt.legend()\nplt.axhline(0, color = 'black')\nplt.show()\n\n\n\n" } ]
2
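On the Mosley38/SimpsonRule record above: composite Simpson's rule weights the odd-indexed nodes by 4 and the even interior nodes by 2, which is what the corrected loops now compute. Below is a self-contained cross-check, assuming only the repo's example integrand; for f(x) = x^4 - 2x + 1 on [0, 2] the exact value is 32/5 - 4 + 2 = 4.4.

```python
# Standalone composite Simpson's rule for cross-checking simpsonRule.py.
def simpson(f, a, b, n):
    if n % 2:
        raise ValueError('n must be even')
    h = (b - a) / n
    odd = sum(f(a + (2 * i - 1) * h) for i in range(1, n // 2 + 1))  # x_1 ... x_(n-1)
    even = sum(f(a + 2 * i * h) for i in range(1, n // 2))           # x_2 ... x_(n-2)
    return h / 3 * (f(a) + f(b) + 4 * odd + 2 * even)

if __name__ == '__main__':
    g = lambda x: x**4 - 2 * x + 1
    approx = simpson(g, 0.0, 2.0, 100)
    print(approx, abs(approx - 4.4))  # error ~1e-8 even at n = 100
```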
prithvi9876/cf2pdf
https://github.com/prithvi9876/cf2pdf
7fcac9c4b183951a23ccaaca39045f5b1dd9a977
e526cf9ee92654ac573b82b41a996c58deedfcdf
598823a6ceed2f675f1bbccd53d8b214df1719b1
refs/heads/master
2020-12-27T15:35:57.313904
2016-01-11T11:30:16
2016-01-11T11:30:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6435452699661255, "alphanum_fraction": 0.6493256092071533, "avg_line_length": 26.432432174682617, "blob_id": "53a12df17bfb4f4c5fe7ec355de2ebeeaff29abc", "content_id": "614141123c8c03b6770291a086591a473c61dd76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1038, "license_type": "no_license", "max_line_length": 75, "num_lines": 37, "path": "/README.md", "repo_name": "prithvi9876/cf2pdf", "src_encoding": "UTF-8", "text": "# cf2pdf\ngets problems from codeforces in pdf format\n\n### Instructions ###\n \n Download the python scipt 'topdfscript.py' and run it from python shell \n It has 2 functions :\n gettags() and contestpdf()\n \n gettags() gets problems by tags \n call it by topdfscript.gettags()\n further it will ask for path and tags\n \n \n contestpdf() gets problems by contestId\n call it by topdfcript.contestpdf()\n \n It has two parameters\n \n The first parameter is the path\n pass the path as a string or pass an empty \n string for the present working directory to be path\n \n and the followed by contestId as a string\n \n eg: topdfscript.contestpdf(\"\",\"1\")\n or\n topdfscript.contestpdf(\"/users/\",\"1\")\n \n Both these commands will download the files \n and store them as pdfs in a folder named by \n the date and time of function call in the specified directory \n \n \n### Pre Requisites \n pdfkit (https://pypi.python.org/pypi/pdfkit) \n python 3.4 \n \n \n \n \n \n" }, { "alpha_fraction": 0.6091121435165405, "alphanum_fraction": 0.6203271150588989, "avg_line_length": 25.540372848510742, "blob_id": "6cef0aa23d5979ad9955a983c5f68431fbe46dfb", "content_id": "3a00a7f0f86058fd52f529ede3acf23d5e7f1fd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4280, "license_type": "no_license", "max_line_length": 185, "num_lines": 161, "path": "/topdfscript.py", "repo_name": "prithvi9876/cf2pdf", "src_encoding": "UTF-8", "text": "import urllib.request\nimport json\nimport pdfkit\nimport os\nimport datetime\n\n\ndef tagpdf(pathac=\"\",num=1,urll=''):\n\trequest = urllib.request.Request(\"http://www.codeforces.com/api/problemset.problems?tags=\"+urll)\n\tresponse = urllib.request.urlopen(request)\n\tencoding = response.info().get_content_charset('utf8')\n\tdata = json.loads(response.read().decode(encoding))\n\tif data['status'] != 'OK':\n\t\tprint(' ERROR\\n')\n\t\treturn\n\n\tss = str(datetime.datetime.now().now())\n\tpath = pathac\n\tif not pathac:\n\t\tos.chdir(os.path.dirname(__file__))\n\t\tpath = os.getcwd()\n\t\t\n\tpath = path + '/' + ss\n\t\n\tos.makedirs(path,exist_ok=True)\n\t\n\tpath = path + '/'\n\t\n\t'''\n\tprint(path)\n\t'''\n\tfor i in range(0,num):\n\t\tif not data['result']['problems'][i]:\n\t\t\tbreak;\n\t\ta = \"http://www.codeforces.com/problemset/problem/\"\n\t\ta = a + str(data['result']['problems'][i]['contestId'])\n\t\ta = a + \"/\"\n\t\ta = a + str(data['result']['problems'][i]['index'])\n\t\tpdfkit.from_url(a,path + str(data['result']['problems'][i]['contestId'])+ str(data['result']['problems'][i]['index']))\n\n\n\n\t\t\ndef gettags():\n\t\n\ttags = [\"implementation\",\"binary search\",\"math\",\"data structures\",\"dfs and similar\",\"graphs\",\"combinatorics\",\"constructive algorithms\",\"dp\",\"greedy\",\"strings\",\"hashing\",\"two pointers\",\n\t\"bitmasks\",\"sortings\",\"string suffix structures\",\"geometry\",\"dsu\",\"divide and 
conquer\",\"trees\",\"games\",\"probabilities\"]\n\tprint(\"\\n\")\n\tprint(\"\t*********************** \\n \")\n\tprint(\"\tpress q to quit\\n\")\n\n\tprint(\"\ttags list\\n\")\n\tprint(\" tag id : tag\\n\")\n\tprint(\"\t1 implementation\\n\")\n\tprint(\"\t2 binary search\\n\")\n\tprint(\"\t3 math\\n\")\n\tprint(\"\t4 data structures\\n\")\n\tprint(\"\t5 dfs and similar\\n\")\n\tprint(\"\t6 graphs\\n\")\n\tprint(\"\t7 combinatorics\\n\")\n\tprint(\"\t8 constructive algorithms\\n\")\n\tprint(\"\t9 dp\\n\")\n\tprint(\"\t10 greedy\\n\")\n\tprint(\"\t11 strings\\n\")\n\tprint(\"\t12 hashing\\n\")\n\tprint(\"\t13 two pointers\\n\")\n\tprint(\"\t14 bitmasks\\n\")\n\tprint(\"\t15 sortings\\n\")\n\tprint(\"\t16 string suffix structures\\n\")\n\tprint(\"\t17 geometry\\n\")\n\tprint(\"\t18 dsu\\n\")\n\tprint(\"\t19 divide and conquer\\n\")\n\tprint(\"\t20 trees\\n\")\n\tprint(\"\t21 games\\n\")\n\tprint(\"\t22 probabilities\\n\")\n\tprint(\"\\n\")\n\tprint(\"\tpress f to select tag id: \\n\")\n\tprint(\"\tpress q to quit\\n\")\n\ts = input()\n\twhile s!='q' and s!='f':\n\t\tprint(\" press either q or f (not in caps)\")\n\t\ts= input()\n\t\n\tif s=='q':\n\t\treturn\n\t\n\trTag = []\n\n\twhile s!='s':\n\t\tprint('\tenter the tagid : ')\n\t\tprint('')\n\t\ts = input()\n\t\tif s=='s':\n\t\t\tbreak\n\t\twhile (int(s) > 22 or int(s) <=0) or (not s.isdigit()) :\n\t\t\tprint(\" invalid tag id , give a valid tag, press s to stop\")\n\t\t\ts=input()\n\t\t\tif s=='s':\n\t\t\t\tbreak\n\t\tif s=='s':\n\t\t\tbreak\n\t\trTag.append(int(s))\n\t\tprint('\tpress s to stop , f to continue')\n\t\ts = input()\n\t\twhile s!='s' and s!='f':\n\t\t\tprint('\tpress s to stop , f to continue')\n\t\t\ts=input()\n\t\tif s=='s':\n\t\t\tbreak\n\n\turll = \"\"\n\tfor i in range(0,len(rTag)):\n\t\turll = urll + tags[rTag[i]-1]\n\t\tif i != (len(rTag)-1):\n\t\t\turll = urll + ';'\n\tpath=\"\"\n\tprint('\tEnter the directory path where the problems are to be downloaded or leave blank, by default it is the working directory of script\\n')\n\tpath = input()\n\tprint('\tEnter the number of problems to be downloaded or leave blank, by default it is 1')\n\tnum = int(input())\n\n\ttagpdf(path,num,urll)\n\n\t\n\ndef contestpdf(pathac=\"\",contestId='1'):\n\tif not contestId.isdigit():\n\t\tprint('\tTYPE ERROR\\n')\n\t\treturn \n\trequest = urllib.request.Request('http://www.codeforces.com/api/problemset.problems?tags=')\n\tresponse = urllib.request.urlopen(request)\n\tencoding = response.info().get_content_charset('utf8')\n\tdata = json.loads(response.read().decode(encoding))\n\tif data['status'] != 'OK':\n\t\tprint(' ERROR\\n')\n\t\treturn\n\n\tss = str(datetime.datetime.now().now())\n\tpath = pathac\n\tif not pathac:\n\t\tos.chdir(os.path.dirname(__file__))\n\t\tpath = os.getcwd()\n\t\t\n\tpath = path + '/' + ss\n\t\n\tos.makedirs(path,exist_ok=True)\n\t\n\tpath = path + '/'\n\t\n\t\n\n\t\n\n\tfor i in range(0,len(data['result']['problems'])):\n\t\t\n\t\tif str(data['result']['problems'][i]['contestId']) == contestId:\n\t\t\ta = \"http://www.codeforces.com/problemset/problem/\"\n\t\t\ta = a + str(data['result']['problems'][i]['contestId'])\n\t\t\ta = a + \"/\"\n\t\t\ta = a + str(data['result']['problems'][i]['index'])\n\t\t\tpdfkit.from_url(a,path + str(data['result']['problems'][i]['contestId'])+ str(data['result']['problems'][i]['index']))\n\t\t\n\t\t\n\n" } ]
2
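On the prithvi9876/cf2pdf record above: both entry points hit the same `problemset.problems` endpoint and then feed per-problem URLs to `pdfkit.from_url`. Below is a sketch of just the API round trip `contestpdf()` performs, using only the endpoint and JSON fields that already appear in the script; the helper name `problem_urls` is illustrative, not from the repo.

```python
import json
import urllib.request

def problem_urls(contest_id):
    # Same endpoint the script queries; an empty tags filter returns everything.
    api = 'http://www.codeforces.com/api/problemset.problems?tags='
    with urllib.request.urlopen(api) as response:
        data = json.loads(response.read().decode('utf8'))
    if data['status'] != 'OK':
        raise RuntimeError('Codeforces API returned an error')
    return ['http://www.codeforces.com/problemset/problem/{}/{}'.format(
                p['contestId'], p['index'])
            for p in data['result']['problems']
            if str(p['contestId']) == str(contest_id)]

if __name__ == '__main__':
    for url in problem_urls('1'):
        print(url)  # the script passes each such url to pdfkit.from_url
```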
greenkidneybean/bchb620
https://github.com/greenkidneybean/bchb620
50296ddacd1e80f53dd1a2846ff16f105759618e
a4892269b204314d134c856ce8d6de0171a3c97a
fd712d2b2f286400abca48640fa03a992d0088f1
refs/heads/master
2020-07-21T08:40:23.672372
2019-12-20T23:11:30
2019-12-20T23:11:30
206,800,439
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6300366520881653, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 17.200000762939453, "blob_id": "561b1661e298a670dee33b8fc1e0a077c5839e0f", "content_id": "a28055b06a56bb052caf0eed47fad1a0034f7bb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/8/8.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "\"\"\"Return translation of mRNA sequence\"\"\"\n\nimport sys\nfrom Bio.Seq import Seq\n\ninFile = sys.argv[1]\n#outFile = sys.argv[2]\n\nwith open(inFile,'r') as i:\n lines = i.read().splitlines()\n\nfor line in lines:\n mRNA = Seq(line)\n print(mRNA.translate()[:-1])\n print()\n" }, { "alpha_fraction": 0.5965417623519897, "alphanum_fraction": 0.6023054718971252, "avg_line_length": 22.133333206176758, "blob_id": "006d9d048362bcdf6adadf5b16037692ec35600d", "content_id": "f5341bee6315e9bc1cf9100e73e9796227fb69a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 55, "num_lines": 15, "path": "/12/12.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "import sys\nfrom Bio import SeqIO\n\ninFile = sys.argv[1]\nk = 3\n\nseq_dict = SeqIO.to_dict(SeqIO.parse(inFile, \"fasta\"))\n\nfor key in seq_dict.keys():\n pop_dict = seq_dict.copy()\n val = pop_dict.pop(key)\n suffix = val.seq[-k:]\n for sub_key,sub_val in pop_dict.items():\n if suffix == sub_val.seq[:k]:\n print(key, sub_key)\n" }, { "alpha_fraction": 0.6085526347160339, "alphanum_fraction": 0.6710526347160339, "avg_line_length": 29.399999618530273, "blob_id": "97fc3c4d8b9628711ab01ee170f35235b0153ffd", "content_id": "a54e6b5dc9db8e1bf226f00cb6c6ddeec2bded8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 304, "license_type": "no_license", "max_line_length": 130, "num_lines": 10, "path": "/README.md", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "# BCHB-620 Bioinformatics Algorithms\n\nIndependent course guided by Dr. 
Edwards, based on problems on [Rosalind](http://rosalind.info), office hours on Fridays at 10am.\n\nFor the one credit course:\n\n28 <= # problems : A\n25 <= # problems < 28 : A-\n22 <= # problems < 25 : B+\n19 <= # problems < 22 : B\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.546419084072113, "avg_line_length": 21.84000015258789, "blob_id": "42ce274b6ed22fa1513971cbd88784e6f82ac8bb", "content_id": "9d7462749e96c392846c999233870c6b836c7d7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/1/.ipynb_checkpoints/1-checkpoint.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\ninFile = sys.argv[1]\noutFile = sys.argv[2]\n\nwith open(inFile,'r') as i:\n    lines = i.read().splitlines()\n\n# this doesn't seem too pythonic\nwith open(outFile,'w') as o:\n    for line in lines:\n        a = line.count(\"A\")\n        c = line.count(\"C\")\n        g = line.count(\"G\")\n        t = line.count(\"T\")\n        o.write(f\"{a} {c} {g} {t} \\n\")" }, { "alpha_fraction": 0.5156739950180054, "alphanum_fraction": 0.5319288969039917, "avg_line_length": 21, "blob_id": "be454b3e8bc84715c7ad83baf63d902563ed0c20", "content_id": "ae8b45b93d7a8d21e88a5009f222772d73596ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 56, "num_lines": 29, "path": "/7 - Mendel's Law Probabilit/7.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "\"\"\"Return probability of progeny with dominant allele\"\"\"\n\nimport sys\n\ninFile = sys.argv[1]\n\nwith open(inFile,'r') as i:\n    lines = i.read().splitlines()\n\n# probability function\ndef dom_allele_prob(k,m,n):\n    pop = (k+m+n)\n    tot_prog = (pop * (pop -1)) * 4\n\n    # Aa x Aa\n    Aa_Aa = ((m * (m-1)) * 4) * .25\n    # Aa x aa\n    Aa_aa = (((m * n) * 4) * 2 ) * .5\n    # aa x aa\n    aa_aa = ((n * (n-1)) * 4) * 1\n\n    dom_allele = tot_prog - (Aa_Aa + Aa_aa + aa_aa)\n    dom_prob = dom_allele/tot_prog\n    print(dom_prob)\n\n# iterate thru input file\nfor line in lines:\n    k,m,n = [int(i) for i in line.split(\" \")]\n    dom_allele_prob(k,m,n)\n" }, { "alpha_fraction": 0.6019417643547058, "alphanum_fraction": 0.6116504669189453, "avg_line_length": 18.200000762939453, "blob_id": "be290f159653672fd2a0e42c8279163b4d1277d9", "content_id": "3a00a7f0f86058fd52f529ede3acf23d5e7f1fd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 309, "license_type": "no_license", "max_line_length": 38, "num_lines": 16, "path": "/2/2.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom Bio.Seq import Seq\n\ninFile = sys.argv[1]\noutFile = sys.argv[2]\n\nwith open(inFile,'r') as i:\n    lines = i.read().splitlines()\n\nwith open(outFile,'w') as o:\n    for line in lines:\n        trans = Seq(line).transcribe()\n        o.write(f\"{trans} \\n\")\n        print(trans)" }, { "alpha_fraction": 0.5365384817123413, "alphanum_fraction": 0.5519230961799622, "avg_line_length": 19.84000015258789, "blob_id": "91f039d7174c48bfb98b69f948a79e6b79882549", "content_id": "5fb9a9137261dc8388b515c42de349796735776b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 51, "num_lines": 25, "path": "/4/4.py", "repo_name": 
"greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\ninFile = sys.argv[1]\noutFile = sys.argv[2]\n\ndef rabbits(n,k):\n if n <= 1:\n return n\n else:\n return(rabbits(n-1,k) + (rabbits(n-2,k)*k))\n \n\nwith open(inFile,'r') as i:\n lines = i.read().splitlines()\n\n# this doesn't seem too pythonic\nwith open(outFile,'w') as o:\n for line in lines:\n elements = line.split(\" \")\n n = int(elements[0])\n k = int(elements[1])\n value = str(rabbits(n,k))\n o.write(f\"{value} \\n\")\n print(value)" }, { "alpha_fraction": 0.6607595086097717, "alphanum_fraction": 0.6759493947029114, "avg_line_length": 18.75, "blob_id": "b1aeb3568752dbef338242179a4717dc50cf82bd", "content_id": "d041e8c58403527286cf829ea60b244490546958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 75, "num_lines": 20, "path": "/6/6.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Return Hamming Distance for two strings of equal length\n\nHamming Distance - counting the number of bases at which two strings differ\n\"\"\"\n\nimport sys\nfrom Bio import SeqIO\nfrom Bio import SeqUtils\n\ninFile = sys.argv[1]\n\nwith open(inFile,'r') as i:\n lines = i.read().splitlines()\n\nham = 0\nfor x,y in zip(lines[0], lines[1]):\n if x != y:\n ham += 1\nprint(ham)\n" }, { "alpha_fraction": 0.6086404323577881, "alphanum_fraction": 0.6137229800224304, "avg_line_length": 23.53125, "blob_id": "6587fbfb9fac3da1abec7ef6887c94bb0e6c4418", "content_id": "d9ee1638b973ee72b812a5861aa5d361678f825a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 61, "num_lines": 32, "path": "/16/16.1.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "\n\nfrom Bio import SeqIO\nimport os\nimport re\nimport sys\nfrom urllib.request import urlopen\n\ninFile = sys.argv[1]\n\n# lines is the list\nwith open(inFile,'r') as i:\n ID = i.read().splitlines()\n\n\n\nfor i in range(len(ID)):\n URL = 'http://www.uniprot.org/uniprot/' + ID[i] + '.fasta'\n data = urlopen(URL)\n fasta = data.read().decode('utf-8', 'ignore')\n with open('seq_file.fasta', 'a') as text_file:\n text_file.write(fasta)\n\nhandle = open('seq_file.fasta', 'r')\nmotifs = re.compile(r'(?=(N[^P][ST][^P]))')\n\nfor id, record in zip(ID, SeqIO.parse(handle, 'fasta')):\n seq = str(record.seq)\n positions = []\n for m in re.finditer(motifs, seq):\n positions.append(m.start() + 1)\n if len(positions) > 0:\n print(id)\n print(\" \".join(str(x) for x in positions))\n" }, { "alpha_fraction": 0.604651153087616, "alphanum_fraction": 0.6212624311447144, "avg_line_length": 15.722222328186035, "blob_id": "ce6c2d282ce5f8f84afd0bee1f61df73ccf22587", "content_id": "eaaf96b9177358b0c64e0229187847a50a30f6a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/9/9.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "\"\"\"Find all positions of motif in string (including overlap)\n\n`$ python 9.py input.txt > output.txt`\n\n\"\"\"\n\nimport sys\nimport re\n\ninFile = sys.argv[1]\n\nwith open(inFile,'r') as i:\n lines = i.read().splitlines()\n\ns = lines[0]\nt = lines[1]\n\nprint(*[m.start() + 1 for m in 
re.finditer(f'(?={t})', s)])\n" }, { "alpha_fraction": 0.2723214328289032, "alphanum_fraction": 0.6205357313156128, "avg_line_length": 15.448275566101074, "blob_id": "cc98d4c4aeac95efbfab49471f367f95f86fdaa8", "content_id": "9f6639be829d9293b2be70a26e60ecdae8cdea3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": "no_license", "max_line_length": 40, "num_lines": 29, "path": "/20/20.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "import sys\nfrom Bio import SeqIO\n\ninFile = sys.argv[1]\n\nmass_dict = {\"A\":71.03711,\n\"C\":103.00919,\n\"D\":115.02694,\n\"E\":129.04259,\n\"F\":147.06841,\n\"G\":57.02146,\n\"H\":137.05891,\n\"I\":113.08406,\n\"K\":128.09496,\n\"L\":113.08406,\n\"M\":131.04049,\n\"N\":114.04293,\n\"P\":97.05276,\n\"Q\":128.05858,\n\"R\":156.10111,\n\"S\":87.03203,\n\"T\":101.04768,\n\"V\":99.06841,\n\"W\":186.07931,\n\"Y\":163.06333}\n\nfile = open(inFile,'r')\nfor line in file:\n    #strip the trailing newline so every character maps to a residue mass\n    print(sum(map(mass_dict.get, line.strip())))\n" }, { "alpha_fraction": 0.70703125, "alphanum_fraction": 0.72265625, "avg_line_length": 18.69230842590332, "blob_id": "7099d1beca037ff0b179a07bd94cc4150c0f7c12", "content_id": "ffe0ea7e20158634a3bf6447770938c43def3dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/5/5.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom Bio import SeqIO\nfrom Bio import SeqUtils\n\ninFile = sys.argv[1]\n\nfile = list(SeqIO.parse(inFile, \"fasta\"))\nfile.sort(key=lambda x: SeqUtils.GC(x.seq), reverse=True)\n\nprint(file[0].id)\nprint(SeqUtils.GC(file[0].seq))\n" }, { "alpha_fraction": 0.6302631497383118, "alphanum_fraction": 0.6342105269432068, "avg_line_length": 23.516128540039062, "blob_id": "eacc9a2e2a910b2d0450d1fb2fe301d084876464", "content_id": "c8784affa23ae43e89d3aa3f1ed560adf2e2a36b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 760, "license_type": "no_license", "max_line_length": 74, "num_lines": 31, "path": "/10/10.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "\"\"\"Return consensus sequence\"\"\"\n\nimport sys\nfrom Bio import SeqIO\nfrom Bio import SeqUtils\nfrom Bio import AlignIO\nfrom Bio.Align import AlignInfo\n\ninFile = sys.argv[1]\n\nalignment = AlignIO.read(inFile, \"fasta\")\n\nsummary_align = AlignInfo.SummaryInfo(alignment)\n\nconsensus = summary_align.dumb_consensus(threshold=.01)\n\n\nmy_pssm = summary_align.pos_specific_score_matrix(consensus,\n                                                chars_to_ignore = ['N'])\nfinal_dict = {'A':[],'C':[],'G':[],'T':[]}\nconsensus = []\n\nfor i in my_pssm:\n    consensus.append(max(i, key=i.get))\n    for key,val in i.items():\n        final_dict[key].append(int(val))\n\nprint(\"\".join(consensus))\n\nfor key,val in final_dict.items():\n    print(f'{key}: {\" \".join(list(map(str,val)))}')\n" }, { "alpha_fraction": 0.5328892469406128, "alphanum_fraction": 0.5670274496078491, "avg_line_length": 25.688888549804688, "blob_id": "f4de25c712fcc90b81acd3bd19e1f1a0d1cf5630", "content_id": "0b14ca6ddfb7721b6845bfdcb1962ec92dd93d35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 174, "num_lines": 45, "path": "/16/16.b.py", "repo_name": 
"greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "#Problem 16: Finding A Protein Motif\n#Given: At most 15 UniProt Protein Database access IDs.\n#Return: For each protein possessing the N-glycosylation motif, output its given access ID followed by a list of locations in the protein string where the motif can be found.\n\nimport fnmatch\nimport sys\n\nf = open(sys.argv[1], \"r\")\nmat = []\nstr1 = \" \"\nwhile (str1 != ''):\n str1 = f.readline().strip()\n mat.append(str1)\nfor i in range(0, len(mat)-1):\n if (mat[i][0] == \">\"):\n mat[i+1] = \"*{0}\".format(mat[i+1])\n\nfiltered = fnmatch.filter(mat, '>*')\nfilt = [x for x in mat if x not in filtered]\na = \"\"\nlst = a.join(filt)\nlst = lst.split(\"*\")\nlst.remove(\"\")\n\ng = open(sys.argv[1], \"r\")\nmat2 = []\nstr2 = \" \"\nwhile (str2 != ''):\n str2 = g.readline().strip()\n mat2.append(str2)\nmat2.remove(\"\")\n\nmat3 = []\nfor i in range(0, len(lst)):\n mat3.append(mat2[i])\n for j in range(0, len(lst[i])-3):\n if (lst[i][j] == \"N\" and lst[i][j+1] != \"P\" and (lst[i][j+2] == \"S\" or lst[i][j+2] == \"T\") and lst[i][j+3] != \"P\"):\n mat3.append(j+1)\n\nfor i in range(0, len(mat3)):\n if (mat3[i] in mat2):\n print(\"\\n\")\n print(mat3[i])\n else:\n print(mat3[i], end = ' ')\n" }, { "alpha_fraction": 0.5684008002281189, "alphanum_fraction": 0.576107919216156, "avg_line_length": 23.714284896850586, "blob_id": "da28725f69b44afcf5e928700e6f7ecc5d2b01f0", "content_id": "29a0048318bf236415f357a8183232f80bcec15c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 90, "num_lines": 21, "path": "/16/16.py", "repo_name": "greenkidneybean/bchb620", "src_encoding": "UTF-8", "text": "from Bio import SeqIO\nimport os\nimport re\nimport sys\n\ninFile = sys.argv[1]\n\n# lines is the list\nwith open(inFile,'r') as i:\n lines = i.read().splitlines()\n\nfor i in lines:\n os.system(f'wget http://www.uniprot.org/uniprot/{i}.fasta')\n\nfor i in lines:\n for record in SeqIO.parse(f\"{i}.fasta\", \"fasta\"):\n match = 0\n match = [m.start()+1 for m in re.finditer('(?=(N[^P][ST][^P]))', str(record.seq))]\n if len(match) > 0:\n print(i)\n print(\" \".join(str(x) for x in match))\n" } ]
15
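One performance note on the greenkidneybean/bchb620 record above: the recursive `rabbits(n, k)` in `4.py` re-evaluates the same subproblems exponentially many times. The recurrence F(n) = F(n-1) + k*F(n-2) also runs in linear time with two accumulators; below is a sketch, keeping the base cases F(0) = 0 and F(1) = 1 from the original.

```python
def rabbits(n, k):
    """Iterative F(n) = F(n-1) + k*F(n-2); same answers as the recursive 4.py."""
    if n < 1:
        return 0
    prev, cur = 0, 1  # F(0), F(1)
    for _ in range(n - 1):
        prev, cur = cur, cur + k * prev  # slide the recurrence window one step
    return cur

if __name__ == '__main__':
    print(rabbits(5, 3))  # 19, matching the recursive version
```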
Tanner-York-Make-School/CS-1.1-Intro-to-Programming
https://github.com/Tanner-York-Make-School/CS-1.1-Intro-to-Programming
3059d24a5b3571050c58802f9caabc1923636970
6540b4673c1ca6536333bde896095af67acd6697
87df7f7d86cde3fdbead520a26ac9cd5ab7aa96f
refs/heads/master
2021-05-19T14:08:47.939057
2020-03-31T23:19:44
2020-03-31T23:19:44
251,748,652
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.62718665599823, "alphanum_fraction": 0.6277437210083008, "avg_line_length": 39.79999923706055, "blob_id": "42c5c3544fea9ba3739a1069c15a42f895e82670", "content_id": "8969f90e5856fca71d5b7aa8fa9a6f632e1521c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8975, "license_type": "permissive", "max_line_length": 257, "num_lines": 220, "path": "/Projects/Spaceman/spaceman.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "import random\nimport re\nfrom os import system\n\ndef load_words_list():\n '''\n A function that reads a text file of words and returns an array of words\n Returns: \n array: Reads from a text file to get the avaiable words to be selected as the secret word\n '''\n f = open('./words.txt', 'r')\n words_list = f.readlines()\n f.close()\n\n words_list = words_list[0].split(' ')\n return words_list\n\ndef load_word(words_list):\n '''\n A function that selects a random word from a list of words to be used as the secret word\n Args:\n array: A list of words\n Returns:\n string: A randomly selected word from the words list to be used as the secret word\n '''\n secret_word = random.choice(words_list)\n return secret_word\n\ndef is_word_guessed(secret_word, letters_guessed):\n '''\n A function that checks if all the letters of the secret word have been guessed.\n Args:\n secret_word (string): the random word the user is trying to guess.\n letters_guessed (list of strings): list of letters that have been guessed so far.\n Returns: \n bool: True only if all the letters of secret_word are in letters_guessed, False otherwise\n '''\n word = []\n for s_letter in secret_word:\n for g_letter in letters_guessed:\n if s_letter == g_letter:\n word.append(g_letter);\n return True if len(word) == len(secret_word) else False\n\ndef get_guessed_word(secret_word, letters_guessed):\n '''\n A function that is used to get a string showing the letters guessed so far in the secret word and underscores for letters that have not been guessed yet.\n Args: \n secret_word (string): the random word the user is trying to guess.\n letters_guessed (list of strings): list of letters that have been guessed so far.\n Returns: \n string: letters and underscores. For letters in the word that the user has guessed correctly, the string should contain the letter at the correct position. 
For letters in the word that the user has not yet guessed, show an _ (underscore) instead.\n    '''\n    word = []\n    for s_letter in secret_word: word.append('_')\n    for index, s_letter in enumerate(secret_word):\n        for g_letter in letters_guessed:\n            if s_letter == g_letter:\n                word[index] = g_letter\n    return ' '.join(word)\n\n\ndef is_guess_in_word(guess, secret_word):\n    '''\n    A function to check if the guessed letter is in the secret word\n    Args:\n        guess (string): The letter the player guessed this round\n        secret_word (string): The secret word\n    Returns:\n        bool: True if the guess is in the secret_word, False otherwise\n    '''\n    return True if secret_word.find(guess) != -1 else False\n\ndef has_been_guessed(guess, letters_guessed):\n    '''\n    A function to check if the guessed letter is in letters_guessed\n    Args:\n        guess (string): The letter the player guessed this round\n        letters_guessed (string): The list of letters the user has guessed\n    Returns:\n        bool: True if the guess is in letters_guessed, False otherwise\n    '''\n    for letter in letters_guessed:\n        if letter == guess:\n            return True\n    return False\n\ndef changed_word(current_word, secret_word, words_list):\n    '''\n    A function to get a new word with the same characters as the current guessed word\n    Args:\n        current_word (array): The characters of the current guessed word\n        secret_word: The current secret word\n    Returns:\n        array: A list that has the same amount of letters and letters in the same index as current_word\n    '''\n    current_word_list = current_word.split()\n    matched_expression = []\n    for character in current_word_list: matched_expression.append('.')\n    for i, char in enumerate(current_word_list): \n        if char != '_': \n            matched_expression[i] = char\n    matched_expression = ''.join(matched_expression)\n\n    for word in words_list:\n        matched_word = re.match(matched_expression, word)\n        if matched_word and re.match(f'{secret_word}', word) == None:\n            return word\n    return secret_word\n\ndef spaceman(secret_word, spaceman_words_list):\n    '''\n    A function that controls the game of spaceman. Will start spaceman in the command line.\n    Args:\n        secret_word (string): the secret word to guess.\n    '''\n    letters_guessed = []\n    guesses_left = len(secret_word)\n    prompt = 'Welcome to Spaceman!'\n    original_secret_word = secret_word\n\n    is_playing = True\n    while is_playing:\n        current_word = get_guessed_word(secret_word, letters_guessed)\n        \n        system('clear')\n        print(prompt)\n        print(f'Current word: {current_word}')\n        print(f'Letters guessed: {letters_guessed}')\n        print(f'You have {guesses_left} guesses left, please enter one letter per round')\n        print('---------------------------------------')\n\n        user_guess = input('Enter a letter: ')\n        user_guess = user_guess.replace(' ', '')\n        while len(user_guess) > 1 or user_guess == '' or user_guess == ' ' or re.match(r'([a-z]|[A-Z])', user_guess) == None:\n            user_guess = input('Enter only one letter: ') \n        user_guess = user_guess.lower()\n\n        if has_been_guessed(user_guess, letters_guessed):\n            prompt = f'You already guessed {user_guess}'\n        elif is_guess_in_word(user_guess, secret_word):\n            prompt = f'Correct! {user_guess} is in the secret word.'\n            letters_guessed.append(user_guess)\n            current_word = get_guessed_word(secret_word, letters_guessed)\n            secret_word = changed_word(current_word, secret_word, spaceman_words_list)\n        else:\n            prompt = f'Incorrect. {user_guess} is not in the secret word'\n            letters_guessed.append(user_guess)\n            guesses_left -= 1\n        \n        if is_word_guessed(secret_word, letters_guessed):\n            user_input = input('You Won! 
Want to play again? Y/n ')\n            if user_input == 'Y' or user_input == 'y' or user_input == '' or re.match(r'\s+', user_input):\n                letters_guessed = []\n                secret_word = load_word(spaceman_words_list)\n                original_secret_word = secret_word\n                guesses_left = len(secret_word)\n            elif user_input == 'N' or user_input == 'n':\n                is_playing = False\n        elif guesses_left == 0:\n            print(f'The word was {secret_word}')\n            user_input = input('You Lost! Want to try again? Y/n ')\n            if user_input == 'Y' or user_input == 'y' or user_input == '' or re.match(r'\s+', user_input):\n                prompt = 'Welcome back to Spaceman!'\n                letters_guessed = []\n                secret_word = load_word(spaceman_words_list)\n                original_secret_word = secret_word\n                guesses_left = len(secret_word)\n            elif user_input == 'N' or user_input == 'n':\n                is_playing = False\n        if is_word_guessed(secret_word, letters_guessed):\n            current_word = re.sub(r'\s', '', current_word)\n\ndef test_load_words_list():\n    words_list = load_words_list()\n    assert type(words_list) == list, 'load_words list did not return a list'\n\ndef test_load_word():\n    words_list = load_words_list()\n    secret_word = load_word(words_list)\n    assert type(secret_word) == str, 'load_word did not return string'\n\ndef test_is_word_guessed():\n    words_list = load_words_list()\n    letters_guessed = ['a', 'i', 'p']\n    secret_word = load_word(words_list)\n    guessed = is_word_guessed(secret_word, letters_guessed)\n    assert guessed == False, 'is_word_guessed did not return false'\n\ndef test_get_guessed_word():\n    words_list = load_words_list()\n    letters_guessed = ['a', 'i', 'p']\n    secret_word = load_word(words_list)\n    current_word = get_guessed_word(secret_word, letters_guessed)\n    assert type(current_word) == str, 'get_guessed_word did not return a string'\n\ndef test_is_guess_in_word():\n    words_list = load_words_list()\n    secret_word = load_word(words_list)\n    correct_guess = is_guess_in_word('r', secret_word)\n    assert type(correct_guess) == bool, 'is_guess_in_word did not return a bool'\n\ndef test_has_been_guessed():\n    letters_guessed = ['a', 'i', 'p']\n    already_guessed = has_been_guessed('a', letters_guessed)\n    assert already_guessed == True, 'has_been_guessed did not return true'\n\ndef test_changed_word():\n    words_list = load_words_list()\n    letters_guessed = ['a', 'i', 'p']\n    secret_word = load_word(words_list)\n    current_word = get_guessed_word(secret_word, letters_guessed)\n    new_word = changed_word(current_word, secret_word, words_list)\n    assert type(new_word) == str, 'changed_word did not return a string'\n\n#These function calls will start the game\nif __name__ == '__main__':\n    global_words_list = load_words_list()\n    secret_word = load_word(global_words_list)\n    spaceman(secret_word, global_words_list)" }, { "alpha_fraction": 0.5872958302497864, "alphanum_fraction": 0.5880217552185059, "avg_line_length": 42.0546875, "blob_id": "ebe278e1d2859f22ca182c27825885fca712a7b2", "content_id": "2c25775be8acba0556e09b6f69eed1a128d98a85", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5528, "license_type": "permissive", "max_line_length": 167, "num_lines": 128, "path": "/Projects/mad_lib/madlibs.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "from termcolor import colored\nfrom os import system\nimport readline\nimport random\nimport re\n\nclass Mad_Lib:\n    def __init__(self, mad_lib = None, name = None):\n        self.name = name\n        self.MAD_LIB = mad_lib\n        self.mad_lib = mad_lib\n        self.user_inputs = {}\n        
\n    # Check if user input is not blank and the correct pos, if available.\n    def is_valid(self, pos, user_input):\n        if type(user_input) == str:\n            if user_input == '' or re.match('\s+', user_input):\n                return False\n            else: \n                return True\n\n    # Take any text enclosed in [] and insert it into the inputs array \n    def get_inputs(self):\n        # Get the inputs from the mad lib and set each input to its pos tuple value\n        # Thanks to Kevin for helping me refactor the regex key from '\[([a-z]([a-z]|\s)+)\]' to '\[(\w(\w|\s)+)\]' so they can accept more than just lowercase letters\n        tuple_inputs = re.findall('\[(\w(\w|\s)+)\]', self.mad_lib)\n        for index, mad_lib_tuple in enumerate(tuple_inputs): tuple_inputs[index] = mad_lib_tuple[0]\n        # Get proper pos user input, if input is a pos, for each input\n        for mad_lib_input in tuple_inputs: \n            user_input = input(f'{mad_lib_input}: ') \n            while not self.is_valid(mad_lib_input, user_input):\n                user_input = input(f'Invalid, input is not a {mad_lib_input}: ')\n            if (mad_lib_input not in self.user_inputs): \n                self.user_inputs[mad_lib_input] = [colored(user_input, 'green')] \n            else:\n                self.user_inputs[mad_lib_input].append(colored(user_input, 'green'))\n\n    # Get random words to be used as inputs from the /usr/share/dict/words file\n    def get_random_inputs(self):\n        inputs = re.findall('\[(\w(\w|\s)+)\]', self.mad_lib)\n        for index, mad_lib_tuple in enumerate(inputs): inputs[index] = mad_lib_tuple[0]\n        words = open('/usr/share/dict/words', 'r').readlines()\n        for index, word in enumerate(words): words[index] = re.sub('(\\n)', '', word)\n        word_inputs = []\n        for i in range(len(inputs)):\n            word_inputs.append(colored(random.choice(words), 'green'))\n        self.user_inputs = word_inputs\n\n    # Substitute bracket-enclosed text with user input\n    def create(self):\n        self.get_inputs()\n        for key in self.user_inputs.keys():\n            # Shuffle the user input for each pos and insert them into mad lib\n            shuffled_list = random.sample(self.user_inputs[key], len(self.user_inputs[key]))\n            for index, word in enumerate(reversed(list(shuffled_list))):\n                self.mad_lib = re.sub(f'\[{key}\]', word, self.mad_lib, 1)\n    \n    def random_create(self):\n        self.get_random_inputs()\n        for index, word in enumerate(self.user_inputs):\n            self.mad_lib = re.sub('\[(\w(\w|\s)+)\]', word, self.mad_lib, 1)\n\n    def print(self):\n        system('clear')\n        print(self.name)\n        print(self.mad_lib)\n\n    def start(self, create_type):\n        if self.mad_lib and self.name and create_type == 'normal':\n            print(self.name)\n            self.create()\n            self.print()\n            self.mad_lib = self.MAD_LIB\n            self.user_inputs = {}\n        elif self.mad_lib and self.name and create_type == 'random':\n            self.random_create()\n            self.print()\n            self.mad_lib = self.MAD_LIB\n            self.user_inputs = {}\n        else: \n            print('Error, missing values')\n\n\nkevin_mad_lib = '''\n    Kevin is a very [adjective] [noun], he gets really [verb] when people have \n    the same name as him. Sometimes, Kevin enjoys [outdoor activity] with \n    his friend [noun].\n    '''\ntortoise_and_the_hare = '''\n    Once upon a time there was a hare who, [verb] how he could [action verb] [comparative adjective] \n    than anyone else. He always would be [word that ends in ing] tortoise for its [adjective]. Then \n    one day, the [adjective] tortoise [verb] back: “Who do you think you are? There’s no denying \n    you’re [adjective], but even you can be [adjective]!” The hare [verb] with [noun]. “[verb] \n    in a [competition]? By whom? Not you, surely! 
I bet there’s nobody in the [noun] that can win \n    against me, I’m so [adjective]. Now, why don’t you [verb] off?”\n    '''\n\nk_mad_lib = Mad_Lib(kevin_mad_lib, 'Kevin')\ntath_mad_lib = Mad_Lib(tortoise_and_the_hare, 'Tortoise and the Hare')\nmad_libs = [k_mad_lib, tath_mad_lib]\n\nsystem('clear')\nshould_continue = True\nwhile should_continue:\n    for index, mad_lib in enumerate(mad_libs):\n        print('{} {}'.format(index, mad_lib.name))\n\n    user_input = input(\"Enter a mad lib's index to start, or Q to quit: \")\n    while user_input != '' and re.match('\s+', user_input):\n        user_input = input(f\"Index is invalid, try again or hit Q to quit: \")\n    system('clear')\n\n    if user_input.isnumeric():\n        print('Would you like to enter your own inputs? Y/n')\n        user_selection = input()\n        if user_selection == 'Y' or user_selection == 'y' or re.match('(\s+)', user_selection):\n            mad_libs[int(user_input)].start('normal')\n        elif user_selection == 'N' or user_selection == 'n': \n            mad_libs[int(user_input)].start('random')\n        else:\n            print('Unknown Input')\n\n    elif user_input == 'Q' or user_input == 'q':\n        system('clear')\n        should_continue = False\n\n    else: \n        print('Invalid input')" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7546296119689941, "avg_line_length": 35, "blob_id": "ab714c7b27a9c37e7905e6219ea84bf57876242c", "content_id": "a8200538f5e336bd4ae129e84b2a481c85f05537", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 216, "license_type": "permissive", "max_line_length": 126, "num_lines": 6, "path": "/Projects/Spaceman/ReadMe.md", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "# Spaceman\nA Guessing Game where you try to guess a random word just by the number of letters and a prompt before you run out of guesses.\n\n## How to use this program\nOpen the file and run: <br>\n`python3 spaceman.py`\n" }, { "alpha_fraction": 0.6296842694282532, "alphanum_fraction": 0.6302744150161743, "avg_line_length": 48.130435943603516, "blob_id": "b0c6efa455164b5ec27c125f38ac2f9ef1d1178f", "content_id": "d1e0fd323dd14d8c6af3911963543b62d985d2d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3389, "license_type": "permissive", "max_line_length": 117, "num_lines": 69, "path": "/Projects/Herd-Immunity-Simulation/logger.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "class Logger(object):\n    ''' Utility class responsible for logging all interactions during the simulation. 
'''\n def __init__(self, file_name):\n self.file_name = file_name\n\n def write_metadata(self, pop_size, vacc_percentage, virus_name, mortality_rate,\n basic_repro_num):\n '''\n The simulation class should use this method immediately to log the specific\n parameters of the simulation as the first line of the file.\n '''\n log_file = open(self.file_name, 'w+')\n log_file.write(f'{pop_size} {vacc_percentage} {virus_name} {mortality_rate} {basic_repro_num}\\n')\n log_file.close()\n\n def log_interaction(self, person, random_person, random_person_sick=None,\n random_person_vacc=None, did_infect=None):\n '''\n The Simulation object should use this method to log every interaction\n a sick person has during each time step.\n\n The format of the log should be: \"{person.ID} infects {random_person.ID} \\n\"\n\n or the other edge cases:\n \"{person.ID} didn't infect {random_person.ID} because {'vaccinated' or 'already sick'} \\n\"\n '''\n log_file = open(self.file_name, 'a+')\n if random_person_sick is None and random_person_vacc is None and did_infect is None:\n log_file.write(f\"{person._id} didn't infect {random_person._id} because chance greater than repo_rate\\n\")\n elif did_infect is None and random_person_sick is True:\n log_file.write(f\"{person._id} didn't infect {random_person._id} because already sick\\n\")\n elif did_infect is None and random_person_vacc is True:\n log_file.write(f\"{person._id} didn't infect {random_person._id} because vaccinated\\n\")\n elif did_infect is True:\n log_file.write(f\"{person._id} infects {random_person._id}\\n\")\n log_file.close()\n\n def log_infection_survival(self, person, did_die_from_infection):\n ''' The Simulation object uses this method to log the results of every\n call of a Person object's .resolve_infection() method.\n\n The format of the log should be:\n \"{person.ID} died from infection\\n\" or \"{person.ID} survived infection.\\n\"\n '''\n log_file = open(self.file_name, 'a+')\n if did_die_from_infection:\n log_file.write(f'{person._id} survived infection\\n')\n else:\n log_file.write(f'{person._id} died from infection\\n')\n log_file.close()\n\n def log_time_step(self, time_step_number):\n ''' STRETCH CHALLENGE DETAILS:\n\n If you choose to extend this method, the format of the summary statistics logged\n are up to you.\n\n At minimum, it should contain:\n The number of people that were infected during this specific time step.\n The number of people that died on this specific time step.\n The total number of people infected in the population, including the newly infected\n The total number of dead, including those that died during this time step.\n\n The format of this log should be:\n \"Time step {time_step_number} ended, beginning {time_step_number + 1}\\n\"\n '''\n log_file = open(self.file_name, 'a+')\n log_file.write(f'Time step {time_step_number} ended, beginning {time_step_number + 1}\\n')\n log_file.close()" }, { "alpha_fraction": 0.4953007400035858, "alphanum_fraction": 0.6917293071746826, "avg_line_length": 15.625, "blob_id": "2bb0d149da316e4342f04d29e6f7672b74e6cdc6", "content_id": "1836e52499698f23cfec8039b7dbd4782a569bce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "permissive", "max_line_length": 28, "num_lines": 64, "path": "/Projects/superhero-dueler/requirements.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": 
"absl-py==0.8.0\naiofiles==0.4.0\naiohttp==3.5.4\nastor==0.8.0\nastroid==2.2.5\nasync-timeout==3.0.1\natomicwrites==1.3.0\nattrs==19.1.0\nautopep8==1.4.4\nchardet==3.0.4\nClick==7.0\nentrypoints==0.3\nflake8==3.7.8\nflake8-docstrings==1.3.1\nflake8-polyfill==1.0.2\nFlask==1.1.1\ngast==0.2.2\ngoogle-pasta==0.1.7\ngrpcio==1.23.0\nh5py==2.9.0\nidna==2.8\nimportlib-metadata==0.23\nisort==4.3.21\nitsdangerous==1.1.0\nJinja2==2.10.1\nKeras==2.2.5\nKeras-Applications==1.0.8\nKeras-Preprocessing==1.1.0\nlazy-object-proxy==1.4.2\nMarkdown==3.1.1\nMarkupSafe==1.1.1\nmccabe==0.6.1\nmock==2.0.0\nmore-itertools==7.2.0\nmultidict==4.5.2\nnumpy==1.17.1\npackaging==19.1\npbr==5.4.2\npluggy==0.13.0\nprotobuf==3.9.1\npy==1.8.0\npycodestyle==2.5.0\npydocstyle==4.0.0\npyflakes==2.1.1\npylint==2.3.1\npyparsing==2.4.2\npytest==5.1.2\nPyYAML==5.1.2\nrequests==2.13.0\nscipy==1.3.1\nsix==1.12.0\nsnowballstemmer==1.9.0\ntensorboard==1.14.0\ntensorflow==1.14.0\ntensorflow-estimator==1.14.0\ntermcolor==1.1.0\ntyped-ast==1.4.0\nvirtualenv==16.7.2\nVocabulary==1.0.4\nwcwidth==0.1.7\nWerkzeug==0.15.5\nwrapt==1.11.2\nyarl==1.3.0\nzipp==0.6.0\n" }, { "alpha_fraction": 0.6971544623374939, "alphanum_fraction": 0.7139227390289307, "avg_line_length": 48.224998474121094, "blob_id": "64e750c7595bc128c67a69576261f40746a8808c", "content_id": "79acfc0437baeea3df3aec3ce4302066bd272448", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1968, "license_type": "permissive", "max_line_length": 131, "num_lines": 40, "path": "/Projects/Herd-Immunity-Simulation/logger_test.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "from logger import Logger\nfrom person import Person\nfrom virus import Virus\n\n#### Test Logger Class ####\nsample_logger = Logger('sample-file.txt')\nsample_virus = Virus('Ebola', 0.25, 0.70)\nsample_person = Person(1, False)\nsample_infected = Person(2, False, sample_virus)\nsample_imune = Person(3, True)\n\ndef test_wite_metadata():\n sample_logger.write_metadata(10000, 0.9, sample_virus.name, sample_virus.mortality_rate, sample_virus.repro_rate)\n sample_file = open(sample_logger.file_name, 'r')\n assert sample_file.readlines()[0] == f'10000 0.9 {sample_virus.name} {sample_virus.mortality_rate} {sample_virus.repro_rate}\\n'\n sample_file.close()\n\ndef test_log_interaction():\n sample_file = open(sample_logger.file_name, 'r')\n sample_logger.log_interaction(sample_infected, sample_infected, True)\n assert sample_file.readlines()[1] == f\"{sample_infected._id} didn't infect {sample_infected._id} because already sick\\n\"\n sample_logger.log_interaction(sample_infected, sample_person, None, None, True)\n assert sample_file.readlines()[0] == f\"{sample_infected._id} infects {sample_person._id}\\n\"\n sample_logger.log_interaction(sample_infected, sample_imune, None, True, None)\n assert sample_file.readlines()[0] == f\"{sample_infected._id} didn't infect {sample_imune._id} because vaccinated\\n\"\n sample_file.close()\n\ndef test_log_infection_survival():\n sample_file = open(sample_logger.file_name, 'r')\n sample_logger.log_infection_survival(sample_person, True)\n assert sample_file.readlines()[4] == f'{sample_person._id} survived infection\\n'\n sample_logger.log_infection_survival(sample_person, False)\n assert sample_file.readlines()[0] == f'{sample_person._id} died from infection\\n'\n sample_file.close()\n\ndef test_log_time_step():\n sample_file = open(sample_logger.file_name, 'r')\n 
sample_logger.log_time_step(1)\n    assert sample_file.readlines()[6] == f'Time step 1 ended, beginning 2\\n'\n    sample_file.close()" }, { "alpha_fraction": 0.7295238375663757, "alphanum_fraction": 0.739047646522522, "avg_line_length": 29.882352828979492, "blob_id": "99d8f356acddcb5971ca35cadda118d7a22f69c2", "content_id": "ddcf758693376d67da961c83f43e1b8b9a2649ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 525, "license_type": "permissive", "max_line_length": 85, "num_lines": 17, "path": "/Projects/superhero-dueler/README.md", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "# Superhero Dueler\n\n## About\nThis project allows users to create superheroes and pit them against each other.\n\n## Getting started\nThe instructions below will inform you on how to set up, run, and use the application.\n\n## Installing\n1. Clone the repository <br>\n`git clone https://github.com/TannerYork/Superhero-Dueler` <br>\n\n## Using Code\n1. Go to the location of the cloned repository <br>\n2. Edit the code in superheros.py under the `__name__ == '__main__'` if-statement <br>\n3. Run superheros.py <br>\n`python3 superheros.py`\n" }, { "alpha_fraction": 0.6722830533981323, "alphanum_fraction": 0.6849199533462524, "avg_line_length": 30.263158798217773, "blob_id": "0b26c36736b437884e9c7bf953456e38578bced5", "content_id": "d2706728d9018624f96110094607e1ef73f8b8c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1187, "license_type": "permissive", "max_line_length": 45, "num_lines": 38, "path": "/Projects/Herd-Immunity-Simulation/person_test.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "from person import Person\nfrom virus import Virus\n\n#### Test Person Class ####\ndef test_vacc_person_instantiation():\n    person = Person(1, True)\n    assert person._id == 1\n    assert person.is_alive is True\n    assert person.is_vaccinated is True\n    assert person.infection is None\n\ndef test_not_vacc_person_instantiation():\n    person = Person(2, False)\n    assert person._id == 2\n    assert person.is_alive is True\n    assert person.is_vaccinated is False\n    assert person.infection is None\n\ndef test_sick_person_instantiation():\n    virus = Virus(\"Dysentery\", 0.7, 0.2)\n    person = Person(3, False, virus)\n    assert person._id == 3\n    assert person.is_alive is True\n    assert person.is_vaccinated is False\n    assert person.infection is virus\n\ndef test_did_survive_infection():\n    virus = Virus(\"Dysentery\", 0.7, 0.2)\n    person = Person(4, False, virus)\n    survived = person.did_survive_infection()\n    if survived:\n        assert person.is_alive is True\n        assert person.is_vaccinated is True\n        assert person.infection is None\n    else:\n        assert person.is_alive is False\n        assert person.is_vaccinated is False\n        assert person.infection is virus" }, { "alpha_fraction": 0.6308026313781738, "alphanum_fraction": 0.6365195512771606, "avg_line_length": 47.32044219970703, "blob_id": "314304a8063abb2975df267020cbcd2519f7a0da", "content_id": "f0d177fa1ace0333b762efa005995e917d7f047f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8746, "license_type": "permissive", "max_line_length": 136, "num_lines": 181, "path": "/Projects/Herd-Immunity-Simulation/simulation.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": 
"import random, sys\nimport uuid\nrandom.seed(42)\nfrom person import Person\nfrom logger import Logger\nfrom virus import Virus\n\n\nclass Simulation(object):\n ''' Main class that will run the herd immunity simulation program.\n Expects initialization parameters passed as command line arguments when file is run.\n\n Simulates the spread of a virus through a given population. The percentage of the\n population that are vaccinated, the size of the population, and the amount of initially\n infected people in a population are all variables that can be set when the program is run.\n '''\n def __init__(self, pop_size, vacc_percentage, virus, initial_infected=1):\n ''' Logger object logger records all events during the simulation.\n Population represents all Persons in the population.\n The next_person_id is the next available id for all created Persons,\n and should have a unique _id value.\n The vaccination percentage represents the total percentage of population\n vaccinated at the start of the simulation.\n You will need to keep track of the number of people currently infected with the disease.\n The total infected people is the running total that have been infected since the\n simulation began, including the currently infected people who died.\n You will also need to keep track of the number of people that have die as a result\n of the infection.\n\n All arguments will be passed as command-line arguments when the file is run.\n HINT: Look in the if __name__ == \"__main__\" function at the bottom.\n '''\n self.logger = Logger(f\"{virus.name}_simulation_pop_{pop_size}_vp_{vacc_percentage}_infected_{initial_infected}.txt\")\n self.virus = virus\n self.pop_size = pop_size\n self.initial_infected = initial_infected\n self.vacc_percentage = vacc_percentage\n self.population = self._create_population(initial_infected)\n self.newly_infected = []\n self.current_infected = self.initial_infected\n self.total_infected = initial_infected\n self.total_vacc = int(vacc_percentage*self.pop_size)\n self.num_people_vacc_saved = 0\n self.total_dead = 0\n\n def _create_population(self, initial_infected):\n '''This method will create the initial population.\n Args:\n initial_infected (int): The number of infected people that the simulation\n will begin with.\n Returns:\n list: A list of Person objects.\n '''\n infected_population = [Person(uuid.uuid4(), False, self.virus) for _ in range(0, initial_infected)]\n pop_vacc_percentage = int((self.pop_size*self.vacc_percentage))\n vacc_population = [Person(uuid.uuid4(), True) for _ in range(0, pop_vacc_percentage)]\n normal_population = [Person(uuid.uuid4(), False) for _ in range(0, self.pop_size-len(vacc_population)-len(infected_population))]\n population = infected_population+vacc_population+normal_population\n assert len(population) == self.pop_size\n return population\n\n def _simulation_should_continue(self):\n ''' The simulation should only end if the entire population is dead\n or everyone is vaccinated.\n Returns:\n bool: True for simulation should continue, False if it should end.\n '''\n if self.total_vacc+self.total_dead == self.pop_size or self.current_infected == 0:\n return False\n else:\n return True\n\n def run(self):\n ''' This method should run the simulation until all requirements for ending\n the simulation are met.\n '''\n time_step_counter = 0\n should_continue = self._simulation_should_continue()\n while should_continue:\n print(f'Time Step: {time_step_counter}')\n self.time_step()\n time_step_counter += 1\n should_continue = 
self._simulation_should_continue()\n self.logger.log_time_step(time_step_counter)\n print(f'The simulation has ended after {time_step_counter} turns.')\n print(f'Pop Size: {self.pop_size}, Vacc Percentage: {self.vacc_percentage}')\n print(f'Infection: {self.virus.name}, Repo Rate: {self.virus.repro_rate}, Mortality Rate: {self.virus.mortality_rate}')\n print(f'People Infected Percent: {self.total_infected/self.pop_size}')\n print(f'Total Dead: {self.total_dead}')\n print(f'Saved by Vaccination: {self.num_people_vacc_saved}')\n\n def time_step(self):\n '''This method should contain all the logic for computing one time step in the simulation.\n This includes:\n 1. 100 total interactions with a randon person for each infected person\n in the population\n 2. If the person is dead, grab another random person from the population.\n Since we don't interact with dead people, this does not count as an interaction.\n 3. Otherwise call simulation.interaction(person, random_person) and\n increment interaction counter by 1.'''\n people_alive = [person for person in self.population if person.is_alive]\n for person in self.population:\n if person.infection and person.is_alive:\n interaction_count = 0\n while interaction_count < 100:\n random_person = random.choice(people_alive)\n if random_person.is_alive and random_person != person: \n self.interaction(person, random_person)\n interaction_count += 1\n self._did_infected_survive()\n self._infect_newly_infected()\n\n def interaction(self, person, random_person):\n '''This method should be called any time two living people are selected for an\n interaction. It assumes that only living people are passed in as parameters.\n Args:\n person1 (person): The initial infected person\n random_person (person): The person that person1 interacts with.'''\n assert person.is_alive == True\n assert random_person.is_alive == True\n\n if random_person.is_vaccinated is False and random_person.infection is None \\\n and random_person._id not in self.newly_infected:\n chance = random.randint(0, 100)/100\n if chance < self.virus.repro_rate:\n self.total_infected += 1\n self.current_infected += 1\n self.newly_infected.append(random_person._id)\n self.logger.log_interaction(person, random_person, None, None, True)\n else:\n self.logger.log_interaction(person, random_person)\n elif random_person.is_vaccinated is True:\n self.num_people_vacc_saved += 1\n self.logger.log_interaction(person, random_person, None, True, None)\n elif random_person.infection or random_person._id in self.newly_infected:\n self.logger.log_interaction(person, random_person, True)\n else:\n print('An error occured during interaction')\n\n def _infect_newly_infected(self):\n ''' This method should iterate through the list of ._id stored in self.newly_infected\n and update each Person object with the disease. 
'''\n for person in self.population:\n if person._id in self.newly_infected:\n person.infection = self.virus\n self.newly_infected = []\n\n def _did_infected_survive(self):\n ''' This method interate over the infected population and \n calls did_survive_infection on them'''\n for person in self.population:\n if person.infection:\n did_survive = person.did_survive_infection()\n if did_survive is False: self.total_dead += 1\n if did_survive is True: self.total_vacc += 1\n self.current_infected -= 1\n self.logger.log_infection_survival(person, did_survive)\n\ndef test_calculations(self):\n for person in self.population:\n if person.infection and person.is_alive and person.is_vaccinated is True:\n print(person)\n\nif __name__ == \"__main__\":\n params = sys.argv[1:]\n virus_name = str(params[0]).replace(' ', '-')\n repro_num = float(params[1])\n mortality_rate = float(params[2])\n\n pop_size = int(params[3])\n vacc_percentage = float(params[4])\n\n if len(params) == 6:\n initial_infected = int(params[5])\n else:\n initial_infected = 1\n\n virus = Virus(virus_name, repro_num, mortality_rate)\n sim = Simulation(pop_size, vacc_percentage, virus, initial_infected)\n\n sim.run()\n" }, { "alpha_fraction": 0.7060379981994629, "alphanum_fraction": 0.7357754707336426, "avg_line_length": 68.03365325927734, "blob_id": "e1df2b3558ca0a7fa68cae062b9b5a6580a16105", "content_id": "b2f51cb5c7ed3459a7279635302198d56e94e37a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14367, "license_type": "permissive", "max_line_length": 595, "num_lines": 208, "path": "/README.md", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "# CS 1.1: Intro to Programming\n\n## Course Description\n\nThis course covers the fundamentals of programming including variables, data types, control flow, conditionals, loops, lists, dictionaries, functions, and object-oriented programming. Students will master top-level concepts in software development including writing pseudocode, using functions to process input into output, technical project planning, and diagramming. The course will also cover how computers work, from components and layout to memory and machine code instructions.\n\n## Important Links\n\n\n1. Assignments and Projects will be submitted using the [course page on trinket][]\n1. [Progress Tracker](https://docs.google.com/spreadsheets/d/1wcsF29-EIQpTDWGxiMIWpH8Rf3xgU2rg4qspEX2UERo/edit#gid=1456006823), this will have project grades for the course\n1. Quizzes will be evaluated using [the course page on gradescope](https://www.gradescope.com/)\n1. [Vibe Check](https://docs.google.com/forms/d/e/1FAIpQLSeXVJACSgyOJAYN4qZ5evcNmxu_yhdehVrm_yXdt_EZByt5dA/viewform?usp=sf_link)\n\n[course page on trinket]: https://trinket.io/jess-dahmen-makeschool-com/courses/cs-1-1\n\n\n\n### Why you should know this\n\nThis course is designed to equip students with the technical skills and foundational knowledge necessary\nto succeed at Make School. A strong foundation in CS concepts is the foundation for learning in any other course.\n\n## Prerequisites\n\n- Make School Prework or Ramp Program\n\n## Learning Objectives\n\nStudents by the end of the course will be able to ...\n\n1. Explain how to break down a problem into a programable algorithm.\n1. Translate user stories and requirements into a program.\n1. 
Practice core programming concepts including data structures, control flow, repetition, functions, and object-oriented programming.\n1. Implement core programming concepts in Python.\n1. Implement programming best practices, debugging, and testing in Python.\n\n## Schedule\n\n### Office Hours \n\n**Instructor (Jess Dahmen):** Thursdays from 1:30 pm - 3:00 pm (programming lab in the Mezzanine)\n\n**TA:** TBD\n\n### Lessons\n**Course Dates A:** Monday, October 21 – Wednesday, December 11, 2019 (8 weeks)\n\n**Course Dates B:** Tuesday, October 22 – Thursday, December 12, 2019 (8 weeks)\n\n**Class Times Section A:** Monday and Wednesday at 1:30–3:20pm (15 class sessions)\n\n**Class Times Section B:** Tuesday and Thursday at 9:30–11:20am (15 class sessions)\n\n| Class | Date | Topics |\n|:-----:|:----------------------:|:---------------------------------------:|\n| 1 | Mon, Oct 21/Tue, Oct 22 | [Variables, Operations, Print, Input, Reading Errors] |\n| 2 | Wed, Oct 23/Thu, Oct 24 | [Booleans and Conditionals] |\n| 3 | Mon, Oct 28/Tue, Oct 29 | [Loops, Lists, Functions, & None]|\n| 4 | Wed, Oct 30/Thu, Oct 31 | [Scoping, Code Quality, Reuse & Error Handling] |\n| 5 | Mon, Nov 4/Tue, Nov 5 | [Pseudocode, Flowcharts, & Problem Solving Strategy] |\n| 6 | Wed, Nov 6/Thu, Nov 7 | [Dictionaries & File I/O] |\n| 7 | Mon, Nov 11/Tue, Nov 12 | Review Day |\n| 8 | Wed, Nov 13/Thu, Nov 14 | [Debugging & Testing] |\n| 9 | Mon, Nov 18/Tue, Nov 19 | [OOP Part 1: Encapsulation] |\n| 10 | Wed, Nov 20/Thu, Nov 21 | [OOP Part 2: Inheritance] | \n| 11 | Mon, Nov 25/Tue, Nov 26 | [OOP Part 3: Polymorphism & Design] |\n| - | Wed, Nov 27/Thu, Nov 28 | **NO CLASS** - Thanksgiving |\n| 12 | Mon, Dec 2/Tue, Dec 3 | [Herd Immunity & Simulation] + Review Day |\n| 13 | Wed, Dec 4/Thu, Dec 5 | Herd Immunity Lab |\n| 14 | Mon, Dec 9/Tue, Dec 10 | Herd Immunity Lab |\n| 15 | Wed, Dec 11/Thu, Dec 12 | [Computer Components & Organization] |\n\n\n[Variables, Operations, Print, Input, Reading Errors]: https://docs.google.com/presentation/d/1u7v6paJXcLO19-its9vKNumqdNNrO3VPoKXVqR9wpIo/edit?usp=sharing\n[Booleans and Conditionals]: https://docs.google.com/presentation/d/1JeBxiDdG5cHW_UNYioPpUSE5PectIMqPlAapCUdq5yc/edit#slide=id.g6f593e5740_0_80\n[Loops, Lists, Functions, & None]: https://docs.google.com/presentation/d/1C5BddEG1UX2vGmM8--tgPNq82VUYY1JVw-r6WHQ35Cc/edit#slide=id.p1\n[Scoping, Code Quality, Reuse & Error Handling]: https://docs.google.com/presentation/d/15eOVtMCcPRLQuGQ9H1nZFCe8Cob88otrExWdCWoTpt0/edit#slide=id.g5df54035c8_2_170\n[Pseudocode, Flowcharts, & Problem Solving Strategy]: https://docs.google.com/presentation/d/1Zu0ZtOvVdkk27HXXmqkdT2pzBQRId0IoshbipiF7lGI/edit#slide=id.p\n[Dictionaries & File I/O]: https://docs.google.com/presentation/d/16pBudRTgP4mNksqsnYdKXSfasvE4a-FpHpcw4TOftaM/edit#slide=id.g70a74db5c1_0_59\n[Debugging & Testing]: https://docs.google.com/presentation/d/1rFzKKhnPF0GfN-pSc8M-pAyz2b1faCbAD-s2Go7an3g/edit#slide=id.p\n[OOP Part 1: Encapsulation]: https://docs.google.com/presentation/d/10VhNa-L6ssFCHEOQdKbX4118eGvyis_HzwAPqeip34o/edit#slide=id.g6037b114b4_0_11\n[OOP Part 2: Inheritance]: https://docs.google.com/presentation/d/1SGW-vPHIXtaUnzUiGg9kgGA48S2_SympuOpezWujl44/edit\n[OOP Part 3: Polymorphism & Design]: https://docs.google.com/presentation/d/18XaI2oJKCktavWKOeWtkfd1n7sOQYvMRTCI7oz-wfoI/edit#slide=id.g7939982a05_0_154\n[OOP Part 4: Design and Composition]: Lessons/12-CPU-Instructions-Memory.md\n[Herd Immunity & Simulation]: 
https://github.com/Make-School-Courses/CS-1.1-Intro-to-Programming/blob/master/Lessons/10-Herd-Immunity-Simulation.md\n[Computer Components & Organization]: https://drive.google.com/file/d/1e9KGjPk1GzLujdLFQ6sF1G_Trw6XZQp_/view\n[Herd Immunity Lab]: Lessons/12-CPU-Instructions-Memory.md\n\n### Tutorials and Projects\n\n| Assignment | Spec | Due Date |\n|------------------------------|--------------------------|-------------|\n| Backwards Poetry | [Poetry Spec](https://github.com/Make-School-Courses/CS-1.1-Intro-to-Programming/blob/master/Projects/Poetry.md) | Friday, Nov 8 |\n| Create a Quiz | [Create a Quiz Spec](https://github.com/Make-School-Courses/CS-1.1-Intro-to-Programming/blob/master/Projects/quizfan.md) | Tue, Nov 19 |\n| Super Hero Team Dueler | [Super Hero Team Dueler](https://www.makeschool.com/academy/track/superhero-team-dueler) | Wed, Dec 4 |\n| Herd Immunity Simulation | [Herd Immunity Spec](https://github.com/Make-School-Courses/CS-1.1-Intro-to-Programming/tree/master/Projects/herdimmunity_starter_term2_2019) | Tue, Dec 10 |\n\nRead the rubric linked in the spec document to ensure your project meets all rubric requirements before the due date.\n\n[Super Hero Team Dueler]: https://make.sc/superhero-team-dueler\n[Herd Immunity Simulation]: https://make.sc/herd-immunity\n\n\n\n### Quizzes\n\n| Quiz | Study Guide | Date |\n|------|-------------|-------------|\n| 1 | [1](https://docs.google.com/document/d/1oVCeAxf1GH6jnXB9qsWfvc4EOYTni24gWkTd3Jtn56Y/edit) | Mon, Nov 4/Tue, Nov 5 |\n| 2 | [2](https://docs.google.com/document/d/1NZ8k5hr2J6sy41ANsyU0U0V97BjipXjW1wDUo8cR86g/edit) | Mon, Nov 18/Tue, Nov 19 |\n| 3 | [3](https://docs.google.com/document/d/1Zat4fwVv0EL0O5J0zCSG9eceU4JJdexXInjWPCVYiA8/edit) | Wed, Dec 4/Thu, Dec 5 |\n\n## Tutorials, Projects, & Exercises\n\n### 10 Minute No Risk Homework\n\nAfter each class day, students will be given a trinket exercise as a homework assignment that takes approximately 10 minutes to complete and is due by midnight on the same day it is assigned. These exercises are no-risk and will be graded on effort, not correctness. Completing them helps the instructor see which topics students are struggling with, so complete them on your own to the best of your ability and be sure to note any concepts you are confused about in the code comments!\n\n| Hw | Date |\n|------|-------------|\n| [1](https://trinket.io/jess-dahmen-makeschool-com/courses/cs-1-1#/variables-operations-print-input-reading-errors/homework-1) | Mon, Oct 21/Tue, Oct 22 |\n| [2](https://trinket.io/jess-dahmen-makeschool-com/courses/cs-1-1#/mathematical-operations-booleans-conditionals/homework-2) | Wed, Oct 23/Thu, Oct 24 |\n| [3](https://trinket.io/jess-dahmen-makeschool-com/courses/cs-1-1#/lists-loops-functions-and-none/homework-3) | Mon, Oct 28/Tue, Oct 29 |\n| [4](https://trinket.io/jess-dahmen-makeschool-com/courses/cs-1-1#/scoping-error-handling-code-quality-and-reuse/homework-4) | Mon, Nov 4/Tue, Nov 5 |\n| [5](https://trinket.io/jess-dahmen-makeschool-com/courses/cs-1-1#/dictionaries-and-file-i-o/homework-5) | Mon, Nov 11/Tue, Nov 12 |\n\n\n### Tutorials\n\nStudents will complete the following guided tutorials, which are intended to help you get started on new topics. 
They are graded on completion only.\n\n- Super Hero Team Dueler\n\n### Projects\n\nStudents will complete the following self-guided projects, which are meant to be more challenging than tutorials to test your understanding of concepts.\n\n- Backwards Poetry\n- Create a Quiz\n- Herd Immunity Simulation\n\n**The first two projects require a minimum of 5 commits, while the last project and the superheroes tutorial require a minimum of 10 commits. Commits must take place throughout the period of time from when they are assigned to when they are due.**\n\n- **Good Example:** 40+ commits throughout the length of the course, looking for a healthy smattering of commits each week (such as 3-5 per day).\n- **Bad Example:** 10 commits on one day during the course and no others. Students who do this will be at severe risk of not passing the class.\n- **Unacceptable Example:** 2 commits the day before a project is due. Students who do this should not expect to pass the class.\n\n### Project and Tutorial Submission\n\nTo submit each project for feedback and grading, go to the [course page on trinket][] and be sure to follow each step below:\n- Self-assess your project code against the evaluation criteria in the associated rubric linked in the project assignment\n- Add your project github repo link to the trinket code as a comment\n\n\nRefer to the project schedule below for the due date of each project.\nSubmissions will be accepted until midnight on the due date.\nLate submissions will not be graded.\n\n### Project and Tutorial Resubmission Policy\n\nIf you have submitted your project on time and have received at least a 2 on all rubric sections, you will have 2 days after you receive graded feedback from the TA or instructor to fix any issues and resubmit for final grading.\n\n### Quiz Policy\n\nA total of 3 paper-and-pencil quizzes will be given in class. If the student experiences an unexpected event such as a medical issue or a family emergency, the student may be approved to retake the quiz during testing center hours within a week of when the quiz was given. To be approved to retake a missed quiz, the student must communicate with the instructor prior to the quiz date. \n\n## Evaluation Criteria\n\nTo pass this course, students must meet the following requirements:\n- Complete all required coding tutorials:\n  - Superhero Team Dueler\n- Complete all required coding projects (this includes all commit requirements and homework related to each project):\n  - Backwards Poetry\n  - Create a Quiz\n  - Herd Immunity\n- Submit all tutorials and projects with rubric scores by the due dates listed above\n- Pass all tutorial and project submissions according to the associated rubrics\n- Pass all quizzes (there will be a total of 3 quizzes, lowest quiz will be dropped)\n- Actively participate in class and abide by the attendance policy\n- Make up all classwork from all absences\n- Complete 10 after-class exercises \n\n### Evaluation Criteria Explained\n\n- To pass each project or tutorial, students must earn the required number of points or higher indicated on the associated rubric. Note that all points within one project or tutorial submission are fungible (that is, interchangeable) and so if one portion of work is below the \"Met All Expectations\" column of the rubric, another portion of work submitted can \"Exceed Expectations\" (generally by completing stretch challenges) to earn an extra point to make up for the missing one. 
Therefore, it's wise to complete stretch challenges as \"insurance\" in case some work does not meet expectations. \n- The instructor or teaching assistants will review students' submissions and verify or correct their self-assessed scores, then share feedback with the student through a GitHub issue opened on their repository. Feedback will include their status on that submission (that is, whether their work is passing the rubric). Resubmissions after the 2-day period mentioned previously will not be considered unless the instructor has approved a resubmission in writing due to exceptional circumstances.\n- Submissions received after the due date will not be considered unless the instructor has approved an extension in writing due to exceptional circumstances.\n\n\n## Resources\n\n- [Python Documentation](https://docs.python.org/3/index.html)\n- [Python Official Tutorial](https://docs.python.org/3/tutorial/index.html)\n- [Learn Python The Hard Way](https://learnpythonthehardway.org/python3/)\n- [Project Euler](https://projecteuler.net/)\n\n\n## Make School Course Policies\n\n- [Program Learning Outcomes](https://make.sc/program-learning-outcomes) - What you will achieve after finishing Make School; all courses are designed around these outcomes.\n- [Grading System](https://make.sc/grading-system) - How grading is done at Make School\n- [Diversity and Inclusion Statement](https://make.sc/diversity-and-inclusion-statement) - Learn about Diversity and Inclusion at Make School\n- [Academic Honesty](https://make.sc/academic-honesty-policy) - Our policies around plagiarism, cheating, and other forms of academic misconduct \n- [Attendance Policy](https://make.sc/attendance-policy) - What we expect from you in terms of attendance for all classes at Make School\n- [Course Credit Policy](https://make.sc/course-credit-policy) - Our policy for how you obtain credit for your courses\n- [Disability Services (Academic Accommodations)](https://make.sc/disability-services) - Services and accommodations we provide for students\n- [Student Handbook](https://make.sc/student-handbook) - Guidelines, policies, and resources for all Make School students\n" }, { "alpha_fraction": 0.7583497166633606, "alphanum_fraction": 0.7603143453598022, "avg_line_length": 35.35714340209961, "blob_id": "1841526f38793921797c7fd5ebfb16bbe3205186", "content_id": "a3db8076450233d88c9da36e6ebdce9da2cd02a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 509, "license_type": "permissive", "max_line_length": 253, "num_lines": 14, "path": "/Projects/mad_lib/README.md", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "# Mad Lib Project\n\nMad Lib is currently a command-line application that uses Python OOP to take in a string mad lib that has brackets around the desired blank inputs. 
It then asks the user for each input and, once all the inputs are filled, displays the completed mad lib.\n\n## Requirements\n`from termcolor import colored` <br>\n`from os import system` <br>\n`import readline` <br>\n`import random` <br>\n`import re`\n\n## Directions\nAfter download, open the mad_lib folder in a terminal and run <br>\n`python3 madlibs.py`\n" }, { "alpha_fraction": 0.6223517060279846, "alphanum_fraction": 0.6296345591545105, "avg_line_length": 23.57391357421875, "blob_id": "9d6749ae8b4816a145b669f47144e9c7ee1eede5", "content_id": "b40483e4f956f0eb4fd45014ddb46143a8b5553d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2827, "license_type": "permissive", "max_line_length": 190, "num_lines": 115, "path": "/Projects/checklist/checklist.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "from termcolor import colored\nfrom os import system\n\nchecklist = list()\n\n# CREATE\ndef create(item):\n    checklist.append(item)\n\n# READ\ndef read(index):\n    return checklist[index]\n\n# UPDATE\ndef update(index, item):\n    checklist[index] = item\n\n# DELETE\ndef delete(index):\n    checklist.pop(index)\n\ndef list_all_items():\n    index = 0\n    for list_item in checklist:\n        print('{} {}'.format(index, list_item))\n        index += 1\n\ndef mark_completed(index):\n    checklist[index] = colored(checklist[index], 'green')\n\ndef select(function_code):\n    if function_code == 'C' or function_code == 'c':\n        input_item = user_input('Input item: ')\n        clear_terminal()\n        create(input_item)\n\n    elif function_code == 'R' or function_code == 'r':\n        item_index = valid_index(user_input('Index Number: '))\n        clear_terminal()\n        if item_index is not False:\n            print(read(item_index))\n\n    elif function_code == 'U' or function_code == 'u':\n        item_index = valid_index(user_input('Update Checklist Index Number: '))\n        if item_index is not False:\n            update(item_index, user_input('New Item: '))\n\n    elif function_code == 'M' or function_code == 'm':\n        item_index = valid_index(user_input('Check Index Number: '))\n        clear_terminal()\n        if item_index is not False:\n            mark_completed(item_index)\n\n    elif function_code == 'D' or function_code == 'd':\n        item_index = valid_index(user_input('Delete Checklist Index Number: '))\n        clear_terminal()\n        if item_index is not False:\n            delete(item_index)\n\n    elif function_code == 'P' or function_code == 'p':\n        clear_terminal()\n        list_all_items()\n\n    elif function_code == 'Q' or function_code == 'q':\n        return False\n\n    else:\n        input('Unknown option')\n        clear_terminal()\n    return True\n\ndef user_input(prompt):\n    user_input = input(prompt)\n    return user_input\n\ndef valid_index(index):\n    # A valid index must be numeric and within the bounds of the checklist (0 to len - 1)\n    if index.isnumeric() and len(checklist) != 0 and int(index) < len(checklist):\n        return int(index)\n    else:\n        input('Invalid index: must be an integer within range')\n        return False\n\ndef clear_terminal():\n    system('clear')\n\nrunning = True\nclear_terminal()\nwhile running:\n    selection = user_input('Press C to add an item, R to read from the list, U to update an item, M to mark an item as complete, D to delete an item, P to display all items, and Q to quit ')\n    running = select(selection)\n\n\ndef test():\n    create('purple sox')\n    create('red cloaks')\n\n    print(read(0))\n    print(read(1))\n\n    update(0, 'purple socks')\n    delete(1)\n\n    print(read(0))\n\n    mark_completed(0)\n\n    list_all_items()\n\n    select('C')\n\n    list_all_items()\n\n    select('R')\n\n    list_all_items()\n\n" }, {
"alpha_fraction": 0.5885939002037048, "alphanum_fraction": 0.5919370651245117, "avg_line_length": 37.52525329589844, "blob_id": "1969da0bc574adf3889cb0f346618bfd8a91d1e2", "content_id": "8b135526835db9f91afef98632881e879abbd714", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15255, "license_type": "permissive", "max_line_length": 143, "num_lines": 396, "path": "/Projects/superhero-dueler/superheroes.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "import random\nimport math\nfrom functools import reduce\n\n\nclass Ability():\n def __init__(self, name, attack_strength):\n '''\n Initiate the abilities class with its name and attack strength\n Args:\n name (string): a single word discriptor of the ability\n attack_strength (int): the value of the abilities strength\n Returns:\n ability object: a new object\n '''\n self.name = name\n self.max_damage = attack_strength\n\n def attack(self):\n ''' A method that returns random int between 0 and the initilized max_damage property'''\n return random.randint(0, self.max_damage)\n\n\nclass Weapon(Ability):\n def __init__(self, name, attack_strength):\n '''\n Initiate the weapons subclass with name and attack_strength\n Args:\n name (string): a single word descriptor of the weapon\n attack_strength (int): value for the weapons may power\n Returns:\n weapon (object): new weapon object\n '''\n super().__init__(name, attack_strength)\n\n def attack(self):\n '''A method that returns a randome int between 0 and half the iniitlaized max_damage property'''\n return random.randint(math.floor(self.max_damage/2), self.max_damage)\n\n\nclass Armor():\n def __init__(self, name, blocking_strength):\n '''\n Initiate the armor class with name and blocking strength\n Args:\n name (string): a detailed one word discriptor of the armor\n blocking_strength (int): the max value that the armor can block\n Returns:\n armor (object): a new armor object\n '''\n self.name = name\n self.max_block = blocking_strength\n\n def block(self):\n '''A method that returns a random int between 0 and the initalized max_block property'''\n return random.randint(0, self.max_block)\n\n\nclass Hero():\n # TODO: Make a way for hero abilities to run out or be lost, and watch for heroes still having abilities\n def __init__(self, name, starting_health=100):\n '''\n Initiate the hero class with name, starting health, kills, and deaths\n Args:\n name (string): a one word discription of the hero\n starting_health (int): a value for the heroes inital health with a default of 100\n Returns:\n hero (object): a new hero object\n '''\n self.name = name\n self.abilities = []\n self.armors = []\n self.starting_health = starting_health\n self.current_health = starting_health\n self.kills = 0\n self.deaths = 0\n\n def add_kill(self, num_kills):\n '''A method for adding a kill to the heroes kill count'''\n self.kills += num_kills\n\n def add_death(self, num_deaths):\n '''A method for adding a death the the heroes death count'''\n self.deaths += num_deaths\n\n def add_ability(self, new_ability):\n '''A setter method that adds a new ability to the hero'''\n self.abilities.append(new_ability)\n\n def add_armor(self, new_armor):\n '''A setter method that adds a new armor object to the hero'''\n self.armors.append(new_armor)\n\n def add_weapon(self, weapon):\n '''A method for adding a new weapon object to the hero'''\n self.abilities.append(weapon)\n\n def attack(self):\n '''A getter method 
that returns the sum of attack values from all of the hero's abilities'''\n        return sum(ability.attack() for ability in self.abilities)\n\n    def defend(self, damage_amount=0):\n        '''\n        A getter method that returns the sum of block values from the hero's armors\n        Args:\n            damage_amount (int): unused; the total block does not depend on the incoming damage\n        Returns:\n            block_total (int): the sum of the block values rolled by the hero's armors\n        '''\n        return sum(armor.block() for armor in self.armors)\n\n    def take_damage(self, damage):\n        '''\n        A setter method that updates the hero's current_health to reflect the damage minus the defense\n        Args:\n            damage (int): the value of the damage given to the hero\n        Updates:\n            current_health (int): reduced by whatever damage remains after defending\n        '''\n        damage_taken = damage - self.defend(damage)\n        if damage_taken > 0:\n            self.current_health -= damage_taken\n\n    def is_alive(self):\n        '''A getter method that returns true or false depending on whether or not the hero is alive'''\n        return self.current_health > 0\n\n    def fight(self, opponent):\n        '''\n        A method that takes in a hero to fight and pits them against the current hero until one dies\n        Args:\n            opponent (Hero): a hero object\n        Returns:\n            None: prints which hero won or that the fight ended in a draw\n        '''\n        # Guard against an unwinnable fight: with no abilities on either side, no damage is ever dealt\n        if not self.abilities and not opponent.abilities:\n            print('Draw! Neither hero has any abilities.')\n            return\n        print(f'{self.name} and {opponent.name} are fighting!')\n        while self.is_alive() and opponent.is_alive():\n            hero_attack = self.attack()\n            opponent_attack = opponent.attack()\n            self.take_damage(opponent_attack)\n            opponent.take_damage(hero_attack)\n        if not self.is_alive() and not opponent.is_alive():\n            print('Draw! 
Both heroes died from their injuries!')\n            # Each hero took down the other, so both are credited with a kill and a death\n            self.add_kill(1)\n            self.add_death(1)\n            opponent.add_kill(1)\n            opponent.add_death(1)\n        elif not self.is_alive():\n            print(f'{opponent.name} won!')\n            self.add_death(1)\n            opponent.add_kill(1)\n        elif not opponent.is_alive():\n            print(f'{self.name} won!')\n            self.add_kill(1)\n            opponent.add_death(1)\n\n\nclass Team():\n    def __init__(self, name):\n        '''\n        Initialize the Team class with a name and an empty heroes list\n        Args:\n            name (string): a single word descriptor of the team\n        Returns:\n            team (object): a new team object\n        '''\n        self.name = name\n        self.heroes = []\n\n    def add_hero(self, hero):\n        '''A method for adding a new hero to the team'''\n        self.heroes.append(hero)\n\n    def remove_hero(self, hero_name):\n        '''A method for removing a hero from the team'''\n        for index, hero in enumerate(self.heroes):\n            if hero.name == hero_name:\n                del self.heroes[index]\n                return 0\n\n    def view_all_heroes(self):\n        '''A method for seeing all the heroes on the team'''\n        for hero in self.heroes:\n            print(hero.name)\n\n    def heroes_alive(self):\n        return [hero for hero in self.heroes if hero.is_alive()]\n\n    def fight_should_continue(self, opponent, team_alive, opponents_alive):\n        '''A helper function for checking if a fight should continue or not'''\n        team_abilities = sum(len(hero.abilities) for hero in self.heroes)\n        opponent_abilities = sum(len(hero.abilities) for hero in opponent.heroes)\n        # The fight continues while both sides have living heroes and at least one side can still attack\n        if len(team_alive) > 0 and len(opponents_alive) > 0:\n            return team_abilities > 0 or opponent_abilities > 0\n        return False\n\n    def attack(self, opponent):\n        '''A method for battling the team against another'''\n        team_alive = self.heroes_alive()\n        opponents_alive = opponent.heroes_alive()\n        while self.fight_should_continue(opponent, team_alive, opponents_alive):\n            team_hero = random.choice(team_alive)\n            opponent_hero = random.choice(opponents_alive)\n            team_hero.fight(opponent_hero)\n            team_alive = self.heroes_alive()\n            opponents_alive = opponent.heroes_alive()\n\n    def revive_heroes(self):\n        '''A method for resetting each hero's health to its starting_health'''\n        for hero in self.heroes:\n            hero.current_health = hero.starting_health\n\n    def stats(self):\n        '''A method for printing the team's stats'''\n        for hero in self.heroes:\n            if hero.deaths == 0:\n                kill_death_ratio = hero.kills / 1\n            else:\n                kill_death_ratio = hero.kills / hero.deaths\n            print(f'{hero.name}: {kill_death_ratio}')\n\n\nclass Arena():\n    def __init__(self, team_one=None, team_two=None):\n        '''Initialize the Arena class with team one and team two properties'''\n        self.team_one = team_one\n        self.team_two = team_two\n\n    def create(self, type_prompt, type_reference):\n        '''\n        A helper method for prompting the user for values to create a specific object\n        Args:\n            type_prompt (string): type to enter into the input prompts\n            type_reference (class): class reference for the type to create\n        Returns:\n            object: a new type_reference object\n        '''\n        print(f'To create a new {type_prompt}, enter the following:')\n        name = valid_str_input('Name: ')\n        strength = valid_int_input('Strength: ')\n        return type_reference(name, strength)\n\n    def create_hero(self):\n        '''A helper method for prompting the user for values to create a Hero instance'''\n        print('To create a hero follow the steps below:')\n        name = valid_str_input('Name: ')\n        health = greater_than_zero_input(f'How much 
health do you want {name} to have? ')\n        numb_armors = valid_int_input(f'How many pieces of armor do you want {name} to have? ')\n        armors = [self.create('Armor', Armor) for index in range(numb_armors)]\n        numb_abilities = valid_int_input(f'How many abilities do you want {name} to have? ')\n        abilities = [self.create('Ability', Ability) for index in range(numb_abilities)]\n        numb_weapons = valid_int_input(f'How many weapons do you want {name} to have? ')\n        abilities += [self.create('Weapon', Weapon) for index in range(numb_weapons)]\n        hero = Hero(name, health)\n        for armor in armors:\n            hero.add_armor(armor)\n        for ability in abilities:\n            hero.add_ability(ability)\n        return hero\n\n    def build_team_one(self):\n        '''A helper method for prompting the user for values to create team one for the arena'''\n        print('Create the first team by following the steps below: ')\n        name = valid_str_input('Team Name: ')\n        numb_heroes = greater_than_zero_input('How many heroes are on this team? ')\n        self.team_one = Team(name)\n        for _ in range(numb_heroes):\n            self.team_one.add_hero(self.create_hero())\n\n    def build_team_two(self):\n        '''A helper method for prompting the user for values to create team two for the arena'''\n        print('Create the second team by following the steps below: ')\n        name = valid_str_input('Team Name: ')\n        numb_heroes = greater_than_zero_input('How many heroes are on this team? ')\n        self.team_two = Team(name)\n        for _ in range(numb_heroes):\n            self.team_two.add_hero(self.create_hero())\n\n    def team_battle(self):\n        '''A method for making the two teams in the arena fight against each other'''\n        self.team_one.attack(self.team_two)\n\n    def show_stats(self):\n        '''A method for printing out the teams' statistics'''\n        alive_team_one = self.team_one.heroes_alive()\n        alive_team_two = self.team_two.heroes_alive()\n        if len(alive_team_one) <= 0 and len(alive_team_two) <= 0:\n            print('Draw! 
No heroes survived the battle.')\n        elif len(alive_team_one) <= 0:\n            print(f'{self.team_two.name} Won!')\n            for hero in alive_team_two:\n                print(f'{hero.name} Survived')\n        elif len(alive_team_two) <= 0:\n            print(f'{self.team_one.name} Won!')\n            for hero in alive_team_one:\n                print(f'{hero.name} Survived')\n        else:\n            print('I\\'d tell you what happened but I\\'ll let the stats speak for themselves')\n            for hero in alive_team_one:\n                print(f'{hero.name} survived...')\n            for hero in alive_team_two:\n                print(f'{hero.name} survived...')\n        team_one_kd_average = average_kd(self.team_one.heroes)\n        team_two_kd_average = average_kd(self.team_two.heroes)\n\n        print('Average Kill/Death Ratios')\n        print(f'Team One: {team_one_kd_average}',\n              f'Team Two: {team_two_kd_average}')\n\n\ndef valid_str_input(prompt):\n    user_input = input(prompt)\n    while not user_input.strip():\n        user_input = input('Invalid input, try again: ')\n    return user_input\n\ndef is_num_equal_to_or_greater_than_zero(user_input):\n    # isnumeric() rejects signs and decimals, so this accepts any non-negative integer string\n    if user_input.isnumeric() and int(user_input) >= 0:\n        return True\n    return False\n\ndef valid_int_input(prompt):\n    user_input = input(prompt)\n    while not is_num_equal_to_or_greater_than_zero(user_input):\n        user_input = input('Non-negative integer required, try again: ')\n    return int(user_input)\n\ndef greater_than_zero_input(prompt):\n    user_input = valid_int_input(prompt)\n    while user_input <= 0: user_input = valid_int_input('Needs to be greater than zero: ')\n    return user_input\n\ndef average_kd(heroes):\n    kd_sum = 0\n    for hero in heroes:\n        if hero.deaths == 0:\n            kd_sum += hero.kills\n        else:\n            kd_sum += hero.kills/hero.deaths\n    return kd_sum / len(heroes)\n\nif __name__ == \"__main__\":\n    game_is_running = True\n    arena = Arena()\n    arena.build_team_one()\n    arena.build_team_two()\n\n    while game_is_running:\n        arena.team_battle()\n        arena.show_stats()\n        play_again = input(\"Play Again? 
Y or N: \")\n\n if play_again.lower() == \"n\":\n game_is_running = False\n else:\n arena.team_one.revive_heroes()\n arena.team_two.revive_heroes()\n\n## Test Battling ###\n# if __name__ == \"__main__\":\n# ability = Ability('Kick', 15)\n# another_ability = Ability('Punch', 10)\n# armor = Armor('Shirt', 5)\n# another_armor = Armor('Mouth Guard', 8)\n\n# Bob = Hero('Bob Johson')\n# Bob.add_ability(ability)\n# Bob.add_ability(another_ability)\n# Bob.add_armor(armor)\n# Bob.add_armor(another_armor)\n\n# Jan_The_Man = Hero('Jan the Man')\n# Jan_The_Man.add_ability(ability)\n# Jan_The_Man.add_ability(another_ability)\n# Jan_The_Man.add_armor(armor)\n# Jan_The_Man.add_armor(another_armor)\n\n# team_one = Team('Accounting', [Bob])\n# team_two = Team('Marketing', [Jan_The_Man])\n\n# arena = Arena(team_one, team_two)\n# arena.team_battle()\n# arena.show_stats()" }, { "alpha_fraction": 0.6297760009765625, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 36.04878234863281, "blob_id": "428c9777e3eef128573f5ba2df349e54a3513189", "content_id": "e6282102272677b4c47716fbc7487ae458d9a296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1518, "license_type": "permissive", "max_line_length": 90, "num_lines": 41, "path": "/Projects/Herd-Immunity-Simulation/simulation_test.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "from simulation import Simulation\nfrom logger import Logger\nfrom person import Person\nfrom virus import Virus\n\n#### Test Simulation Class ####\ndef test_create_population():\n virus = Virus(\"HIV\", 0.8, 0.3)\n sim = Simulation(1000, 0.6, virus, 20)\n population = sim._create_population(20)\n assert len(population) == 1000\n percent_vacc = sum(1 for person in population if person.is_vaccinated)/len(population)\n assert percent_vacc == 0.6\n num_infected = sum(1 for person in population if person.infection)\n assert num_infected == 20\n\ndef test_simulation_should_continue():\n virus = Virus(\"HIV\", 0.8, 0.3)\n sim_one = Simulation(1000, 0.6, virus, 20)\n assert sim_one._simulation_should_continue() == True\n sim_two = Simulation(1000, 1, virus, 0)\n assert sim_two._simulation_should_continue() == False\n sim_three = Simulation(1000, 0.0, virus, 1000)\n assert sim_three._simulation_should_continue() == True\n\ndef test_interaction():\n virus = Virus('Black Plauge', 1.0, 0.5)\n sim = Simulation(1000, 0.6, virus, 20)\n infected_person = Person(1, False, virus)\n person = Person(2, False)\n sim.interaction(infected_person, person)\n assert person._id in sim.newly_infected\n\ndef test_infect_newly_infected():\n virus = Virus('Black Plauge', 1.0, 0.5)\n sim = Simulation(1000, 0.6, virus, 20)\n person = Person(2, False)\n sim.population.append(person)\n sim.newly_infected.append(person._id)\n sim._infect_newly_infected()\n assert person.infection == virus" }, { "alpha_fraction": 0.6326671242713928, "alphanum_fraction": 0.6388697624206543, "avg_line_length": 38.24324417114258, "blob_id": "eb04dc16a3988f93c1707757b54b5cee44f171c9", "content_id": "122d98a923da2b14c279e118012efe7c4c948835", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "permissive", "max_line_length": 89, "num_lines": 37, "path": "/Projects/Herd-Immunity-Simulation/person.py", "repo_name": "Tanner-York-Make-School/CS-1.1-Intro-to-Programming", "src_encoding": "UTF-8", "text": "import 
random\nrandom.seed(42)\n\n\nclass Person(object):\n    ''' Person objects will populate the simulation. '''\n\n    def __init__(self, _id, is_vaccinated, infection=None):\n        ''' We start out with is_alive = True, because we don't make vampires or zombies.\n        All other values will be set by the simulation when it makes each Person object.\n\n        If person is chosen to be infected when the population is created, the simulation\n        should instantiate a Virus object and set it as the value\n        self.infection. Otherwise, self.infection should be set to None.\n        '''\n        self._id = _id\n        self.is_alive = True\n        self.is_vaccinated = is_vaccinated\n        self.infection = infection\n\n    def did_survive_infection(self):\n        ''' Generate a random number and compare to virus's mortality_rate.\n        If random number is smaller, person dies from the disease.\n        If Person survives, they become vaccinated and they have no infection.\n        Return a boolean value indicating whether they survived the infection.\n        '''\n        assert not self.is_vaccinated\n        assert self.infection is not None\n        chance = random.randint(0, 100)/100\n        if chance < self.infection.mortality_rate:\n            self.is_alive = False\n            self.infection = None\n            return False\n        else:\n            self.infection = None\n            self.is_vaccinated = True\n            return True" } ]
16
utkarshdalal/brookings_data_scrapers
https://github.com/utkarshdalal/brookings_data_scrapers
9ed16cac6ee212c9728919bde7cc787b7193be09
237c040ee7fbdd983b9adb16078d03c101e924da
bed529ddc4e828bab38f246b78b4319145712de9
refs/heads/master
2022-12-09T21:09:59.493372
2022-05-02T13:51:06
2022-05-02T13:51:06
252,749,323
0
1
null
2020-04-03T14:02:44
2022-05-02T10:26:11
2022-12-08T03:58:29
Python
[ { "alpha_fraction": 0.6519266963005066, "alphanum_fraction": 0.6569803953170776, "avg_line_length": 47.70769119262695, "blob_id": "5c499b4221e4e0a259df32753e69eba0b6e4e37b", "content_id": "eb0a60005922ce21ff37710d4306c1862e9db705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3166, "license_type": "no_license", "max_line_length": 112, "num_lines": 65, "path": "/recalculate_moving_averages.py", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "import datetime\nimport pymysql\nfrom sqlalchemy import create_engine\nimport pandas as pd\nimport os\nimport sys\n\n\n# enter start and end timestamps in YYYY-MM-DD HH:MM:SS format\ndef write_moving_averages(start_timestamp, end_timestamp):\n host = os.environ['HOST']\n port = int(os.environ['PORT'])\n dbname = os.environ['DB']\n user = os.environ['USER']\n password = os.environ['PASSWORD']\n\n print(\"Writing Moving Averages\")\n\n engine = create_engine(f'mysql+pymysql://{user}:{password}@{host}:{port}/{dbname}')\n connection = engine.connect()\n\n start_datetime = datetime.datetime.strptime(start_timestamp, '%Y-%m-%d %H:%M:%S')\n end_datetime = datetime.datetime.strptime(end_timestamp, '%Y-%m-%d %H:%M:%S')\n\n range_start_time = (start_datetime - datetime.timedelta(days=31)).strftime(\"%Y-%m-%d %H:%M:%S\")\n range_end_time = (end_datetime + datetime.timedelta(days=31)).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n x = pd.read_sql(f'select 5_min_rounded_timestamp, avg(thermal_generation_corrected) as thermal_generation, \\\n avg(gas_generation_corrected) as gas_generation, \\\n avg(hydro_generation_corrected) as hydro_generation, \\\n avg(nuclear_generation_corrected) as nuclear_generation, \\\n avg(renewable_generation_corrected) as renewable_generation, avg(demand_met) as demand_met \\\n from merit_india_data_rounded_corrected where \\\n timestamp >= \"{range_start_time}\" and timestamp <= \"{range_end_time}\" \\\n group by 5_min_rounded_timestamp', engine)\n x['5_min_rounded_timestamp'] = pd.to_datetime(x['5_min_rounded_timestamp'])\n\n timestamp_index_df = x.set_index('5_min_rounded_timestamp')\n\n daily_moving_averages = timestamp_index_df.rolling('24h').mean()\n daily_moving_averages[daily_moving_averages.index >= start_datetime]\\\n .to_sql(name='merit_india_daily_moving_averages_temp', con=engine, index_label='timestamp',\n if_exists='replace')\n connection.execute('REPLACE INTO merit_india_daily_moving_averages '\n 'select * from merit_india_daily_moving_averages_temp')\n\n weekly_moving_averages = timestamp_index_df.rolling('7d').mean()\n weekly_moving_averages[weekly_moving_averages.index >= start_datetime]\\\n .to_sql(name='merit_india_weekly_moving_averages_temp', con=engine, index_label='timestamp',\n if_exists='replace')\n connection.execute('REPLACE INTO merit_india_weekly_moving_averages '\n 'select * from merit_india_weekly_moving_averages_temp')\n\n monthly_moving_averages = timestamp_index_df.rolling('30d').mean()\n monthly_moving_averages[monthly_moving_averages.index >= start_datetime]\\\n .to_sql(name='merit_india_monthly_moving_averages_temp', con=engine, index_label='timestamp',\n if_exists='replace')\n connection.execute('REPLACE INTO merit_india_monthly_moving_averages '\n 'select * from merit_india_monthly_moving_averages_temp')\n\n connection.close()\n\n\nif __name__ == \"__main__\":\n write_moving_averages(sys.argv[1], sys.argv[2])\n" }, { "alpha_fraction": 0.6519480347633362, "alphanum_fraction": 0.6580086350440979, 
"avg_line_length": 31.507246017456055, "blob_id": "ea557b078f082c652471cd136aa083d1b5224dda", "content_id": "c2c58cf80de50d778df2e50adbe96f1dee4a89eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2310, "license_type": "no_license", "max_line_length": 102, "num_lines": 69, "path": "/delhiscraper.py", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen as uReq\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nfrom csv import writer\r\nimport os\r\nimport pymysql\r\nimport helper_methods\r\n\r\n\r\ndef currentValues():\r\n page_url = 'http://www.delhisldc.org/Redirect.aspx'\r\n r = uReq(page_url)\r\n soup = BeautifulSoup(r.read(), \"html.parser\")\r\n\r\n data_dict = {}\r\n \r\n current_revision = helper_methods.read_datetime_from_span_id(soup, \"DynamicData1_LblDate\",\r\n 'DD-MMM-YYYY hh:mm:ss A')\r\n data_dict['timestamp'] = current_revision.datetime\r\n delhi_load = helper_methods.read_value_from_span_id(soup, \"DynamicData1_LblLoad\")\r\n data_dict['delhi_load'] = delhi_load\r\n schedule = helper_methods.read_value_from_span_id(soup, \"DynamicData1_LblCurrScheduledAllocation\")\r\n data_dict['schedule'] = schedule\r\n drawl = helper_methods.read_value_from_span_id(soup, \"DynamicData1_LblCurrDrawal\")\r\n data_dict['drawl'] = drawl\r\n delhi_gen = helper_methods.read_value_from_span_id(soup, \"DynamicData1_LblCurrGen\")\r\n data_dict['delhi_generation'] = delhi_gen\r\n freq = helper_methods.read_value_from_span_id(soup, 'DynamicData1_LblFrequency')\r\n data_dict['frequency_hz'] = freq\r\n \r\n return data_dict\r\n\r\n\r\ndef append_data(conn):\r\n cursor = conn.cursor()\r\n try:\r\n delhi_values = currentValues()\r\n helper_methods.insert_into_table('delhi_data', delhi_values, cursor, conn)\r\n except Exception as e:\r\n print(f'Could not fetch delhi data: {e}')\r\n\r\n\r\ndef run():\r\n host = os.environ['HOST']\r\n port = int(os.environ['PORT'])\r\n dbname = os.environ['DB']\r\n user = os.environ['USER']\r\n password = os.environ['PASSWORD']\r\n conn = pymysql.connect(host, user=user, port=port,\r\n passwd=password, db=dbname)\r\n append_data(conn)\r\n\r\n\r\ndef lambda_handler(event, context):\r\n run()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n\r\n\r\n#Span IDS - \r\n#Delhi Load = ContentPlaceHolder3_LblLoad\r\n#Schedule = ContentPlaceHolder3_LblCurrScheduledAllocation\r\n#Drawl = ContentPlaceHolder3_LblCurrDrawal\r\n#Current Revision = ContentPlaceHolder3_LblDate\r\n#Delhi Generation = ContentPlaceHolder3_LblCurrGen\r\n#Max Load = ContentPlaceHolder3_LblMaxToday\r\n#Min Load = ContentPlaceHolder3_LblMinToday" }, { "alpha_fraction": 0.6223517060279846, "alphanum_fraction": 0.6296345591545105, "avg_line_length": 45.04878234863281, "blob_id": "3c0474d8986701b0c2155c1a0b9edefbe2d70b25", "content_id": "799a406d8b560a83ee41e53e2e68af880c0cc614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7552, "license_type": "no_license", "max_line_length": 125, "num_lines": 164, "path": "/merit_scraper.py", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\nimport csv\nfrom datetime import datetime, timedelta\nimport os\nimport logging\nimport pymysql\nimport helper_methods\nimport requests\n\n\ndef get_merit_data(conn):\n meritindia_url = 'http://www.meritindia.in'\n current_datetime = 
datetime.utcnow().replace(microsecond=0).isoformat()\n #page = urllib.request.urlopen(meritindia_url)\n\n #html_content = BeautifulSoup(page, 'html.parser')\n \n page = requests.get('https://meritindia.in')\n html_content = BeautifulSoup(page.content, 'html.parser')\n\n column_headings = ['TIMESTAMP']\n row_values = [current_datetime]\n\n print(f'Running write-to-aws.py at {current_datetime}')\n\n # Get data headers from website\n data_types = html_content.find_all('div', 'gen_title_sec')\n for data_type in data_types:\n column_headings.append(str(data_type.text.strip()))\n\n # Get current data values from website\n current_values = html_content.find_all('div', 'gen_value_sec')\n for current_value in current_values:\n data_value = (current_value.find('span', 'counter'))\n row_values.append(data_value.text.strip().replace(',', ''))\n\n print('Writing data to rds')\n\n try:\n cursor = conn.cursor()\n cursor2 = conn.cursor()\n data_dict = {}\n demand_met = float(row_values[1])\n thermal_generation = float(row_values[2])\n gas_generation = float(row_values[3])\n nuclear_generation = float(row_values[4])\n hydro_generation = float(row_values[5])\n renewable_generation = float(row_values[6])\n total_generation = thermal_generation + gas_generation + nuclear_generation + hydro_generation + renewable_generation\n utc_timestamp = datetime.strptime(current_datetime, \"%Y-%m-%dT%H:%M:%S\")\n rounded_timestamp_15 = utc_timestamp - timedelta(minutes=utc_timestamp.minute % 15,\n seconds=utc_timestamp.second,\n microseconds=utc_timestamp.microsecond)\n rounded_timestamp_5 = utc_timestamp - timedelta(minutes=utc_timestamp.minute % 5,\n seconds=utc_timestamp.second,\n microseconds=utc_timestamp.microsecond)\n data_dict['demand_met'] = demand_met\n data_dict['thermal_generation'] = thermal_generation\n data_dict['gas_generation'] = gas_generation\n data_dict['nuclear_generation'] = nuclear_generation\n data_dict['hydro_generation'] = hydro_generation\n data_dict['renewable_generation'] = renewable_generation\n data_dict['thermal_generation_corrected'] = thermal_generation\n data_dict['gas_generation_corrected'] = gas_generation\n data_dict['nuclear_generation_corrected'] = nuclear_generation\n data_dict['hydro_generation_corrected'] = hydro_generation\n data_dict['renewable_generation_corrected'] = renewable_generation\n data_dict['timestamp'] = utc_timestamp\n data_dict['5_min_rounded_timestamp'] = rounded_timestamp_5\n data_dict['15_min_rounded_timestamp'] = rounded_timestamp_15\n data_dict = calculate_corrected_data(cursor2, data_dict, demand_met, gas_generation, hydro_generation,\n nuclear_generation, renewable_generation, thermal_generation,\n total_generation)\n\n helper_methods.insert_into_table('merit_india_data_rounded_corrected', data_dict, cursor, conn)\n\n except Exception as e:\n print(f'Could not write data to rds! 
{str(e)}')\n    finally:\n        cursor.close()\n        conn.close()\n\n    print('Finished writing data to rds')\n\n\ndef calculate_corrected_data(cursor2, data_dict, demand_met, gas_generation, hydro_generation, nuclear_generation,\n                              renewable_generation, thermal_generation, total_generation):\n    if total_generation <= 0.95 * demand_met or total_generation >= 1.05 * demand_met:\n        # Use the current reading's timestamp to look up the previous corrected reading\n        current_datetime = data_dict['timestamp']\n        cursor2.execute(\n            f'select timestamp, thermal_generation_corrected, gas_generation_corrected, hydro_generation_corrected, '\n            f'nuclear_generation_corrected, renewable_generation_corrected, demand_met from '\n            f'merit_india_data_rounded_corrected where timestamp < \"{current_datetime}\" '\n            f'order by timestamp desc limit 1')\n        row = cursor2.fetchone()\n        # Without a previous reading there is nothing to correct against\n        if row is None:\n            return data_dict\n        previous_thermal = row[1]\n        previous_gas = row[2]\n        previous_hydro = row[3]\n        previous_nuclear = row[4]\n        previous_renewable = row[5]\n        previous_demand_met = row[6]\n\n        current_thermal = thermal_generation\n        current_gas = gas_generation\n        current_hydro = hydro_generation\n        current_nuclear = nuclear_generation\n        current_renewable = renewable_generation\n        current_demand_met = demand_met\n\n        corrected_thermal = current_thermal\n        previous_thermal_ratio = previous_thermal / previous_demand_met\n        current_thermal_ratio = current_thermal / current_demand_met\n        if abs((current_thermal - previous_thermal) / previous_thermal) >= 0.1:\n            corrected_thermal *= previous_thermal_ratio / current_thermal_ratio\n\n        corrected_gas = current_gas\n        previous_gas_ratio = previous_gas / previous_demand_met\n        current_gas_ratio = current_gas / current_demand_met\n        if abs((current_gas - previous_gas) / previous_gas) >= 0.1:\n            corrected_gas *= previous_gas_ratio / current_gas_ratio\n\n        corrected_hydro = current_hydro\n        previous_hydro_ratio = previous_hydro / previous_demand_met\n        current_hydro_ratio = current_hydro / current_demand_met\n        if abs((current_hydro - previous_hydro) / previous_hydro) >= 0.1:\n            corrected_hydro *= previous_hydro_ratio / current_hydro_ratio\n\n        corrected_nuclear = current_nuclear\n        previous_nuclear_ratio = previous_nuclear / previous_demand_met\n        current_nuclear_ratio = current_nuclear / current_demand_met\n        if abs((current_nuclear - previous_nuclear) / previous_nuclear) >= 0.1:\n            corrected_nuclear *= previous_nuclear_ratio / current_nuclear_ratio\n\n        corrected_renewable = current_renewable\n        previous_renewable_ratio = previous_renewable / previous_demand_met\n        current_renewable_ratio = current_renewable / current_demand_met\n        if abs((current_renewable - previous_renewable) / previous_renewable) >= 0.1:\n            corrected_renewable *= previous_renewable_ratio / current_renewable_ratio\n\n        data_dict['thermal_generation_corrected'] = corrected_thermal\n        data_dict['gas_generation_corrected'] = corrected_gas\n        data_dict['nuclear_generation_corrected'] = corrected_nuclear\n        data_dict['hydro_generation_corrected'] = corrected_hydro\n        data_dict['renewable_generation_corrected'] = corrected_renewable\n    return data_dict\n\n\ndef run():\n    host = os.environ['HOST']\n    port = int(os.environ['PORT'])\n    dbname = os.environ['DB']\n    user = os.environ['USER']\n    password = os.environ['PASSWORD']\n    conn = pymysql.connect(host=host, user=user, port=port,\n                           passwd=password, db=dbname)\n    get_merit_data(conn)\n\n\ndef lambda_handler(event, context):\n    run()\n\n\nif __name__ == \"__main__\":\n    run()\n" }, { "alpha_fraction": 0.6576576828956604, "alphanum_fraction": 0.6592956781387329, "avg_line_length": 31.13157844543457, "blob_id": "151f4e197eb90dd30096f6f94e13727a22e328c0", "content_id": 
"853bdf39e989650af4d9b22faed1574c1f6183cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 87, "num_lines": 38, "path": "/helper_methods.py", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "from requests import Session\nfrom bs4 import BeautifulSoup\nfrom arrow import get, utcnow\n\n\ndef get_response_soup(url, session=None):\n \"\"\"Get BeautifulSoup response\"\"\"\n ses = session or Session()\n response = ses.get(url)\n response_text = response.text\n return BeautifulSoup(response_text, 'html.parser')\n\n\ndef read_datetime_from_span_id(html, span_id, format):\n \"\"\"Read date time from span with id\"\"\"\n date_time_span = html.find('span', {'id': span_id})\n india_date_time = date_time_span.text + ' Asia/Kolkata'\n return get(india_date_time, format + ' ZZZ')\n\n\ndef read_text_from_span_id(html, span_id):\n \"\"\"Read text from span with id\"\"\"\n return html.find('span', {'id': span_id}).text\n\n\ndef read_value_from_span_id(html, span_id):\n \"\"\"Read value from span with id\"\"\"\n html_span = read_text_from_span_id(html, span_id)\n return float(html_span)\n\n\ndef insert_into_table(table, dictionary, cursor, conn):\n placeholders = ', '.join(['%s'] * len(dictionary))\n columns = ', '.join(dictionary.keys())\n sql = \"INSERT IGNORE INTO %s ( %s ) VALUES ( %s )\" % (table, columns, placeholders)\n # valid in Python 3\n cursor.execute(sql, list(dictionary.values()))\n conn.commit()\n" }, { "alpha_fraction": 0.4555984437465668, "alphanum_fraction": 0.6949806809425354, "avg_line_length": 15.1875, "blob_id": "9527780e9e14060b86b38c883a99610c35923f32", "content_id": "423599a258e424006c101797bab86eab5d89f244", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 259, "license_type": "no_license", "max_line_length": 22, "num_lines": 16, "path": "/requirements.txt", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "arrow==0.15.5\nbeautifulsoup4==4.8.2\nbotocore==1.15.35\nbs4==0.0.1\ncertifi==2019.11.28\nchardet==3.0.4\ndocutils==0.15.2\nidna==2.9\njmespath==0.9.5\nPyMySQL==0.9.3\npython-dateutil==2.8.1\nrequests==2.23.0\ns3transfer==0.3.3\nsix==1.14.0\nsoupsieve==2.0\nurllib3==1.25.8\n" }, { "alpha_fraction": 0.6487491726875305, "alphanum_fraction": 0.6528059244155884, "avg_line_length": 41.869564056396484, "blob_id": "34074665055bb4d61cc686c8e963a0f18d234316", "content_id": "29e28ab54cb3950090b0d169621518267eafaf71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2958, "license_type": "no_license", "max_line_length": 112, "num_lines": 69, "path": "/merit_moving_averages.py", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "import datetime\nimport pymysql\nfrom sqlalchemy import create_engine\nimport pandas as pd\nimport os\n\n\ndef write_moving_averages():\n host = os.environ['HOST']\n port = int(os.environ['PORT'])\n dbname = os.environ['DB']\n user = os.environ['USER']\n password = os.environ['PASSWORD']\n hours = int(os.environ['HOURS_BETWEEN_RUNS'])\n\n print(\"Writing Moving Averages\")\n\n engine = create_engine(f'mysql+pymysql://{user}:{password}@{host}:{port}/{dbname}')\n connection = engine.connect()\n\n month_ago_time = (datetime.datetime.utcnow() - datetime.timedelta(days=31)).strftime(\"%Y-%m-%d %H:%M:%S\")\n day_ago_time = 
(datetime.datetime.utcnow() - datetime.timedelta(hours=hours))\n\n x = pd.read_sql(f'select 5_min_rounded_timestamp, avg(thermal_generation_corrected) as thermal_generation, \\\n avg(gas_generation_corrected) as gas_generation, \\\n avg(hydro_generation_corrected) as hydro_generation, \\\n avg(nuclear_generation_corrected) as nuclear_generation, \\\n avg(renewable_generation_corrected) as renewable_generation, avg(demand_met) as demand_met \\\n from merit_india_data_rounded_corrected where \\\n timestamp >= \"{month_ago_time}\" \\\n group by 5_min_rounded_timestamp', engine)\n x['5_min_rounded_timestamp'] = pd.to_datetime(x['5_min_rounded_timestamp'])\n\n timestamp_index_df = x.set_index('5_min_rounded_timestamp')\n\n daily_moving_averages = timestamp_index_df.rolling('24h').mean()\n daily_moving_averages[daily_moving_averages.index >= day_ago_time]\\\n .to_sql(name='merit_india_daily_moving_averages_temp', con=engine, index_label='timestamp',\n if_exists='replace')\n connection.execute('INSERT IGNORE INTO merit_india_daily_moving_averages '\n 'select * from merit_india_daily_moving_averages_temp')\n\n weekly_moving_averages = timestamp_index_df.rolling('7d').mean()\n weekly_moving_averages[weekly_moving_averages.index >= day_ago_time]\\\n .to_sql(name='merit_india_weekly_moving_averages_temp', con=engine, index_label='timestamp',\n if_exists='replace')\n connection.execute('INSERT IGNORE INTO merit_india_weekly_moving_averages '\n 'select * from merit_india_weekly_moving_averages_temp')\n\n monthly_moving_averages = timestamp_index_df.rolling('30d').mean()\n monthly_moving_averages[monthly_moving_averages.index >= day_ago_time]\\\n .to_sql(name='merit_india_monthly_moving_averages_temp', con=engine, index_label='timestamp',\n if_exists='replace')\n connection.execute('INSERT IGNORE INTO merit_india_monthly_moving_averages '\n 'select * from merit_india_monthly_moving_averages_temp')\n\n connection.close()\n\n\ndef run():\n write_moving_averages()\n\n\ndef lambda_handler(event, context):\n run()\n\n\nif __name__ == \"__main__\":\n run()\n" }, { "alpha_fraction": 0.7085062265396118, "alphanum_fraction": 0.7115239500999451, "avg_line_length": 41.931175231933594, "blob_id": "afded604f65e7452de7039614e53db8e1a03a75f", "content_id": "edca8578e17517af6a865ab97e6543b5814a1d8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10604, "license_type": "no_license", "max_line_length": 108, "num_lines": 247, "path": "/kptcl_scraper.py", "repo_name": "utkarshdalal/brookings_data_scrapers", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# Code generously borrowed from https://github.com/hectorespert/electricitymap/blob/master/parsers/IN_KA.py\n\nfrom requests import Session\nimport helper_methods\nimport pymysql\nimport os\n\n\ndef fetch_production_by_generator(session):\n html = helper_methods.get_response_soup('http://kptclsldc.com/StateGen.aspx', session)\n\n india_date_time = helper_methods.read_datetime_from_span_id(html, 'lbldate', 'M/D/YYYY h:mm:ss A')\n\n generation = {}\n\n generation['timestamp'] = india_date_time.datetime\n\n # RTPS Production: https://en.wikipedia.org/wiki/Raichur_Thermal_Power_Station\n rtps_value = helper_methods.read_value_from_span_id(html, 'lblrtptot')\n generation['rtps_generation'] = rtps_value\n\n # BTPS Production: https://en.wikipedia.org/wiki/Bellary_Thermal_Power_station\n btps_value = helper_methods.read_value_from_span_id(html, 'lblbtptot')\n generation['btps_generation'] = btps_value\n\n # 
YTPS Production: https://en.wikipedia.org/wiki/Yermarus_Thermal_Power_Station\n    ytps_value = helper_methods.read_value_from_span_id(html, 'ytptot')\n    generation['ytps_generation'] = ytps_value\n\n    # UPCL Production: https://en.wikipedia.org/wiki/Udupi_Power_Plant\n    upcl_value = helper_methods.read_value_from_span_id(html, 'lblupctot')\n    generation['upcl_generation'] = upcl_value\n\n    # JINDAL Production: https://en.wikipedia.org/wiki/JSW_Vijayanagar_Power_Station\n    jindal_value = helper_methods.read_value_from_span_id(html, 'lbljintot')\n    generation['jindal_generation'] = jindal_value\n\n    # Coal Production\n    coal_value = rtps_value + btps_value + ytps_value + upcl_value + jindal_value\n    generation['thermal_generation'] = coal_value\n\n    # Sharavati Production: Sharavati Hydroelectric\n    sharavati_value = helper_methods.read_value_from_span_id(html, 'lblshvytot')\n    generation['sharavati_generation'] = sharavati_value\n\n    # Nagjhari Production: Kalinadi-Nagjhari Hydroelectric\n    nagjhari_value = helper_methods.read_value_from_span_id(html, 'lblngjtot')\n    generation['nagjhari_generation'] = nagjhari_value\n\n    # Varahi Production: https://en.wikipedia.org/wiki/Varahi_River#Varahi_Hydro-electric_Project\n    varahi_value = helper_methods.read_value_from_span_id(html, 'lblvrhtot')\n    generation['varahi_generation'] = varahi_value\n\n    # Kodsalli Production: Kalinadi Kodasalli Hydroelectric\n    kodsalli_value = helper_methods.read_value_from_span_id(html, 'lblkdsltot')\n    generation['kodsalli_generation'] = kodsalli_value\n\n    # Kadra Production: https://en.wikipedia.org/wiki/Kadra_Dam\n    kadra_value = helper_methods.read_value_from_span_id(html, 'lblkdrtot')\n    generation['kadra_generation'] = kadra_value\n\n    # GERUSOPPA production: Gerusoppa Dam\n    gerusoppa_value = helper_methods.read_value_from_span_id(html, 'lblgrsptot')\n    generation['gerusoppa_generation'] = gerusoppa_value\n\n    # JOG production: https://en.wikipedia.org/wiki/Jog_Falls\n    jog_value = helper_methods.read_value_from_span_id(html, 'lbljogtot')\n    generation['jog_generation'] = jog_value\n\n    # LPH Production: Linganamakki Dam\n    lph_value = helper_methods.read_value_from_span_id(html, 'lbllphtot')\n    generation['lph_generation'] = lph_value\n\n    # Supa generation: https://en.wikipedia.org/wiki/Supa_Dam\n    supa_value = helper_methods.read_value_from_span_id(html, 'lblsupatot')\n    generation['supa_generation'] = supa_value\n\n    # SHIMSHA: https://en.wikipedia.org/wiki/Shimsha#Power_generation\n    shimsha_value = helper_methods.read_value_from_span_id(html, 'lblshimtot')\n    generation['shimsha_generation'] = shimsha_value\n\n    # SHIVASAMUDRA: https://en.wikipedia.org/wiki/Shivanasamudra_Falls#Power_generation\n    shivasamudra_value = helper_methods.read_value_from_span_id(html, 'lblshivtot')\n    generation['shivasamudra_generation'] = shivasamudra_value\n\n    # MANIDAM: Mani Dam Hydroelectric\n    manidam_value = helper_methods.read_value_from_span_id(html, 'lblmanitot')\n    generation['manidam_generation'] = manidam_value\n\n    # MUNRABAD: Munirabad Hydroelectric\n    munrabad_value = helper_methods.read_value_from_span_id(html, 'lblmbdtot')\n    generation['munrabad_generation'] = munrabad_value\n\n    # BHADRA: https://en.wikipedia.org/wiki/Bhadra_Dam\n    bhadra_value = helper_methods.read_value_from_span_id(html, 'lblbdratot')\n    generation['bhadra_generation'] = bhadra_value\n\n    # GHATAPRABHA: Ghataprabha Hydroelectric\n    ghataprabha_value = helper_methods.read_value_from_span_id(html, 'lblgtprtot')\n    generation['ghataprabha_generation'] = ghataprabha_value\n\n    # 
ALMATTI: https://en.wikipedia.org/wiki/Almatti_Dam\n    almatti_value = helper_methods.read_value_from_span_id(html, 'lblalmttot')\n    generation['almatti_generation'] = almatti_value\n\n    # CGS (Central Generating Stations) Production\n    # TODO: Search CGS production type\n    cgs_value = helper_methods.read_value_from_span_id(html, 'lblcgs')\n    generation['cgs_generation'] = cgs_value\n\n    ncep_value = helper_methods.read_value_from_span_id(html, 'lblncep')\n    generation['ncep_generation'] = ncep_value\n\n    total_value = helper_methods.read_value_from_span_id(html, 'lbltotgen')\n    generation['total_generation'] = total_value\n\n    frequency_value = helper_methods.read_value_from_span_id(html, 'lblfreq')\n    generation['frequency_hz'] = frequency_value\n\n    # Hydro production\n    hydro_value = sharavati_value + nagjhari_value + varahi_value + kodsalli_value \\\n                  + kadra_value + gerusoppa_value + jog_value + lph_value + supa_value \\\n                  + shimsha_value + shivasamudra_value + manidam_value + munrabad_value \\\n                  + bhadra_value + ghataprabha_value + almatti_value\n    generation['hydro_generation'] = hydro_value\n\n    return generation\n\n\ndef fetch_ncep_production(session):\n    ncep_generation = {}\n\n    # NCEP (Non-Conventional Energy Production)\n    ncep_html = helper_methods.get_response_soup('http://kptclsldc.com/StateNCEP.aspx', session)\n    ncep_date_time = helper_methods.read_datetime_from_span_id(ncep_html, 'Label1', 'DD/MM/YYYY HH:mm:ss')\n\n    ncep_generation['timestamp'] = ncep_date_time.datetime\n\n    # cogen type is sugarcane bagasse. Proof in Issue #1867\n    cogen_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tc')\n    ncep_generation['cogen_generation'] = cogen_value\n\n    biomass_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tb')\n    ncep_generation['biomass_generation'] = biomass_value\n\n    # cogen_value is generated from sugarcane bagasse\n    biomass_value += cogen_value\n\n    mini_hydro_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tm')\n    ncep_generation['mini_hydro_generation'] = mini_hydro_value\n\n    wind_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tw')\n    ncep_generation['wind_generation'] = wind_value\n\n    solar_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_ts')\n    ncep_generation['solar_generation'] = solar_value\n\n    return ncep_generation\n\n\ndef fetch_escom_demand(session):\n    escom_demand = {}\n\n    # ESCOM Scheduled & Actual\n    escom_html = helper_methods.get_response_soup('http://kptclsldc.com/Snapshot.aspx', session)\n    escom_date_time = helper_methods.read_datetime_from_span_id(escom_html, 'Label6', 'DD/MM/YYYY HH:mm:ss')\n\n    escom_demand['timestamp'] = escom_date_time.datetime\n\n    scheduled_bescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label15')\n    actual_bescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label10')\n    escom_demand['scheduled_bescom_load'] = scheduled_bescom_load\n    escom_demand['actual_bescom_load'] = actual_bescom_load\n\n    scheduled_mescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label16')\n    actual_mescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label11')\n    escom_demand['scheduled_mescom_load'] = scheduled_mescom_load\n    escom_demand['actual_mescom_load'] = actual_mescom_load\n\n    scheduled_cesc_load = helper_methods.read_value_from_span_id(escom_html, 'Label17')\n    actual_cesc_load = helper_methods.read_value_from_span_id(escom_html, 'Label12')\n    escom_demand['scheduled_cesc_load'] = scheduled_cesc_load\n    escom_demand['actual_cesc_load'] = 
actual_cesc_load\n\n scheduled_gescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label18')\n actual_gescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label13')\n escom_demand['scheduled_gescom_load'] = scheduled_gescom_load\n escom_demand['actual_gescom_load'] = actual_gescom_load\n\n scheduled_hescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label19')\n actual_hescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label14')\n escom_demand['scheduled_hescom_load'] = scheduled_hescom_load\n escom_demand['actual_hescom_load'] = actual_hescom_load\n\n scheduled_total_load = helper_methods.read_value_from_span_id(escom_html, 'Label25')\n actual_total_load = helper_methods.read_value_from_span_id(escom_html, 'Label26')\n escom_demand['scheduled_total_load'] = scheduled_total_load\n escom_demand['actual_total_load'] = actual_total_load\n\n frequency_value = helper_methods.read_value_from_span_id(escom_html, 'Label1')\n escom_demand['frequency_hz'] = frequency_value\n\n return escom_demand\n\n\ndef fetch_data(session=None, conn=None, target_datetime=None, logger=None):\n cursor = conn.cursor()\n\n try:\n generation = fetch_production_by_generator(session)\n helper_methods.insert_into_table('kptcl_generation', generation, cursor, conn)\n except Exception as e:\n print(f'Could not fetch kptcl generation data: {e}')\n\n try:\n ncep_generation = fetch_ncep_production(session)\n helper_methods.insert_into_table('kptcl_ncep_generation', ncep_generation, cursor, conn)\n except Exception as e:\n print(f'Could not fetch kptcl ncep generation data: {e}')\n\n try:\n escom_demand = fetch_escom_demand(session)\n helper_methods.insert_into_table('kptcl_load', escom_demand, cursor, conn)\n except Exception as e:\n print(f'Could not fetch kptcl load data: {e}')\n\n\ndef run():\n session = Session()\n host = os.environ['HOST']\n port = int(os.environ['PORT'])\n dbname = os.environ['DB']\n user = os.environ['USER']\n password = os.environ['PASSWORD']\n conn = pymysql.connect(host, user=user, port=port,\n passwd=password, db=dbname)\n fetch_data(session, conn)\n\n\ndef lambda_handler(event, context):\n run()\n\n\nif __name__ == \"__main__\":\n run()\n" } ]
7
eardil/gnomix
https://github.com/eardil/gnomix
beb4ac6b38f63536a921e2d25518cba89b98325e
aacdd3811c1f1d9d82eb5c4d597938885d383a34
d21456dcebf483f5a345e8692fb4ec9095f5e70c
refs/heads/main
2023-08-25T22:23:47.086530
2021-10-01T19:00:09
2021-10-01T19:00:09
413,292,446
0
0
null
2021-10-04T06:00:01
2021-10-04T02:08:14
2021-10-01T19:00:33
null
[ { "alpha_fraction": 0.5853846073150635, "alphanum_fraction": 0.5923076868057251, "avg_line_length": 32.35897445678711, "blob_id": "d9ecf7b5be9b45113eea1af7ca7eb5219bd807f4", "content_id": "f13ffc004c562f52606edfa2e010d4c56a5aff56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "no_license", "max_line_length": 95, "num_lines": 39, "path": "/src/Smooth/models.py", "repo_name": "eardil/gnomix", "src_encoding": "UTF-8", "text": "from src.Smooth.smooth import Smoother\n\nfrom src.Smooth.utils import slide_window\n\nfrom xgboost import XGBClassifier\nfrom src.Smooth.crf import CRF\n\nclass XGB_Smoother(Smoother):\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.gnofix = True\n assert self.W >= 2*self.S, \"Smoother size to large for given window size. \"\n self.model = XGBClassifier(n_estimators=100, max_depth=4,\n learning_rate=0.1, reg_lambda=1, reg_alpha=0,\n nthread=self.n_jobs, random_state=self.seed,\n num_class=self.A, objective='multi:softprob')\n\n def process_base_proba(self,B,y=None):\n B_slide, y_slide = slide_window(B, self.S, y)\n return B_slide, y_slide\n\n\nclass CRF_Smoother(Smoother):\n \n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.model = CRF(verbose=self.verbose)\n\n\nclass CNN_Smoother(Smoother):\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n from src.Smooth.cnn import CNN # This is to avoid requiring the installation of pytorch\n\n self.model = CNN(num_classes=self.A, num_features=self.S, verbose=self.verbose)" }, { "alpha_fraction": 0.5057471394538879, "alphanum_fraction": 0.7030651569366455, "avg_line_length": 16.399999618530273, "blob_id": "f6e9fb589c227d51516c75772e3671159c15b78d", "content_id": "d36a57046d7077f20755afb5abf821678f014d1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 522, "license_type": "no_license", "max_line_length": 30, "num_lines": 30, "path": "/requirements.txt", "repo_name": "eardil/gnomix", "src_encoding": "UTF-8", "text": "cycler==0.10.0\nCython==0.29.21\ndask==2.20.0\ndataclasses==0.8\njoblib==0.16.0\nkiwisolver==1.2.0\nmatplotlib==3.3.0\nnumpy==1.19.0\npandas==1.0.5\nparameterized==0.8.1\nPillow==7.2.0\npyparsing==2.4.7\npython-crfsuite==0.9.7\npython-dateutil==2.8.1\npytz==2020.1\nPyYAML==5.3.1\nscikit-allel==1.3.1\nscikit-learn==0.23.1\nscipy==1.5.1\nseaborn==0.11.1\nsix==1.15.0\nsklearn==0.0\nsklearn-crfsuite==0.3.6\ntabulate==0.8.9\nthreadpoolctl==2.1.0\ntoolz==0.10.0\ntqdm==4.61.0\ntyping-extensions==3.10.0.0\nuncertainty-calibration==0.0.7\nxgboost==1.1.1\n" }, { "alpha_fraction": 0.5172872543334961, "alphanum_fraction": 0.5283687710762024, "avg_line_length": 29.62895965576172, "blob_id": "8dfd7a837986755938729569a7bdf638f7a84eec", "content_id": "00cd6026c8bca997f473b8734d8b19a9d5ee0a1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6768, "license_type": "no_license", "max_line_length": 129, "num_lines": 221, "path": "/src/Base/base.py", "repo_name": "eardil/gnomix", "src_encoding": "UTF-8", "text": "import numpy as np\nimport sys\nfrom sklearn.metrics import accuracy_score, balanced_accuracy_score\nfrom time import time\nfrom multiprocessing import get_context\nimport tqdm\n\nclass Base():\n\n def __init__(self, chm_len, window_size, num_ancestry, missing_encoding=2,\n context=0.5, train_admix=True, n_jobs=None, seed=94305, 
verbose=False):\n\n        self.C = chm_len\n        self.M = window_size\n        self.W = self.C//self.M # Number of windows\n        self.A = num_ancestry\n        self.missing_encoding=missing_encoding\n        self.context = context\n        self.train_admix = train_admix\n        self.n_jobs = n_jobs\n        self.seed = seed\n        self.verbose = verbose\n        self.base_multithread = False\n        self.log_inference = False\n\n        self.time = {}\n\n    def init_base_models(self, model_factory):\n        \"\"\"\n        inputs:\n            - model_factory: function that returns a model object that has the functions\n                - fit\n                - predict\n                - predict_proba\n            - and the attributes\n                - classes_\n        \"\"\"\n        self.models = [model_factory() for _ in range(self.W)]\n\n    def pad(self,X):\n        pad_left = np.flip(X[:,0:self.context],axis=1)\n        pad_right = np.flip(X[:,-self.context:],axis=1)\n        return np.concatenate([pad_left,X,pad_right],axis=1)\n    \n    def train(self, X, y, verbose=True):\n        \"\"\"\n        inputs:\n            - X: np.array of shape (N, C) where N is sample size and C chm length\n            - y: np.array of shape (N, C) where N is sample size and C chm length\n        \"\"\"\n        try:\n            np.lib.stride_tricks.sliding_window_view\n            return self.train_vectorized(X, y)\n        except AttributeError:\n            print(\"Vectorized implementation requires numpy versions 1.20+.. Using loopy version..\")\n            return self.train_loopy(X, y, verbose=verbose)\n\n    def train_loopy(self, X, y, verbose=True):\n        \"\"\"Deprecated\"\"\"\n\n        t = time()\n\n        if self.context != 0.0:\n            X = self.pad(X)\n\n        start = self.context\n\n        for i in range(self.W):\n\n            X_w = X[:,start-self.context:start+self.context+self.M]\n            y_w = y[:,i]\n\n            if i == self.W-1:\n                X_w = X[:,start-self.context:]\n\n            # train model\n            self.models[i].fit(X_w,y_w)\n\n            start += self.M\n\n            if verbose:\n                sys.stdout.write(\"\\rWindows done: %i/%i\" % (i+1, self.W))\n        \n        if verbose:\n            print(\"\")\n\n        self.time[\"train\"] = time() - t\n\n    def train_base_model(self, b, X, y):\n        return b.fit(X, y)\n\n    def predict_proba_base_model(self, b, X):\n        return b.predict_proba(X)\n\n    def train_vectorized(self, X, y):\n\n        slide_window = np.lib.stride_tricks.sliding_window_view\n\n        t = time()\n\n        # pad\n        if self.context != 0.0:\n            X = self.pad(X)\n        \n        # convolve\n        M_ = self.M + 2*self.context \n        idx = np.arange(0,self.C,self.M)[:-2]\n        X_b = slide_window(X, M_, axis=1)[:,idx,:]\n\n        # stack\n        train_args = tuple(zip( self.models[:-1], np.swapaxes(X_b,0,1), np.swapaxes(y,0,1)[:-1] ))\n        rem = self.C - self.M*self.W\n        train_args += ((self.models[-1], X[:,X.shape[1]-(M_+rem):], y[:,-1]),)\n\n        # train\n        log_iter = tqdm.tqdm(train_args, total=self.W, bar_format='{l_bar}{bar:40}{r_bar}{bar:-40b}', position=0, leave=True)\n        if self.base_multithread:\n            with get_context(\"spawn\").Pool(self.n_jobs) as pool:\n                self.models = pool.starmap(self.train_base_model, log_iter) \n        else:\n            self.models = [self.train_base_model(*b) for b in log_iter]\n\n        self.time[\"train\"] = time() - t\n\n    def predict_proba(self, X):\n        \"\"\"\n        inputs:\n            - X: np.array of shape (N, C) where N is sample size and C chm length\n        returns \n            - B: base probabilities of shape (N,W,A)\n        \"\"\"\n        try:\n            np.lib.stride_tricks.sliding_window_view\n            return self.predict_proba_vectorized(X)\n        except AttributeError:\n            print(\"Vectorized implementation requires numpy versions 1.20+.. 
Using loopy version..\")\n return self.predict_proba_loopy(X)\n\n def predict_proba_vectorized(self, X):\n\n slide_window = np.lib.stride_tricks.sliding_window_view\n\n t = time()\n\n # pad\n if self.context != 0.0:\n X = self.pad(X)\n \n # convolve\n M_ = self.M + 2*self.context \n idx = np.arange(0,self.C,self.M)[:-2]\n X_b = slide_window(X, M_, axis=1)[:,idx,:]\n\n # stack\n base_args = tuple(zip( self.models[:-1], np.swapaxes(X_b,0,1) ))\n rem = self.C - self.M*self.W\n base_args += ((self.models[-1], X[:,X.shape[1]-(M_+rem):]), )\n\n if self.log_inference:\n base_args = tqdm.tqdm(base_args, total=self.W, bar_format='{l_bar}{bar:40}{r_bar}{bar:-40b}', position=0, leave=True)\n\n # predict proba\n if self.base_multithread:\n with get_context(\"spawn\").Pool(self.n_jobs) as pool:\n B = np.array(pool.starmap(self.predict_proba_base_model, base_args))\n else:\n B = np.array([self.predict_proba_base_model(*b) for b in base_args])\n\n B = np.swapaxes(B, 0, 1)\n\n self.time[\"inference\"] = time() - t\n\n return B\n\n\n def predict_proba_loopy(self, X):\n \"\"\"Depricated\"\"\"\n\n t = time()\n\n N = len(X)\n B = np.zeros( (N, self.W, self.A), dtype=\"float32\" )\n \n start = self.context\n \n if self.context != 0.0:\n X = self.pad(X)\n \n for i in range(self.W):\n X_w = X[:,start-self.context:start+self.context+self.M]\n\n if i == self.W-1:\n X_w = X[:,start-self.context:]\n\n B[:,i,self.models[i].classes_] = self.models[i].predict_proba(X_w)\n\n start += self.M\n\n self.time[\"inference\"] = time() - t\n \n return B\n \n def predict(self, X):\n B = self.predict_proba(X)\n return np.argmax(B, axis=-1)\n \n def evaluate(self,X=None,y=None,B=None):\n\n round_accr = lambda accr : round(np.mean(accr)*100,2)\n\n if X is not None:\n y_pred = self.predict(X)\n elif B is not None:\n y_pred = np.argmax(B, axis=-1)\n else:\n print(\"Error: Need either SNP input or estimated probabilities to evaluate.\")\n\n accr = round_accr( accuracy_score(y.reshape(-1), y_pred.reshape(-1)) )\n accr_bal = round_accr( balanced_accuracy_score(y.reshape(-1), y_pred.reshape(-1)) )\n\n return accr, accr_bal" }, { "alpha_fraction": 0.6219885945320129, "alphanum_fraction": 0.6303110122680664, "avg_line_length": 36.735538482666016, "blob_id": "d1dd14c3c89e0ed3c0c69a42489159ce39e46ecb", "content_id": "5e3dc796440a62ae62d1defc01f71f8e9d9cb80b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4566, "license_type": "no_license", "max_line_length": 109, "num_lines": 121, "path": "/src/postprocess.py", "repo_name": "eardil/gnomix", "src_encoding": "UTF-8", "text": "import allel\nfrom collections import Counter\nimport numpy as np\nimport pandas as pd\nfrom scipy.interpolate import interp1d\n\ndef get_effective_pred(prediction, chm_len, window_size, model_idx):\n \"\"\"\n Maps SNP indices to window number to find predictions for those SNPs\n \"\"\"\n\n # expanding prediction\n ext = np.repeat(prediction, window_size, axis=1)\n\n # handling remainder\n rem_len = chm_len-ext.shape[1]\n ext_rem = np.tile(prediction[:,-1], [rem_len,1]).T\n ext = np.concatenate([ext, ext_rem], axis=1)\n\n # return relevant positions\n return ext[:, model_idx]\n\n\ndef get_meta_data(chm, model_pos, query_pos, n_wind, wind_size, gen_map_df):\n \"\"\"\n Transforms the predictions on a window level to a .msp file format.\n - chm: chromosome number\n - model_pos: physical positions of the model input SNPs in basepair units\n - query_pos: physical positions of the query input SNPs in basepair units\n - 
n_wind: number of windows in model\n    - wind_size: size of each window in the model\n    - genetic_map_file: the input genetic map file\n    \"\"\"\n\n    model_chm_len = len(model_pos)\n    \n    # chm\n    chm_array = [chm]*n_wind\n\n    # start and end physical positions\n    spos_idx = np.arange(0, model_chm_len, wind_size)[:-1]\n    epos_idx = np.concatenate([np.arange(0, model_chm_len, wind_size)[1:-1],np.array([model_chm_len])])-1\n    spos = model_pos[spos_idx]\n    epos = model_pos[epos_idx]\n\n    # start and end positions in cM (using linear interpolation, truncate ends of map file)\n    end_pts = tuple(np.array(gen_map_df.pos_cm)[[0,-1]])\n    f = interp1d(gen_map_df.pos, gen_map_df.pos_cm, fill_value=end_pts, bounds_error=False) \n    sgpos = np.round(f(spos),5)\n    egpos = np.round(f(epos),5)\n\n    # number of query snps in interval\n    wind_index = [min(n_wind-1, np.where(q == sorted(np.concatenate([epos, [q]])))[0][0]) for q in query_pos]\n    window_count = Counter(wind_index)\n    n_snps = [window_count[w] for w in range(n_wind)]\n\n    # Concat with prediction table\n    meta_data = np.array([chm_array, spos, epos, sgpos, egpos, n_snps]).T\n    meta_data_df = pd.DataFrame(meta_data)\n    meta_data_df.columns = [\"chm\", \"spos\", \"epos\", \"sgpos\", \"egpos\", \"n snps\"]\n\n    return meta_data_df\n\ndef get_samples_from_msp_df(msp_df):\n    \"\"\"Function for getting sample IDs from a pandas DF containing the output data\"\"\"\n\n    # get all columns including sample names\n    query_samples_dub = msp_df.columns[6:]\n\n    # only keep 1 of maternal/paternal \n    single_ind_idx = np.arange(0,len(query_samples_dub),2)\n    query_samples_sing = query_samples_dub[single_ind_idx]\n\n    # remove the suffix\n    query_samples = [qs[:-2] for qs in query_samples_sing]\n\n    return query_samples\n    \ndef write_msp(msp_prefix, meta_data, pred_labels, populations, query_samples):\n    \n    msp_data = np.concatenate([np.array(meta_data), pred_labels.T], axis=1).astype(str)\n    \n    with open(msp_prefix+\".msp\", 'w') as f:\n        # first line (comment)\n        f.write(\"#Subpopulation order/codes: \")\n        f.write(\"\\t\".join([str(pop)+\"=\"+str(i) for i, pop in enumerate(populations)])+\"\\n\")\n        # second line (comment/header)\n        f.write(\"#\"+\"\\t\".join(meta_data.columns) + \"\\t\")\n        f.write(\"\\t\".join([str(s) for s in np.concatenate([[s+\".0\",s+\".1\"] for s in query_samples])])+\"\\n\")\n        # rest of the lines (data)\n        for l in range(msp_data.shape[0]):\n            f.write(\"\\t\".join(msp_data[l,:]))\n            f.write(\"\\n\")\n\ndef write_fb(fb_prefix, meta_data, proba, ancestry, query_samples):\n    \n    n_rows = meta_data.shape[0]\n\n    pp = np.round(np.mean(np.array(meta_data[[\"spos\", \"epos\"]],dtype=int),axis=1)).astype(int)\n    gp = np.mean(np.array(meta_data[[\"sgpos\", \"egpos\"]],dtype=float),axis=1).astype(float)\n\n    fb_meta_data = pd.DataFrame()\n    fb_meta_data[\"chromosome\"] = meta_data[\"chm\"]\n    fb_meta_data[\"physical position\"] = pp\n    fb_meta_data[\"genetic_position\"] = gp\n    fb_meta_data[\"genetic_marker_index\"] = np.repeat(\".\", n_rows)\n\n    fb_prob_header = [\":::\".join([q,h,a]) for q in query_samples for h in [\"hap1\", \"hap2\"] for a in ancestry]\n    fb_prob = np.swapaxes(proba,1,2).reshape(-1, n_rows).T\n    fb_prob_df = pd.DataFrame(fb_prob)\n    fb_prob_df.columns = fb_prob_header\n\n    fb_df = pd.concat((fb_meta_data.reset_index(drop=True), fb_prob_df),axis=1)\n\n    with open(fb_prefix+\".fb\", 'w') as f:\n        # header\n        f.write(\"#reference_panel_population:\\t\")\n        f.write(\"\\t\".join(ancestry)+\"\\n\")\n        fb_df.to_csv(f, sep=\"\\t\", index=False)\n\n    return\n" }, { "alpha_fraction": 
0.5926775336265564, "alphanum_fraction": 0.6014271378517151, "avg_line_length": 34.78115463256836, "blob_id": "6b34423783c5aebe9d6854484e6a504e667577f4", "content_id": "439e81029c0269241668e06a34f2e5c4d4d31fc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11772, "license_type": "no_license", "max_line_length": 130, "num_lines": 329, "path": "/src/utils.py", "repo_name": "eardil/gnomix", "src_encoding": "UTF-8", "text": "import allel\nimport gzip\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nfrom scipy.interpolate import interp1d\nimport string\nimport sys\nfrom time import time\nimport pickle\n\ndef save_dict(D, path):\n    with open(path, 'wb') as handle:\n        pickle.dump(D, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    \ndef load_dict(path):\n    if not os.path.exists(path):\n        return {}\n    with open(path, 'rb') as handle:\n        return pickle.load(handle)\n\ndef get_num_outs(sample_map_paths, r_admixed=1.0):\n    # r_admixed: generated r_admixed * num-founders for each set\n    # TODO: cap train2 lengths to a pre-defined value.\n    num_outs = []\n    for path in sample_map_paths:\n        with open(path,\"r\") as f:\n            length = len(f.readlines()) # how many founders.\n            num_outs.append(int(length *r_admixed))\n    return num_outs\n\ndef run_shell_cmd(cmd, verb=True):\n    if verb:\n        print(\"Running:\", cmd)\n    rval = os.system(cmd)\n    if rval != 0:\n        signal = rval & 0xFF\n        exit_code = rval >> 8\n        if signal != 0:\n            sys.stderr.write(\"\\nCommand %s exits with signal %d\\n\\n\" % (cmd, signal))\n            sys.exit(signal)\n        sys.stderr.write(\"\\nCommand %s failed with return code %d\\n\\n\" % (cmd, exit_code))\n        sys.exit(exit_code)\n\ndef join_paths(p1,p2=\"\",verb=True):\n    path = os.path.join(p1,p2)\n    if not os.path.exists(path):\n        os.makedirs(path)\n        if verb:\n            print(\"path created:\", path)\n    return path\n\ndef read_vcf(vcf_file, chm=None, fields=None, verbose=False):\n    \"\"\"\n    Wrapper function for reading vcf files into a dictionary\n    fields=\"*\" extracts more information, take out if ruled unnecessary\n    \"\"\"\n    if fields is None:\n        # fields = ['variants/CHROM', 'variants/POS', 'calldata/GT', 'variants/REF', 'samples']\n        fields = \"*\"\n\n    if vcf_file[-3:]==\".gz\":\n        with gzip.open(vcf_file, 'rb') as vcf:\n            data = allel.read_vcf(vcf, region=chm, fields=fields)\n    else: \n        data = allel.read_vcf(vcf_file, region=chm, fields=fields)\n\n    if data is None:\n        if chm is None:\n            print(\"No data found in vcf file {}\".format(vcf_file))\n        else:\n            print('Found no data in vcf file {} in region labeled \"{}\". 
Using all data from vcf instead...'.format(vcf_file, chm))\n            return read_vcf(vcf_file, None, fields, verbose)\n\n    if verbose: \n        chmlen, n, _ = data[\"calldata/GT\"].shape\n        print(\"File read:\", chmlen, \"SNPs for\", n, \"individuals\")\n\n    return data\n\ndef snp_intersection(pos1, pos2, verbose=False):\n    \"\"\"\n    Finds intersection of snps given two arrays of snp position \n    in O(max[size(pos1), size(pos2)])\n    \"\"\"\n\n    assert len(pos2) != 0, \"No SNPs of specified chromosome found in query file.\"\n    \n    ind_dict_1 = dict((p,i) for i,p in enumerate(pos1)) # O(n1)\n    ind_dict_2 = dict((p,i) for i,p in enumerate(pos2)) # O(n2)\n    intersection = set(pos1) & set(pos2) # O(min[n1, n2])\n    assert len(intersection) != 0, \"No matching SNPs between model and query file.\"\n    idx12 = [ (ind_dict_1[p], ind_dict_2[p]) for p in intersection ] # O(min[n1, n2])\n    idx1, idx2 = np.array(idx12).T\n\n    if verbose:\n        print(\"- Number of SNPs from model:\", len(pos1))\n        print(\"- Number of SNPs from file:\", len(pos2))\n        print(\"- Number of intersecting SNPs:\", len(intersection))\n        intersect_percentage = round(len(intersection)/len(pos1),4)*100\n        print(\"- Percentage of model SNPs covered by query file: \",\n              intersect_percentage, \"%\", sep=\"\")\n\n    return idx1, idx2\n\n\ndef vcf_to_npy(vcf_data, snp_pos_fmt=None, snp_ref_fmt=None, miss_fill=2, return_idx=False, verbose=True):\n    \"\"\"\n    Converts vcf file to numpy matrix. \n    If SNP position format is specified, then comply with that format by filling in values \n    of missing positions and ignoring additional positions.\n    If SNP reference variant format is specified, then comply with that format by swapping where \n    reference variants are inconsistent.\n    Inputs\n    - vcf_data: already loaded data from a vcf file\n    - snp_pos_fmt: desired SNP position format\n    - snp_ref_fmt: desired reference variant format\n    - miss_fill: value to fill in where there are missing snps\n    Outputs\n    - npy matrix in standard format\n    \"\"\"\n\n    # reshape binary representation into 2D np array \n    data = vcf_data[\"calldata/GT\"]\n    chm_len, n_ind, _ = data.shape\n    data = data.reshape(chm_len,n_ind*2).T\n    mat_vcf_2d = data\n    vcf_idx, fmt_idx = np.arange(n_ind*2), np.arange(n_ind*2)\n\n    if snp_pos_fmt is not None:\n        # matching SNP positions with standard format (finding intersection)\n        vcf_pos = vcf_data['variants/POS']\n        fmt_idx, vcf_idx = snp_intersection(snp_pos_fmt, vcf_pos, verbose=verbose)\n        # only use intersection of variants (fill in missing values)\n        fill = np.full((n_ind*2, len(snp_pos_fmt)), miss_fill)\n        fill[:,fmt_idx] = data[:,vcf_idx]\n        mat_vcf_2d = fill\n\n    if snp_ref_fmt is not None:\n        # adjust binary matrix to match model format\n        # - find inconsistent references\n        vcf_ref = vcf_data['variants/REF']\n        swap = vcf_ref[vcf_idx] != snp_ref_fmt[fmt_idx] # where to swap w.r.t. intersection\n        if swap.any() and verbose:\n            swap_n = sum(swap)\n            swap_p = round(np.mean(swap)*100,4)\n            print(\"- Found \", swap_n, \" (\", swap_p, \"%) different reference variants. 
Adjusting...\", sep=\"\")\n # - swapping 0s and 1s where inconsistent\n fmt_swap_idx = np.array(fmt_idx)[swap] # swap-index at model format\n mat_vcf_2d[:,fmt_swap_idx] = (mat_vcf_2d[:,fmt_swap_idx]-1)*(-1)\n\n # make sure all missing values are encoded as required\n missing_mask = np.logical_and(mat_vcf_2d != 0, mat_vcf_2d != 1)\n mat_vcf_2d[missing_mask] = miss_fill\n\n # return npy matrix\n if return_idx:\n return mat_vcf_2d, vcf_idx, fmt_idx\n\n return mat_vcf_2d\n\ndef read_genetic_map(genetic_map_path, chm=None, header=None):\n\n gen_map_df = pd.read_csv(genetic_map_path, delimiter=\"\\t\", header=header, comment=\"#\", dtype=str)\n gen_map_df.columns = [\"chm\",\"pos\",\"pos_cm\"]\n \n try:\n gen_map_df = gen_map_df.astype({'chm': str, 'pos': int, 'pos_cm': float})\n except ValueError:\n if header is None:\n print(\"WARNING: Something wrong with genetic map format. Trying with header...\")\n return read_genetic_map(genetic_map_path, chm=chm, header=0)\n else:\n raise Exception(\"Genetic map format not understood.\")\n\n if chm is not None:\n chm = str(chm)\n if len(gen_map_df[gen_map_df.chm == chm]) == 0:\n gen_map_df = gen_map_df[gen_map_df.chm == \"chr\"+chm]\n else:\n gen_map_df = gen_map_df[gen_map_df.chm == chm]\n\n return gen_map_df\n\ndef cM2nsnp(cM, chm_len_pos, genetic_map, chm=None):\n\n if type(genetic_map) == str:\n if chm is not None:\n gen_map_df = read_genetic_map(genetic_map, chm)\n else:\n print(\"Need chromosome number to read genetic map\")\n else:\n gen_map_df = genetic_map\n\n chm_len_cM = np.array(gen_map_df[\"pos_cm\"])[-1]\n snp_len = int(round(cM*(chm_len_pos/chm_len_cM)))\n\n return snp_len\n\ndef fb2proba(path_to_fb, n_wind=None):\n \n with open(path_to_fb) as f:\n header = f.readline().split(\"\\n\")[0]\n ancestry = np.array(header.split(\"\\t\")[1:])\n A = len(ancestry)\n \n fb_df = pd.read_csv(path_to_fb, sep=\"\\t\", skiprows=[0])\n\n samples = [s.split(\":::\")[0] for s in fb_df.columns[4::A*2]]\n \n # Probabilities in snp space\n fb = np.array(fb_df)[:,4:]\n C, AN = fb.shape\n N = AN//A\n fb_reshaped = fb.reshape(C, N, A) # (C, N, A)\n proba = np.swapaxes(fb_reshaped, 0, 1) # (N, C, A)\n \n # Probabilities in window space\n if n_wind is not None:\n gen_pos = np.array(fb_df['genetic_position'])\n w_cM = np.arange(gen_pos[0], gen_pos[-1], step = gen_pos[-1]/n_wind)\n f = interp1d(gen_pos, np.arange(C), fill_value=(0, C), bounds_error=False) \n w_idx = f(w_cM).astype(int)\n proba = proba[:,w_idx,:]\n \n return proba\n\ndef update_vcf(vcf_data, mask=None, Updates=None):\n\n out = vcf_data.copy()\n \n if mask is not None:\n for key in vcf_data.keys():\n if key != \"samples\":\n out[key] = vcf_data[key][mask]\n\n if Updates is not None:\n for key in Updates.keys():\n if key != \"samples\":\n out[key] = Updates[key]\n\n return out\n\ndef get_name(name_len=8):\n letters = string.ascii_lowercase + string.ascii_uppercase + string.digits\n return ''.join(random.choice(letters) for i in range(name_len)) \n\ndef npy_to_vcf(reference, npy, results_file, verbose=False):\n \"\"\"\n - reference: str path to reference file which provides metadata for the results\n or alternatively, a allel.read_vcf output\n - npy: npy matrix - shape: (num_samples, num_snps)\n make sure npy file has same snp positions\n - results_file: str output vcf path\n \n this is a very light version of npy_to_vcf for LAI applications\n \n Function behavior\n a vcf file called <results_file> with data in npy file and metadata from reference\n - metadata includes all fields except for genotype 
data\n    - npy file must follow convention where maternal and paternal sequences appear one after the other\n    for each sample\n\n    NOTE: New to production. Has not been bullet-tested.\n    \"\"\"\n    \n    if results_file.split(\".\")[-1] not in [\"vcf\", \"bcf\"]:\n        results_file += \".vcf\"\n\n    # read in the input vcf data\n    if type(reference) == str:\n        data = allel.read_vcf(reference)\n    else:\n        data = reference.copy()\n    \n    # infer chromosome length and number of samples\n    npy = npy.astype(int)\n    chmlen, _, _ = data[\"calldata/GT\"].shape\n    h, c = npy.shape\n    n = h//2\n    assert chmlen == c, \"reference (\" + str(chmlen) + \") and numpy matrix (\" + str(c) + \") not compatible\"\n\n    # Keep sample names if appropriate\n    if \"samples\" in list(data.keys()) and len(data[\"samples\"]) == n:\n        if verbose:\n            print(\"Using same sample names\")\n        data_samples = data[\"samples\"]\n    else:\n        data_samples = [get_name() for _ in range(n)]\n\n    # metadata \n    df = pd.DataFrame()\n    df[\"CHROM\"] = data[\"variants/CHROM\"]\n    df['POS'] = data[\"variants/POS\"]\n    df[\"ID\"] = data[\"variants/ID\"]\n    df[\"REF\"] = data[\"variants/REF\"]\n    df[\"VAR\"] = data[\"variants/ALT\"][:,0] # ONLY THE FIRST SINCE WE ONLY CARE ABOUT BI-ALLELIC SNPS HERE FOR NOW\n    df[\"QUAL\"] = data[\"variants/QUAL\"]\n    df[\"FILTER\"] = [\"PASS\"]*chmlen\n    df[\"INFO\"] = [\".\"]*chmlen\n    df[\"FORMAT\"] = [\"GT\"]*chmlen\n    \n    # genotype data for each sample\n    for i in range(n):\n        \n        # get that particular individual's maternal and paternal snps\n        maternal = npy[i*2,:].astype(str) # maternal is the first\n        paternal = npy[i*2+1,:].astype(str) # paternal is the second\n\n        # create \"maternal|paternal\"\n        lst = [maternal, [\"|\"]*chmlen, paternal]\n        genotype_person = list(map(''.join, zip(*lst)))\n        df[data_samples[i]] = genotype_person\n\n    if verbose:\n        print(\"writing vcf data in \"+results_file)\n\n    # write header\n    with open(results_file,\"w\") as f:\n        f.write(\"##fileformat=VCFv4.1\\n\")\n        f.write(\"##source=pyadmix (XGMix)\\n\")\n        f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Phased Genotype\">\\n')\n        f.write(\"#\"+\"\\t\".join(df.columns)+\"\\n\") # mandatory header\n    \n    # genotype data\n    df.to_csv(results_file,sep=\"\\t\",index=False,mode=\"a\",header=False)\n    \n    return\n" }, { "alpha_fraction": 0.5887850522994995, "alphanum_fraction": 0.593235433101654, "avg_line_length": 35.78115463256836? "avg_line_length": 36.75, "blob_id": "607f47ed7ce7d9d8fb536f1af24c0c4ed2b8db67", "content_id": "666ef8e3af6052b911b8da904887e192342472cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15729, "license_type": "no_license", "max_line_length": 144, "num_lines": 428, "path": "/src/laidataset.py", "repo_name": "eardil/gnomix", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport os\nfrom collections import namedtuple\nimport scipy.interpolate\n\nfrom src.utils import read_vcf, read_genetic_map\n\ndef get_chm_info(genetic_map,variants_pos,chm):\n\n    \"\"\"\n    get chromosome length in morgans from genetic map.\n    Assumes genetic_map is sorted.\n    \n    genetic_map: file with the following format\n    variants: a npy array with numbers representing centi morgans\n    \n    \"\"\"\n    genetic_chm = read_genetic_map(genetic_map_path=genetic_map, chm=chm)\n\n    # get length of chm.\n    chm_length_morgans = max(genetic_chm[\"pos_cm\"])/100.0\n\n    # get snp info - snps in the vcf file and their cm values.\n    # then compute per position probability of being a breakpoint.\n    # requires some interpolation and finding closest positions.\n    
\"\"\"\n # 1: Minimum in a sorted array approach and implemented inside admix().\n - O(logn) every call to admix. Note that admix() takes O(n) anyway.\n # 2: Find probabilities using span. - One time computation.\n\n \"\"\"\n # This adds 0 overhead to code runtime.\n # get interpolated values of all reference snp positions\n genomic_intervals = scipy.interpolate.interp1d(x=genetic_chm[\"pos\"].to_numpy(), y=genetic_chm[\"pos_cm\"].to_numpy(),fill_value=\"extrapolate\")\n genomic_intervals = genomic_intervals(variants_pos)\n\n # interpolation\n lengths = genomic_intervals[1:] - genomic_intervals[0:-1]\n bp = lengths / lengths.sum()\n\n return chm_length_morgans, bp\n\ndef get_sample_map_data(sample_map, sample_weights=None):\n sample_map_data = pd.read_csv(sample_map,delimiter=\"\\t\",header=None,comment=\"#\", dtype=\"object\")\n sample_map_data.columns = [\"sample\",\"population\"]\n\n # creating ancestry map into integers from strings\n # id is based on order in sample_map file.\n ancestry_map = {}\n curr = 0\n for i in sample_map_data[\"population\"]:\n if i in ancestry_map.keys():\n continue\n else:\n ancestry_map[i] = curr\n curr += 1\n sample_map_data[\"population_code\"] = sample_map_data[\"population\"].apply(ancestry_map.get)\n\n if sample_weights is not None:\n sample_weights_df = pd.read_csv(sample_weights,delimiter=\"\\t\",header=None,comment=\"#\")\n sample_weights_df.columns = [\"sample\",\"sample_weight\"]\n sample_map_data = pd.merge(sample_map_data, sample_weights_df, on='sample')\n\n else:\n sample_map_data[\"sample_weight\"] = [1.0/len(sample_map_data)]*len(sample_map_data)\n\n return ancestry_map, sample_map_data\n\n\nPerson = namedtuple('Person', 'maternal paternal name')\n\ndef build_founders(sample_map_data,gt_data,chm_length_snps):\n \n \"\"\"\n Returns founders - a list of Person datatype.\n founders_weight - a list with a weight for each sample in founders\n\n Inputs\n gt_data shape: (num_snps, num_samples, 2)\n \n \"\"\"\n\n # building founders\n founders = []\n\n for i in sample_map_data.iterrows():\n\n # first get the index of this sample in the vcf_data.\n # if not there, skip and print to log.\n\n index = i[1][\"index_in_reference\"]\n\n name = i[1][\"sample\"]\n\n # when creating maternal, paternal make sure it has same keys\n\n maternal = {}\n paternal = {}\n\n # let us use the first for maternal in the vcf file...\n maternal[\"snps\"] = gt_data[:,index,0].astype(np.uint8)\n paternal[\"snps\"] = gt_data[:,index,1].astype(np.uint8)\n\n # single ancestry assumption.\n maternal[\"anc\"] = np.array([i[1][\"population_code\"]]*chm_length_snps).astype(np.uint8)\n paternal[\"anc\"] = np.array([i[1][\"population_code\"]]*chm_length_snps).astype(np.uint8)\n\n # any more info like coordinates, prs can be added here.\n\n p = Person(maternal,paternal,name)\n\n founders.append(p)\n \n return founders\n\n\n\ndef admix(founders,founders_weight,gen,breakpoint_probability,chm_length_snps,chm_length_morgans):\n\n \"\"\"\n create an admixed haploid from the paternal and maternal sequences\n in a non-recursive way.\n \n Returns:\n haploid_returns: dict with same keys as self.maternal and self.paternal\n\n \"\"\"\n # assert all founders have all keys.\n\n assert len(founders) >= 2, \"Too few founders!!!\"\n order = sorted(founders[0].maternal.keys())\n \n # for each gen, we sample from poisson\n num_crossovers = int(sum(np.random.poisson(chm_length_morgans,size=gen)))\n\n # initilizing all numbers to 0.\n haploid_returns = {}\n for key in order:\n haploid_returns[key] = 
np.zeros_like(founders[0].maternal[key])\n    \n    # edge case of no breaking points.\n    if num_crossovers == 0:\n        \n        haploid_returns = {}\n        select_id = np.random.choice(len(founders),p=founders_weight)\n        select = founders[select_id]\n        choice = np.random.rand()>=0.5\n        select = select.maternal if choice else select.paternal\n        for key in order:\n            \n            haploid_returns[key] = select[key].copy()\n\n    else:\n        \n        breakpoints = np.random.choice(np.arange(1,chm_length_snps), \n                                       size=num_crossovers, \n                                       replace=False, \n                                       p=breakpoint_probability)\n        breakpoints = np.sort(breakpoints)\n        \n        breakpoints = np.concatenate(([0],breakpoints,[chm_length_snps]))\n        \n        # select paternal or maternal randomly and apply crossovers.\n        for i in range(len(breakpoints)-1):\n            begin = breakpoints[i]\n            end = breakpoints[i+1]\n            # choose random founder for this segment, then choose random haplotype for this founder\n            select_id = np.random.choice(len(founders),p=founders_weight)\n            select = founders[select_id]\n            choice = np.random.rand()>=0.5\n            select = select.maternal if choice else select.paternal\n            for key in order:\n                haploid_returns[key][begin:end] = select[key][begin:end].copy()\n\n    return haploid_returns\n\n\n\ndef write_output(root,dataset):\n    \n    # dataset is a list of Person\n    \n    if not os.path.isdir(root):\n        os.makedirs(root)\n    \n    snps = []\n    anc = []\n    for person in dataset:\n        snps.append(person.maternal[\"snps\"])\n        snps.append(person.paternal[\"snps\"])\n        anc.append(person.maternal[\"anc\"])\n        anc.append(person.paternal[\"anc\"])\n\n    # create npy files.\n    snps = np.stack(snps)\n    np.save(root+\"/mat_vcf_2d.npy\",snps)\n\n    # create map files.\n    anc = np.stack(anc)\n    np.save(root+\"/mat_map.npy\",anc)\n\n\nclass LAIDataset:\n    \n    \n    def __init__(self,chm,reference,genetic_map,seed=94305):\n\n        np.random.seed(seed)\n\n        self.chm = chm\n        \n        # vcf data\n        print(\"Reading vcf file...\")\n        vcf_data = read_vcf(reference,self.chm)\n        self.pos_snps = vcf_data[\"variants/POS\"].copy()\n        self.num_snps = vcf_data[\"calldata/GT\"].shape[0]\n        self.ref_snps = vcf_data[\"variants/REF\"].copy().astype(str)\n        self.alt_snps = vcf_data[\"variants/ALT\"][:,0].copy().astype(str)\n        \n        self.call_data = vcf_data[\"calldata/GT\"]\n        self.vcf_samples = vcf_data[\"samples\"]\n\n        # genetic map data\n        print(\"Getting genetic map info...\")\n        self.morgans, self.breakpoint_prob = get_chm_info(genetic_map, self.pos_snps, self.chm)\n        \n        \n    def buildDataset(self, sample_map, sample_weights=None):\n        \n        \"\"\"\n        reads in the above files and extracts info\n        \n        self: chm, num_snps, morgans, breakpoint_prob, splits, pop_to_num, num_to_pop\n        sample_map_data => sample name, population, population code, (maternal, paternal, name), weight, split\n        \"\"\"\n        \n        # sample map data\n        print(\"Getting sample map info...\")\n        self.pop_to_num, self.sample_map_data = get_sample_map_data(sample_map, sample_weights)\n        self.num_to_pop = {v: k for k, v in self.pop_to_num.items()}\n        \n        try:\n            map_samples = np.array(list(self.sample_map_data[\"sample\"]))\n\n            sorter = np.argsort(self.vcf_samples)\n            indices = sorter[np.searchsorted(self.vcf_samples, map_samples, sorter=sorter)]\n            self.sample_map_data[\"index_in_reference\"] = indices\n        \n        except:\n            raise Exception(\"sample not found in vcf file!!!\")\n        \n        # self.founders\n        print(\"Building founders...\")\n        self.sample_map_data[\"founders\"] = build_founders(self.sample_map_data,self.call_data,self.num_snps)\n        self.sample_map_data.drop(['index_in_reference'], axis=1, inplace=True)\n        \n    def __len__(self):\n        return len(self.sample_map_data)\n    
\n    def data(self):\n        return self.sample_map_data\n    \n    def metadata(self):\n        metadict = {\n            \"chm\":self.chm,\n            \"morgans\":self.morgans,\n            \"num_snps\":self.num_snps,\n            \"pos_snps\":self.pos_snps,\n            \"ref_snps\":self.ref_snps,\n            \"alt_snps\":self.alt_snps,\n            \"pop_to_num\":self.pop_to_num,\n            \"num_to_pop\":self.num_to_pop\n        }\n        return metadict\n    \n    def split_sample_map(self, ratios, split_names=None):\n        \"\"\"\n        Given sample_ids, populations and the amount of data to be put into each set,\n        Split it such that all sets get even distribution of sample_ids for each population.\n        \"\"\"\n\n        assert sum(ratios) == 1, \"ratios must sum to 1\"\n\n        split_names = [\"set_\"+str(i) for i in range(len(ratios))] if split_names is None else split_names\n        \n        set_ids = [[] for _ in ratios]\n        \n        for p in np.unique(self.sample_map_data[\"population\"]):\n\n            # subselect population\n            pop_idx = self.sample_map_data[\"population\"] == p\n            pop_sample_ids = list(np.copy(self.sample_map_data[\"sample\"][pop_idx]))\n            n_pop = len(pop_sample_ids)\n\n            # find number of samples in each set\n            n_sets = [round(r*n_pop) for r in ratios]\n            while sum(n_sets) > n_pop:\n                n_sets[0] -= 1 \n            while sum(n_sets) < n_pop:\n                n_sets[-1] += 1\n\n            # divide the samples accordingly\n            for s, r in enumerate(ratios):\n                n_set = n_sets[s]\n                set_ids_idx = np.random.choice(len(pop_sample_ids),n_set,replace=False)\n                set_ids[s] += [[pop_sample_ids.pop(idx), p, split_names[s]] for idx in sorted(set_ids_idx,reverse=True)]\n\n        split_df = pd.DataFrame(np.concatenate(set_ids), columns=[\"sample\", \"population\", \"split\"])\n        return split_df\n\n    def include_all(self, from_split, in_split):\n        from_split_data = self.sample_map_data[self.sample_map_data[\"split\"]==from_split]\n        from_pop = np.unique(from_split_data[\"population\"])\n        ave_pop_size = int(np.round(len(from_split_data)/len(from_pop)))\n\n        in_split_data = self.sample_map_data[self.sample_map_data[\"split\"]==in_split]\n        in_pop = np.unique(in_split_data[\"population\"])\n\n        missing_pops = [p for p in from_pop if p not in in_pop]\n\n        if len(missing_pops) > 0:\n            print(\"WARNING: Small sample size from populations: {}\".format(np.array(missing_pops)))\n            print(\"... 
Proceeding by including duplicates in both base- and smoother data...\")\n for p in missing_pops:\n # add some amount of founders to in_pop\n from_founders = from_split_data[from_split_data[\"population\"] == p].copy()\n n_copies = min(ave_pop_size, len(from_founders))\n copies = from_founders.sample(n_copies)\n copies[\"split\"] = [in_split]*n_copies\n self.sample_map_data = self.sample_map_data.append(copies)\n\n def create_splits(self,splits,outdir=None):\n print(\"Splitting sample map...\")\n \n # splits is a dict with some proportions, splits keys must be str\n assert(type(splits)==dict)\n self.splits = splits\n split_names, prop = zip(*self.splits.items())\n\n # normalize\n prop = np.array(prop) / np.sum(prop)\n \n # split founders randomly within each ancestry\n split_df = self.split_sample_map(ratios=prop, split_names=split_names)\n self.sample_map_data = self.sample_map_data.merge(split_df, on=[\"sample\", \"population\"])\n self.include_all(from_split=\"train1\",in_split=\"train2\")\n\n # write a sample map to outdir/split.map\n if outdir is not None:\n for split in splits:\n split_file = os.path.join(outdir,split+\".map\")\n self.return_split(split)[[\"sample\",\"population\"]].to_csv(split_file,sep=\"\\t\",header=False,index=False)\n \n def return_split(self,split):\n if split in self.splits:\n return self.sample_map_data[self.sample_map_data[\"split\"]==split]\n else:\n raise Exception(\"split does not exist!!!\")\n \n \n def simulate(self,num_samples,split=\"None\",gen=None,outdir=None,return_out=True, verbose=False):\n \n # general purpose simulator: can simulate any generations, either n of gen g or \n # just random n samples from gen 2 to 100.\n \n assert(type(split)==str)\n if verbose:\n print(\"Simulating using split: \",split) \n \n # get generations for each sample to be simulated\n if gen == None:\n gens = np.random.randint(2,100,num_samples)\n if verbose:\n print(\"Simulating random generations...\")\n \n else:\n gens = gen * np.ones((num_samples),dtype=int)\n if verbose:\n print(\"Simulating generation: \",gen)\n \n # corner case\n if gen == 0:\n simulated_samples = self.sample_map_data[self.sample_map_data[\"split\"]==split][\"founders\"].tolist()\n if outdir is not None:\n if verbose:\n print(\"Writing simulation output to: \",outdir)\n write_output(outdir,simulated_samples)\n \n # return the samples\n if return_out:\n return simulated_samples\n else:\n return\n \n # get the exact founder data based on split\n founders = self.sample_map_data[self.sample_map_data[\"split\"]==split][\"founders\"].tolist()\n founders_weight = self.sample_map_data[self.sample_map_data[\"split\"]==split][\"sample_weight\"].to_numpy()\n founders_weight = list(founders_weight/founders_weight.sum()) # renormalize to 1\n if len(founders) == 0:\n raise Exception(\"Split does not exist!!!\")\n \n # run simulation\n if verbose:\n print(\"Generating {} admixed samples\".format(num_samples))\n simulated_samples = []\n for i in range(num_samples):\n \n # create an admixed Person\n maternal = admix(founders,founders_weight,gens[i],self.breakpoint_prob,self.num_snps,self.morgans)\n paternal = admix(founders,founders_weight,gens[i],self.breakpoint_prob,self.num_snps,self.morgans)\n name = \"admixed\"+str(int(np.random.rand()*1e6))\n \n adm = Person(maternal,paternal,name)\n simulated_samples.append(adm)\n \n # write outputs\n if outdir is not None:\n if verbose:\n print(\"Writing simulation output to: \",outdir)\n write_output(outdir,simulated_samples)\n # TODO: optionally, we can even convert 
these to vcf and result (ancestry) files\n \n # return the samples\n if return_out:\n return simulated_samples\n else:\n return\n" } ]
6
nicolre239/DeepSpeechProcessing
https://github.com/nicolre239/DeepSpeechProcessing
666d557f603ea8d65ccfe21623028b00817bfa70
26e800632b6dff4761ed2ed77c268984584a43f7
dbde07cc4fbfb743b5743d2163c9f9dc43e9f10e
refs/heads/master
2022-12-18T17:46:21.772671
2020-10-09T14:30:06
2020-10-09T14:30:06
251,549,889
0
0
null
2020-03-31T08:56:19
2020-10-09T14:30:11
2022-12-08T03:56:22
Python
[ { "alpha_fraction": 0.4256559908390045, "alphanum_fraction": 0.6443148851394653, "avg_line_length": 15.149999618530273, "blob_id": "d17cb711d8a36ad037f1047e598c832f94695a9d", "content_id": "d2d2dd42c0ddc3e7bf2bcd3fcf580e600075dc66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 343, "license_type": "no_license", "max_line_length": 26, "num_lines": 20, "path": "/requirements.txt", "repo_name": "nicolre239/DeepSpeechProcessing", "src_encoding": "UTF-8", "text": "appdirs==1.4.3\r\ncertifi==2019.11.28\r\nchardet==3.0.4\r\ndecorator==4.4.2\r\ndistlib==0.3.0\r\nfilelock==3.0.12\r\nidna==2.9\r\nimageio==2.8.0\r\nimageio-ffmpeg==0.4.1\r\nmoviepy==1.0.2\r\nnumpy==1.18.2\r\nPillow==7.0.0\r\nproglog==0.1.9\r\npydub==0.23.1\r\nrequests==2.23.0\r\nsix==1.14.0\r\ntqdm==4.44.1\r\ntyping-extensions==3.7.4.1\r\nurllib3==1.25.8\r\nvirtualenv==20.0.15\r\n" }, { "alpha_fraction": 0.4856307804584503, "alphanum_fraction": 0.5144909620285034, "avg_line_length": 30.856000900268555, "blob_id": "cffd3c1235168425c87cbc2711006e471b1b00f4", "content_id": "f85a196e0c0f7359714ea5d10fa94ee69f5a739e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8212, "license_type": "no_license", "max_line_length": 177, "num_lines": 250, "path": "/main.py", "repo_name": "nicolre239/DeepSpeechProcessing", "src_encoding": "UTF-8", "text": "import moviepy.editor as mp\r\nimport glob\r\nimport csv\r\nimport time\r\nimport sys\r\nimport os\r\n\r\n#lastFileName = [-1]\r\nlastFileName = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\nglobalCounter = 0\r\ntotalDurationMs = 0\r\n\r\ndef incName(position):\r\n global lastFileName\r\n alphabetBase = 25\r\n\r\n if lastFileName[position] < alphabetBase:\r\n lastFileName[position] = lastFileName[position] + 1\r\n return 0\r\n elif position != 0:\r\n lastFileName[position] = 0\r\n return incName(position - 1)\r\n else:\r\n for i in range(len(lastFileName)):\r\n lastFileName[i] = 0\r\n\r\n lastFileName.insert(0, 0)\r\n return 0\r\n\r\ndef getName():\r\n global lastFileName\r\n\r\n incName(len(lastFileName) - 1)\r\n\r\n name = ''\r\n for i in range (len(lastFileName)):\r\n name = name + chr(lastFileName[i] + 97)\r\n\r\n return name\r\n\r\ndef getTimings (timeString):\r\n timeArray = [0, 0, 0, 0]\r\n shiftInMs = 135\r\n\r\n beginTimeArray = timeString.split(':')\r\n #print(beginTimeArray)\r\n\r\n timeArray[0] = int(beginTimeArray[0])\r\n timeArray[1] = int(beginTimeArray[1])\r\n\r\n secMs = str(beginTimeArray[2]).split('.')\r\n timeArray[2] = int(secMs[0])\r\n timeArray[3] = int(secMs[1])\r\n\r\n timeArray[3] = timeArray[3] + shiftInMs\r\n\r\n if timeArray[3] > 999:\r\n timeArray[3] = timeArray[3] % 1000\r\n timeArray[2] = timeArray[2] + 1\r\n\r\n if timeArray[2] > 60:\r\n timeArray[2] = timeArray[2] % 60\r\n timeArray[1] = timeArray[1] + 1\r\n\r\n if timeArray[1] > 60:\r\n timeArray[1] = timeArray[1] % 60\r\n timeArray[0] = timeArray[0] + 1\r\n\r\n return timeArray\r\n\r\n#def writeRow():\r\n\r\n\r\ndef cutFile (i, book):\r\n mp4 = glob.glob(book + 'audio/' + str(i) + '.mp4')\r\n subtitles = glob.glob(book + 'text/subtitles_final_' + str(i) + '.txt')\r\n #print (mp4)\r\n #print(subtitles)\r\n\r\n subtitles_file = open (subtitles[0], 'r')\r\n\r\n while True:\r\n beginTime = subtitles_file.readline()\r\n text = subtitles_file.readline().replace(\"\\n\", \"\").replace(\"\\t\", \"\")\r\n endTime = subtitles_file.readline()\r\n emptyLine = subtitles_file.readline()\r\n\r\n if not 
beginTime or not text or not endTime:\r\n            break\r\n\r\n        beginTimeArray = getTimings(beginTime)\r\n        endTimeArray = getTimings(endTime)\r\n\r\n        #filename = '0'\r\n        try:\r\n            filename = cut(mp4, beginTimeArray, endTimeArray)\r\n        except (OSError, BrokenPipeError):\r\n            #logfile = open('logfile.txt', 'a')\r\n            #logfile.write('\\n#\\n#ERROR:\\n' + filename + '\\n#\\n')\r\n            #logfile.close()\r\n            continue\r\n\r\n        if filename != '0':\r\n            global globalCounter\r\n            if globalCounter < 3:\r\n                file = open('./cutted/train.tsv', 'a', newline='')\r\n            elif globalCounter < 4:\r\n                file = open('./cutted/dev.tsv', 'a', newline='')\r\n            else:\r\n                file = open('./cutted/test.tsv', 'a', newline='')\r\n\r\n            tsv_writer = csv.writer(file, delimiter = '\\t')\r\n            tsv_writer.writerow([filename + '.mp3', text])\r\n            file.close()\r\n\r\n            globalCounter = (globalCounter + 1) % 5\r\n        else:\r\n            '''\r\n            logfile = open('logfile.txt', 'a')\r\n\r\n            logfile.write(\"\\n\\nCut declined cause of timing\\n\")\r\n            logfile.write(beginTime)\r\n            logfile.write(text + '\\n')\r\n            logfile.write(endTime + '\\n')\r\n\r\n            logfile.close()\r\n            '''\r\n            continue\r\n\r\n\r\n\r\n\r\ndef cut (mp4, beginTime, endTime):\r\n    global totalDurationMs\r\n    mp4_file = mp4[0]\r\n\r\n    clip = mp.VideoFileClip(mp4_file)\r\n\r\n    beginMs = beginTime[3] + beginTime[2] * 1000 + beginTime[1] * 60 * 1000 + beginTime[0] * 3600 * 1000\r\n    endMs = endTime[3] + endTime[2] * 1000 + endTime[1] * 60 * 1000 + endTime[0] * 3600 * 1000\r\n\r\n    #(beginTime)\r\n    #print(endTime)\r\n    if (endMs - beginMs > 1000):\r\n        totalDurationMs = totalDurationMs + (endMs - beginMs)\r\n\r\n        beginTimeString = str(beginTime[0]) + ':' + str(beginTime[1]) + ':' + str(beginTime[2]) + '.'\r\n        if beginTime[3] < 10:\r\n            beginTimeString = beginTimeString + '00'\r\n        elif beginTime[3] < 100:\r\n            beginTimeString = beginTimeString + '0'\r\n        beginTimeString = beginTimeString + str(beginTime[3])\r\n\r\n        endTimeString = str(endTime[0]) + ':' + str(endTime[1]) + ':' + str(endTime[2]) + '.'\r\n        if endTime[3] < 10:\r\n            endTimeString = endTimeString + '00'\r\n        elif endTime[3] < 100:\r\n            endTimeString = endTimeString + '0'\r\n        endTimeString = endTimeString + str(endTime[3])\r\n\r\n        #print(beginTimeString)\r\n        #print(endTimeString)\r\n        subclip = clip.subclip(beginTimeString, endTimeString)\r\n\r\n        filename = getName()\r\n        #print(r\"./cutted/clips/\" + filename + \".mp3\")\r\n        subclip.audio.write_audiofile(r\"./cutted/clips/\" + filename + \".mp3\", verbose=False, logger=None)\r\n        #print('afterwrite')\r\n        #clip = mp.AudioFileClip('./cutted/clips/' + filename + '.mp3')\r\n        #print(clip.duration)\r\n        #print(subclip.duration)\r\n        #print ('beforeclose')\r\n        clip.close()\r\n        #print ('afterclose')\r\n        return filename\r\n    else:\r\n        #print('\\nClip is too short\\n')\r\n        return '0'\r\n\r\n\r\ndef main():\r\n    global lastFileName\r\n    global totalDurationMs\r\n\r\n    try:\r\n        if (int(sys.argv[1]) > 25) or (int(sys.argv[1]) < 0) or (int(sys.argv[2]) < 0) or (int(sys.argv[2]) > 25):\r\n            print(\"WRONG COMMAND LINE ARGUMENT\")\r\n            return 0\r\n        else:\r\n            try:\r\n                os.mkdir('./cutted')\r\n                os.mkdir('./cutted/clips/')\r\n            except FileExistsError:\r\n                print('Directory already exists')\r\n\t\t\t \r\n            lastFileName[0] = int(sys.argv[1])\r\n            lastFileName[1] = int(sys.argv[2])\r\n            start_time = time.time()\r\n            books = glob.glob('./resources/*/')\r\n\r\n            file = open('./cutted/train.tsv', 'w', newline='')\r\n            tsv_writer = csv.writer(file, delimiter='\\t')\r\n            tsv_writer.writerow(['path', 'sentence'])\r\n            file.close()\r\n\r\n            file = open('./cutted/dev.tsv', 
'w', newline='')\r\n            tsv_writer = csv.writer(file, delimiter='\\t')\r\n            tsv_writer.writerow(['path', 'sentence'])\r\n            file.close()\r\n\r\n            file = open('./cutted/test.tsv', 'w', newline='')\r\n            tsv_writer = csv.writer(file, delimiter='\\t')\r\n            tsv_writer.writerow(['path', 'sentence'])\r\n            file.close()\r\n\r\n            file = open('logfile.txt', 'w')\r\n            file.close()\r\n\r\n            for book in books:\r\n                partsNumber = len(glob.glob(book + 'audio/*.mp4'))\r\n\r\n\r\n                logfile = open('logfile.txt', 'a')\r\n                logfile.write('\\n******\\nBOOK ' + book + '\\n*\\n*\\n*')\r\n                print('\\n******\\nBOOK ' + book + '\\n*\\n*\\n*')\r\n                logfile.close()\r\n\r\n                for i in range(1, partsNumber + 1):\r\n                    #start_time = time.time()\r\n                    totalDurationMs = 0\r\n                    logfile = open('logfile.txt', 'a')\r\n                    logfile.write('\\n\\nWorking with part %d out of %d' % (i, partsNumber))\r\n                    logfile.close()\r\n                    print('\\n\\nWorking with part %d out of %d' % (i, partsNumber))\r\n                    cutFile(i, book)\r\n\r\n                    logfile = open('logfile.txt', 'a')\r\n                    logfile.write('\\nTotal cuts time: %d:%d:%d \\n' % (totalDurationMs // 3600000, totalDurationMs % 3600000 // 60000, totalDurationMs % 3600000 % 60000 // 1000))\r\n                    logfile.close()\r\n                    #print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n    except IndexError:\r\n        print(\"WRONG ARGUMENTS NUMBER. 2 ARGUMENTS REQUIRED\")\r\n        return 0\r\n\r\n    print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n    logfile = open('logfile.txt', 'a')\r\n    logfile.write(\"--- %s seconds ---\" % (time.time() - start_time))\r\n    logfile.close()\r\nif __name__ == '__main__':\r\n    main()" }, { "alpha_fraction": 0.855555534362793, "alphanum_fraction": 0.855555534362793, "avg_line_length": 44, "blob_id": "2d30a608280dc1b5de2095d3e26efd81b7ddebe6", "content_id": "e5a44dd93b39be36f1c82d2e8c2fafccb54c8be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 90, "license_type": "no_license", "max_line_length": 66, "num_lines": 2, "path": "/README.md", "repo_name": "nicolre239/DeepSpeechProcessing", "src_encoding": "UTF-8", "text": "# DeepSpeechProcessing\nScript for automated generation of training samples for DeepSpeech.\n" } ]
3
Razent1/test_col
https://github.com/Razent1/test_col
e3c574ee51f6eb87aaf0e7f4e67c36766f890bed
c3d92a94faf77db0ca95090160a975cb4ef78b3f
ba2cc20b6a97a5c54f95b45b64279b1ac33bc013
refs/heads/master
2023-07-26T01:46:06.200944
2021-08-31T19:17:13
2021-08-31T19:17:13
401,760,006
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5977579951286316, "alphanum_fraction": 0.6023738980293274, "avg_line_length": 34.670589447021484, "blob_id": "cac18ad5566c23ab0f1e71fd694b49964ed7b231", "content_id": "15e95d2a5edd45028514fd45422487531e4ac278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3033, "license_type": "no_license", "max_line_length": 100, "num_lines": 85, "path": "/src/main.py", "repo_name": "Razent1/test_col", "src_encoding": "UTF-8", "text": "from typing import Optional\nfrom fastapi import FastAPI, Request, UploadFile, File, Form\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom databases import Database\nfrom datetime import date\nfrom starlette.responses import RedirectResponse\n\napp = FastAPI()\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\ndatabase = Database(\"sqlite:///test.db\")\n\n\[email protected]_event(\"startup\")\nasync def database_connect():\n await database.connect()\n\n\[email protected]_event(\"shutdown\")\nasync def database_disconnect():\n await database.disconnect()\n\n\[email protected](\"/\")\nasync def root(request: Request):\n query = \"SELECT * FROM sales\"\n results = await database.fetch_all(query=query)\n return templates.TemplateResponse(\"index.html\", {\"request\": request,\n \"date\": str(date.today().strftime(\"%Y-%m-%d\")),\n \"res\": results})\n\n\[email protected](\"/error\")\nasync def error(request: Request):\n query = \"SELECT * FROM sales\"\n results = await database.fetch_all(query=query)\n return templates.TemplateResponse(\"error.html\", {\"request\": request,\n \"date\": str(date.today().strftime(\"%Y-%m-%d\")),\n \"res\": results})\n\n\[email protected](\"/delete\")\nasync def delete_page(request: Request):\n query = \"SELECT * FROM sales\"\n results = await database.fetch_all(query=query)\n return templates.TemplateResponse(\"delete.html\", {\"request\": request,\n \"res\": results})\n\n\[email protected](\"/delete_error\")\nasync def delete_error(request: Request):\n query = \"SELECT * FROM sales\"\n results = await database.fetch_all(query=query)\n return templates.TemplateResponse(\"delete_error.html\", {\"request\": request,\n \"res\": results})\n\n\[email protected](\"/send_data\")\nasync def send_data(request: Request,\n total_due: Optional[float] = Form(None),\n date_sale: Optional[date] = Form(None),\n manager_id: Optional[int] = Form(None)):\n if total_due and date_sale and manager_id:\n query = f\"\"\"INSERT INTO sales(total_due, date_sale, manager_id) \n VALUES ({total_due}, DATE('{date_sale}'),\n {manager_id})\"\"\"\n await database.execute(query=query)\n else:\n return RedirectResponse(url=\"/error\", status_code=302)\n return RedirectResponse(url=\"/\", status_code=302)\n\n\[email protected](\"/delete_data\")\nasync def delete_data(request: Request,\n order_id: Optional[int] = Form(None)):\n if order_id:\n query = f\"\"\"DELETE FROM sales WHERE {order_id} = order_id\"\"\"\n await database.execute(query=query)\n else:\n return RedirectResponse(url=\"/delete_error\", status_code=302)\n return RedirectResponse(url=\"/delete\", status_code=302)\n\n" }, { "alpha_fraction": 0.7388535141944885, "alphanum_fraction": 0.808917224407196, "avg_line_length": 51.66666793823242, "blob_id": "dd01353469bc62e6cabe462c175c5ab0a5c5d28f", "content_id": "69dfbafc71874c40e94b2e0ee6faa8d6fd22c93b", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Shell", "length_bytes": 157, "license_type": "no_license", "max_line_length": 100, "num_lines": 3, "path": "/src/entrypoint.sh", "repo_name": "Razent1/test_col", "src_encoding": "UTF-8", "text": "#!/bin/bash\npip3 install fastapi aiosqlite databases starlette uvicorn aiofiles jinja2 python-multipart pydantic\nuvicorn main:app --host 0.0.0.0 --port 15400" } ]
2
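The test_col record above ships a FastAPI handler (send_data) that interpolates form values straight into SQL f-strings before calling database.execute. A minimal sketch of the same sales insert using bound parameters from the `databases` package instead; the sales table and its columns come from the record, while insert_sale, the reused connection URL, and the literal values are illustrative assumptions, not repository code:

import asyncio
from databases import Database

async def insert_sale(db: Database, total_due: float, date_sale: str, manager_id: int) -> None:
    # Named placeholders are bound by the driver rather than formatted
    # into the SQL string, which avoids the injection risk of f-strings.
    query = (
        "INSERT INTO sales(total_due, date_sale, manager_id) "
        "VALUES (:total_due, DATE(:date_sale), :manager_id)"
    )
    await db.execute(query=query, values={
        "total_due": total_due,
        "date_sale": date_sale,
        "manager_id": manager_id,
    })

async def main() -> None:
    db = Database("sqlite:///test.db")  # same URL as the record's main.py
    await db.connect()
    await insert_sale(db, 19.99, "2021-05-08", 7)
    await db.disconnect()

if __name__ == "__main__":
    asyncio.run(main())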
nivyashri/djangoproject
https://github.com/nivyashri/djangoproject
8f1c25fc0915797bc2dcc455066f5ce88f7abfb2
cda33466f95685653685164ae03cef81cd840c11
98c8d26afb380d8829d8dd694740c5e0620e1f0c
refs/heads/master
2020-04-03T17:02:34.067216
2018-10-30T18:02:25
2018-10-30T18:02:25
155,429,311
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6523545980453491, "alphanum_fraction": 0.6551246643066406, "avg_line_length": 26.80769157409668, "blob_id": "3425b95c372eaf83d8ca977ffcdb451e6e7f7348", "content_id": "ef54827bfed7846da2d5a0f9a37af33272b679e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/student/views.py", "repo_name": "nivyashri/djangoproject", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom student.models import std\n\nclass Sun(View):\n\n def get(self,request,*args,**kwargs):\n print(\"xxxxxxxxxxxxx\")\n return render(request,\"index.html\")\n\n def post(self,request,*args,**kargs):\n do=std()\n do.fname=request.POST.get('First_Name')\n do.lname=request.POST.get('Last_Name')\n do.email=request.POST.get('Email_Id')\n do.mobileno=request.POST.get('Mobile_Number')\n do.address=request.POST.get('Address')\n do.city=request.POST.get('City')\n do.save()\n return HttpResponse(\"<h1>success</h2>\")\n\n\n\nsun=Sun.as_view()\n# changes here" }, { "alpha_fraction": 0.6739130616188049, "alphanum_fraction": 0.6739130616188049, "avg_line_length": 16.375, "blob_id": "5e424b00ddfcfd785c551eb64442926afc3760a9", "content_id": "6d82d99ff78f68f3a0701cd5c3b41e69197cb62a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 41, "num_lines": 8, "path": "/student/urls.py", "repo_name": "nivyashri/djangoproject", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom student.views import sun\n\n\nurlpatterns = [\n\n url(r'^hello', view=sun, name='sun'),\n ]" }, { "alpha_fraction": 0.7097457647323608, "alphanum_fraction": 0.7372881174087524, "avg_line_length": 42, "blob_id": "d43fd31c7f2e2acf61b495aa01b50716b58a8770", "content_id": "ad804499026e2d931997e795f5437c72678d487a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/student/models.py", "repo_name": "nivyashri/djangoproject", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass std(models.Model):\n fname=models.CharField(max_length=64, blank=True, null=True)\n lname=models.CharField(max_length=64, blank=True, null=True)\n email = models.EmailField(max_length=64, blank=True, null=True)\n mobileno=models.CharField(max_length=64, blank=True, null=True)\n address = models.CharField(max_length=100, blank=True, null=True)\n city = models.CharField(max_length=64, blank=True, null=True)\n\n# changes here" } ]
3
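The djangoproject record maps r'^hello' to the Sun class-based view, whose post handler reads six form fields and saves an std row. A hedged client sketch, assuming the dev server is running on 127.0.0.1:8000, the app's urls are included at the project root, and CSRF protection is not enforced for this route; requests and the sample values are not part of the repository:

import requests

# Field names mirror the request.POST keys read by Sun.post in views.py.
payload = {
    "First_Name": "Asha",
    "Last_Name": "N",
    "Email_Id": "[email protected]",
    "Mobile_Number": "9999999999",
    "Address": "12 Main St",
    "City": "Chennai",
}
resp = requests.post("http://127.0.0.1:8000/hello", data=payload)
print(resp.status_code, resp.text)  # the view responds with "<h1>success</h2>"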
skasch/docfmt
https://github.com/skasch/docfmt
1f7423e9cb3f9b0d2f4818e5c40400b25aa2c606
9eeb1d6fc614424cb02125c95ef4a5758104535b
0b4d019a7609e25f4f12b494ec99e1ebd9baa148
refs/heads/master
2020-08-08T05:55:52.337958
2019-10-09T05:39:32
2019-10-09T05:39:32
213,744,311
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 19, "blob_id": "5de9ff0fbdd3ec42f82473d9266a0d718701299f", "content_id": "95a71c0abbfa1ff67d6ebdc5e0d7e02a798d9f67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "permissive", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "skasch/docfmt", "src_encoding": "UTF-8", "text": "# docfmt\nDocstring formatter for python\n" }, { "alpha_fraction": 0.559880256652832, "alphanum_fraction": 0.6017963886260986, "avg_line_length": 14.904762268066406, "blob_id": "d5de423cdfa7682478d184a3bf14613c84e4a679", "content_id": "40ec25b7e99fa6334b636594ac76ef61617dbe80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "permissive", "max_line_length": 47, "num_lines": 21, "path": "/docfmt/docstring.py", "repo_name": "skasch/docfmt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThe docstring module.\n\nRepresent a docstring.\n\nCreated by Romain Mondon-Cancel on 2019/10/08 20:14.\n\"\"\"\n\nimport abc\n\n\nclass Docstring(abc.ABC):\n @abc.abstractmethod\n def __init__(self, docstring: str) -> None:\n pass\n\n @abc.abstractmethod\n def __str__(self) -> str:\n pass\n" }, { "alpha_fraction": 0.5721649527549744, "alphanum_fraction": 0.6443299055099487, "avg_line_length": 15.166666984558105, "blob_id": "ee0c73b2efa75beddcf1fbdbcf1ede2108ff12c1", "content_id": "46541f4759751c061b835f05d3a228c36780671f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "permissive", "max_line_length": 59, "num_lines": 12, "path": "/tests/__init__.py", "repo_name": "skasch/docfmt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nInit for tests.\n\nCreated by Romain Mondon-Cancel on 2019/10/08 22:03.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n" }, { "alpha_fraction": 0.5157894492149353, "alphanum_fraction": 0.5894736647605896, "avg_line_length": 14.833333015441895, "blob_id": "4a4f0e479962fd5f6b567993a90e57a6f8ed3a95", "content_id": "9bb5a050fc8cbe0ed8edf94576db641738f842e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "permissive", "max_line_length": 38, "num_lines": 12, "path": "/docfmt/__init__.py", "repo_name": "skasch/docfmt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nInit for module docfmt.\n\nFormat the docstrings of a file.\n\nCreated by Romain Mondon-Cancel on 2019/10/08 20:13.\n\"\"\"\n\nif __name__ == \"__main__\":\n pass\n" }, { "alpha_fraction": 0.564184844493866, "alphanum_fraction": 0.5731707215309143, "avg_line_length": 30.139999389648438, "blob_id": "f72a8c5f17a22c0c2221260ef94a47a2cfd84601", "content_id": "f9df09b2691400f5dae303339b7ddad9e95915cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1558, "license_type": "permissive", "max_line_length": 87, "num_lines": 50, "path": "/tests/test_google.py", "repo_name": "skasch/docfmt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTests for module google.\n\nCreated by Romain Mondon-Cancel on 2019/10/08 
22:02.\n\"\"\"\n\nfrom docfmt import google\n\n\ndef test_clear_empty_lines():\n assert google.clear_empty_lines([]) == ([], False)\n assert google.clear_empty_lines([\"\"]) == ([], True)\n assert google.clear_empty_lines([\" \", \" \", \"Hello\", \"\"]) == ([\"Hello\", \"\"], True)\n assert google.clear_empty_lines([\"Hello\", \"\"]) == ([\"Hello\", \"\"], False)\n\n\ndef test_is_section_header():\n assert google.is_section_header(\"\") == (False, None)\n assert google.is_section_header(\" Args:\") == (True, google.SectionName.Args)\n assert google.is_section_header(\" \") == (False, None)\n assert google.is_section_header(\" Arks:\") == (False, None)\n assert google.is_section_header(\" Yield\") == (False, None)\n assert google.is_section_header(\" Yield:\") == (True, google.SectionName.Yields)\n\n\ndef test_build_tree():\n assert google.build_tree(\"Describe a simple docstring.\") == google.SyntaxTree(\n \"Describe a simple docstring.\", [], []\n )\n assert (\n google.build_tree(\n \"\"\"\n Describe a simple docstring.\n\n This docstring also has a long description.\n\n Long descriptions can have multiple paragraphs.\n \"\"\"\n )\n == google.SyntaxTree(\n \"Describe a simple docstring.\",\n [],\n [\n \"This docstring also has a long description.\",\n \"Long descriptions can have multiple paragraphs.\",\n ],\n )\n )\n\n" }, { "alpha_fraction": 0.6339372992515564, "alphanum_fraction": 0.6395690441131592, "avg_line_length": 26.594594955444336, "blob_id": "3ad128688879584a6838f58e3e924f2ae0bd54ab", "content_id": "7dc37d6d246f840fae6a1da7632fe3052b2650d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4084, "license_type": "permissive", "max_line_length": 82, "num_lines": 148, "path": "/docfmt/google.py", "repo_name": "skasch/docfmt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThe google module.\n\nThe Google module handles Google-style docstrings.\n\nCreated by Romain Mondon-Cancel on 2019/10/08 20:04.\n\"\"\"\n\nimport dataclasses as dc\nimport enum\nimport typing as t\n\n\nclass SectionName(enum.Enum):\n Args = \"Arguments:\"\n Arguments = \"Arguments:\"\n Attention = \"Attention:\"\n Attributes = \"Attributes:\"\n Caution = \"Caution:\"\n Danger = \"Danger:\"\n Error = \"Error:\"\n Example = \"Examples:\"\n Examples = \"Examples:\"\n Hint = \"Hint:\"\n Important = \"Important:\"\n KeywordArgs = \"Keyword Arguments:\"\n KeywordArguments = \"Keyword Arguments:\"\n Methods = \"Methods:\"\n Notes = \"Notes:\"\n OtherParameters = \"Other Parameters:\"\n Parameters = \"Parameters:\"\n Return = \"Returns:\"\n Returns = \"Returns:\"\n Raises = \"Raises:\"\n References = \"References:\"\n SeeAlso = \"See Also:\"\n Tip = \"Tip:\"\n Todo = \"Todo:\"\n Warning = \"Warnings:\"\n Warnings = \"Warnings:\"\n Warns = \"Warns:\"\n Yield = \"Yields:\"\n Yields = \"Yields:\"\n\n def __str__(self) -> str:\n return self.value\n\n\[email protected]\nclass NamedBlock:\n name: str\n content: \"Block\"\n option: t.List[str] = dc.field(default_factory=list)\n\n\[email protected]\nclass CodeBlock:\n content: str\n output: t.Optional[str] = None\n\n\[email protected]\nclass Section:\n name: t.Union[SectionName, str]\n content: \"Block\"\n\n\nSingleBlock = t.Union[str, CodeBlock, NamedBlock]\nBlock = t.Union[SingleBlock, t.List[SingleBlock]]\n\n\[email protected]\nclass SyntaxTree:\n description: str\n sections: t.List[t.Union[Block, Section]]\n long_description: t.List[str] = 
dc.field(default_factory=list)\n\n\nclass Error(Exception):\n def __init__(self, docstring: str, message: t.Optional[str] = None):\n self.docstring = docstring\n if message is None:\n message = f\"An error occurred with docstring {docstring}.\"\n super().__init__(message)\n\n\nclass MissingDescriptionError(Error):\n \"\"\"Exception raised when a docstring is missing the mandatory description.\"\"\"\n\n def __init__(self, docstring: str):\n super().__init__(docstring, \"Missing mandatory description in docstring.\")\n\n\ndef clear_empty_lines(lines: t.List[str]) -> t.Tuple[t.List[str], bool]:\n has_cleared_something = False\n while lines and lines[0].strip() == \"\":\n has_cleared_something = True\n lines = lines[1:]\n return lines, has_cleared_something\n\n\ndef is_section_header(line: str) -> t.Tuple[bool, t.Optional[SectionName]]:\n section_name = None\n line = line.strip()\n is_section_ = line and line[-1] == \":\" or False\n if is_section_:\n section_string = line[:-1].replace(\" \", \"\")\n is_section_ = is_section_ and section_string in SectionName.__members__\n if is_section_:\n section_name = SectionName[section_string]\n return is_section_, section_name\n\n\ndef is_line_valid(line: str, with_indent: t.Optional[int]) -> bool:\n return line.strip() != \"\" and (\n with_indent is None or line[:with_indent] == \" \" * with_indent\n )\n\n\ndef extract_paragraph(\n lines: t.List[str], with_indent: t.Optional[int] = None\n) -> t.Tuple[str, t.List[str]]:\n paragraph_lines = []\n while lines and is_line_valid(lines[0], with_indent):\n paragraph_lines.append(lines[0].strip())\n lines = lines[1:]\n return (\" \".join(paragraph_lines), lines)\n\n\ndef build_tree(docstring: str) -> SyntaxTree:\n description = \"\"\n long_description = []\n sections = []\n lines = docstring.split(\"\\n\")\n lines, _ = clear_empty_lines(lines)\n if not lines:\n raise MissingDescriptionError(docstring)\n description, lines = extract_paragraph(lines)\n lines, _ = clear_empty_lines(lines)\n if not lines:\n return SyntaxTree(description, sections, long_description)\n while lines and not is_section_header(lines[0])[0]:\n single_long_description, lines = extract_paragraph(lines)\n long_description.append(single_long_description)\n lines, _ = clear_empty_lines(lines)\n return SyntaxTree(description, sections, long_description)\n" } ]
6
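The docfmt record's google.build_tree parses a Google-style docstring into a SyntaxTree of description, sections, and long-description paragraphs. A short sketch of calling it directly, mirroring the cases already covered by the record's tests/test_google.py:

from docfmt import google

tree = google.build_tree(
    """
    Describe a simple docstring.

    This docstring also has a long description.
    """
)
print(tree.description)       # Describe a simple docstring.
print(tree.long_description)  # ['This docstring also has a long description.']
print(tree.sections)          # [] -- build_tree does not fill sections yet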
uribou800/pytorch_base
https://github.com/uribou800/pytorch_base
4ecf1bdaa1fd7aefaf72dd2828b6eb2ed48cad93
d853e37de538775aae6ccfd7d94130ecc14b4eda
0f10febad7ea6ec677f608e8349e89cb4a212c46
refs/heads/master
2023-04-20T23:44:03.874141
2021-05-08T15:32:05
2021-05-08T15:32:05
351,097,775
0
0
null
2021-03-24T13:52:45
2021-04-01T13:28:45
2021-05-08T15:32:05
null
[ { "alpha_fraction": 0.49496981501579285, "alphanum_fraction": 0.5291750431060791, "avg_line_length": 18.8799991607666, "blob_id": "f85d3fb3a1148d2827db010838bbbfba381cc02a", "content_id": "f05746726f7ca9b5eaaa35b43312c53f6d545526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 46, "num_lines": 25, "path": "/src/net.py", "repo_name": "uribou800/pytorch_base", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n\n\nclass Network(nn.Module):\n def __init__(self, hidden_size):\n super(Network, self).__init__()\n self.l1 = nn.Linear(2, hidden_size)\n self.l2 = nn.Linear(hidden_size, 3)\n\n def forward(self, x):\n h = torch.tanh(self.l1(x))\n o = torch.sigmoid(self.l2(h))\n return o\n\n\ndef test():\n net = Network(128)\n x = torch.Tensor([[1.2, 3.3], [2.3, 7.8]])\n y = net(x)\n print(y)\n\n\nif __name__ == \"__main__\":\n test()\n" }, { "alpha_fraction": 0.5089285969734192, "alphanum_fraction": 0.5556318759918213, "avg_line_length": 25, "blob_id": "3e4121f547434799e78cdadd584d090b3399db6f", "content_id": "f5a66daeec2b96e110ea7139cc3b20b34d0b343a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1680, "license_type": "no_license", "max_line_length": 61, "num_lines": 56, "path": "/src/updater.py", "repo_name": "uribou800/pytorch_base", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nfrom net import Network\n\n\nclass Updater:\n def __init__(self, net, optimizer):\n self.net = net\n self.optimizer = optimizer\n self.criterion = nn.MSELoss()\n\n # 今回のタスクで使用\n # モデルの入力は0~1の値なので和,差,積はそれぞれ0~2, -1~1, 0~1の値となる\n # ネットワークの出力はsigmoidなので0~1なので和と差の範囲をカバーできない\n # モデルの出力に以下の定数をかけたり足したりすることでモデルの出力と答えのカバー範囲を揃える.\n self.mul_constant = torch.Tensor([2, 2, 1])\n self.sum_constant = torch.Tensor([0, -1, 0])\n\n def step(self, x, t):\n o = self.net(x)\n y = torch.mul(o, self.mul_constant)\n self.optimizer.zero_grad()\n loss = self._cal_loss(y, t)\n loss.backward()\n self.optimizer.step()\n return loss\n\n def _cal_loss(self, y, t):\n return self.criterion(y + self.sum_constant, t)\n\n\ndef test():\n import torch.optim as optim\n import numpy as np\n\n net = Network(128)\n opt = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n updater = Updater(net, opt)\n\n x = torch.Tensor([[0.4, 0.3], [0.5, 0.9]])\n t = torch.Tensor([[0.7, 0.1, 0.12], [1.4, -0.4, 0.45]])\n mc = np.array([2, 2, 1])\n sc = np.array([0, -1, 0])\n\n print(net(x).data.numpy() * mc + sc)\n\n for i in range(10000):\n loss = updater.step(x, t)\n if i % 1000 + 1 == 1000:\n print(\"iter {} : {}\".format(i + 1, loss))\n\n print(net(x).data.numpy() * mc + sc)\n\n\nif __name__ == \"__main__\":\n test()\n" }, { "alpha_fraction": 0.5330275297164917, "alphanum_fraction": 0.5431192517280579, "avg_line_length": 25.585365295410156, "blob_id": "e9a6d19115777c8786f4f1e7d0e9cd968b8e0f27", "content_id": "feefb7d167ace7bc0b4246afbb3ac8aca29f2c6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1090, "license_type": "no_license", "max_line_length": 71, "num_lines": 41, "path": "/src/data_loader.py", "repo_name": "uribou800/pytorch_base", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\n\n\nclass DataLoader:\n def __init__(self, iter_num, batchsize):\n self.iter_num = iter_num\n self.batchsize = batchsize\n\n def __iter__(self):\n return DataLoaderIterator(self.iter_num, 
self.batchsize)\n\n\nclass DataLoaderIterator:\n def __init__(self, iter_num, batchsize):\n self.iter_num = iter_num\n self.batchsize = batchsize\n self._pointer = 0\n\n def __next__(self):\n if self._pointer == self.iter_num:\n raise StopIteration\n data = {}\n input_data = np.random.rand(self.batchsize, 2)\n output_data = np.array([input_data[:, 0] + input_data[:, 1],\n input_data[:, 0] - input_data[:, 1],\n input_data[:, 0] * input_data[:, 1]]).T\n data[\"input\"] = torch.Tensor(input_data)\n data[\"output\"] = torch.Tensor(output_data)\n self._pointer += 1\n return data\n\n\ndef main():\n data_loader = DataLoader(3, 4)\n for data in data_loader:\n print(data)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7168141603469849, "alphanum_fraction": 0.7168141603469849, "avg_line_length": 11.592592239379883, "blob_id": "dd9c99a5c412fe0435d5466237199b22add77216", "content_id": "1d9eae886283bd252ef097984fb303a915efb5f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 665, "license_type": "no_license", "max_line_length": 47, "num_lines": 27, "path": "/README.md", "repo_name": "uribou800/pytorch_base", "src_encoding": "UTF-8", "text": "# pytorch_base\n\n## 概要\npytorch学習用のテンプレートプロジェクト. \nやっていることは足し算,引き算,掛け算をNNに解かせている.\n\n## 各ファイルの説明\n### main.py\n\n### trainer.py\n\n### updater.py\n\n### net.py\n\n### dataloader.py\n\n### visualizer.py\n\n### util.py\n\n## やりたいこと\n- SGD以外のフォーマットも作成する\n - 例えば遺伝的アルゴリズム\n - ミツバチやトンボの方法も気になる\n - GANなどにも対応できるようにしたい\n- 自然言語用のレポジトリや画像用のものなど,別プロジェクトにフォークして徐々に分化させていく" } ]
4
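The pytorch_base record splits training into net.Network, updater.Updater, and data_loader.DataLoader, each shipping its own test() function. A sketch wiring the three together; the hyperparameters follow those test() functions, while the loop itself is an assumed usage, not code from the repository:

import torch.optim as optim
from net import Network
from updater import Updater
from data_loader import DataLoader

net = Network(hidden_size=128)
opt = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
updater = Updater(net, opt)

loss = None
for batch in DataLoader(iter_num=100, batchsize=32):
    # Each batch maps "input" to (batch, 2) floats and "output" to the
    # sum / difference / product targets built in data_loader.py.
    loss = updater.step(batch["input"], batch["output"])
print(loss)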
PigLion/TestAI
https://github.com/PigLion/TestAI
71991723d21b68af6f22824e8f4a40c52032dd54
f1db6a7cd9c1dfb5899bade28831d6ee2c7c1c61
27620a3d33ccf2b54e531d4344b1b1f299f78369
refs/heads/master
2020-08-25T04:18:02.125808
2020-01-07T13:19:26
2020-01-07T13:19:26
216,960,112
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.631833553314209, "alphanum_fraction": 0.6528785824775696, "avg_line_length": 34.1787223815918, "blob_id": "d953594c88564cf1789263b21ef66a91b2568352", "content_id": "2a0579b5f8f898cdf68f3010eca4427ee1357df5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8946, "license_type": "no_license", "max_line_length": 122, "num_lines": 235, "path": "/firstNN_6_rectifyCostFunctionAndLaerningRate.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "\"\"\"\n在 firstNN_5_b的基础上\n把二次代价函数修改成python深度学习里面的形式,就是多求了一个sigmod函数的导数\n同时,把36和38行的学习速率提出来,就是这步把你的准确率拉下去了!\n\"\"\"\nimport numpy\nimport scipy.special\nimport matplotlib.pyplot\nimport pylab\n\nclass neuralNetwork:\n def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate,batch_size):\n self.inodes=inputnodes\n self.onodes=outputnodes\n self.hnodes=hiddennodes\n self.wih=numpy.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.inodes))\n self.who=numpy.random.normal(0.0,pow(self.onodes,-0.5),(self.onodes,self.hnodes))\n self.bih=numpy.random.normal(0.0,1,(self.hnodes,1))\n self.bho=numpy.random.normal(0.0,1,(self.onodes,1))\n self.lr=learningrate\n self.batch_size=batch_size\n self.activation_function=lambda x:scipy.special.expit(x)\n pass\n def train(self,inputs_list,targets_list):\n inputs = inputs_list\n targets =targets_list\n hidden_inputs = numpy.dot(self.wih, inputs)\n hidden_inputs+=self.bih\n hidden_outputs = self.activation_function(hidden_inputs)\n final_inputs = numpy.dot(self.who, hidden_outputs)\n final_inputs+=self.bho\n final_outputs = self.activation_function(final_inputs)\n output_errors = targets - final_outputs\n deltabho=(output_errors *final_outputs * (1.0 - final_outputs))\n hidden_errors = numpy.dot(self.who.T, deltabho)\n self.who += self.lr * ((numpy.dot(deltabho,numpy.transpose(hidden_outputs)))/batch_size+lamda/batch_size*self.who)\n deltabih=(hidden_errors *hidden_outputs * (1.0 - hidden_outputs))\n self.wih += self.lr *(( numpy.dot(deltabih, numpy.transpose (inputs)))/batch_size+lamda/batch_size*self.wih)\n self.bho=self.bho+deltabho/batch_size\n self.bih=self.bih+deltabih/batch_size\n return final_outputs\n def query(self,inputs_list):\n inputs = numpy.array(inputs_list, ndmin=2).T\n hidden_inputs = numpy.dot(self.wih, inputs)\n hidden_outputs =self.activation_function(hidden_inputs)\n final_inputs = numpy.dot(self.who, hidden_outputs)\n final_outputs = self.activation_function(final_inputs)\n return final_outputs\ninput_nodes=784\nhidden_nodes=30\noutput_nodes=10\nlearning_rate=0.1\nbatch_size=10\ntrainning_length=-1\ntest_length=-1\nepochs=30\n# lamda=0\nlamda=0.001\nn=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate,batch_size)\n\n\ndef divide_batch(trainning_data):\n extend=numpy.zeros((10,trainning_length),float)\n for k in range(trainning_length):\n targets=numpy.zeros(output_nodes)+0.01\n targets[int(trainning_data_list[1][k])]=0.99\n extend[:,k]=targets\n pass\n trainning_data=(numpy.transpose(trainning_data[0]),extend)\n permutation = numpy.random.permutation(trainning_length)\n X_shuffle = trainning_data[0][:, permutation]\n Y_shuffle = trainning_data[1][:, permutation]\n\n mini_batches = []\n batch_num = trainning_length//batch_size\n for i in range(batch_num):\n mini_batch_x = X_shuffle[:, i * batch_size: (i + 1) * batch_size]\n mini_batch_y = Y_shuffle[:, i * batch_size: (i + 1) * batch_size]\n mini_batch = (mini_batch_x, mini_batch_y)\n mini_batches.append(mini_batch)\n\n if 
batch_num * batch_size < trainning_length:\n mini_batch_x = X_shuffle[:, batch_num * batch_size: trainning_length]\n mini_batch_y = Y_shuffle[:, batch_num * batch_size: trainning_length]\n mini_batch = [mini_batch_x, mini_batch_y]\n mini_batches.append(mini_batch)\n\n mini_batches = numpy.array(mini_batches)\n return mini_batches\n\n\n\nimport _pickle as cPickle\nimport gzip\ndef load_data():\n f = gzip.open('./mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f,encoding='bytes')\n f.close()\n\n # 单个的测试代码函数\n # testNumber=5\n # all_values=test_data_list[testNumber].split(',')\n # for k in range(0,len(training_data[0])):\n # pict=training_data[0][k]\n # image=numpy.asfarray(pict).reshape(28,28)\n # matplotlib.pyplot.imshow(image,cmap='Greys',interpolation='None')\n # pylab.show()\n # print(training_data[1][k])\n # pylab.time.sleep(2)\n\n return (training_data, validation_data, test_data)\n\n#————————————————————————————————————————————————————————\n# 第二个包里的数据训练\ntrainning_data_list, validation_data, test_data=load_data()\nif(trainning_length>0):\n trainning_data_list=(trainning_data_list[0][0:trainning_length],trainning_data_list[1][0:trainning_length])\nelse:\n trainning_length=numpy.shape(trainning_data_list[0])[0]\nif(test_length>0):\n test_data=(test_data[0][0:test_length],test_data[1][0:test_length])\n validation_data=(validation_data[0][0:test_length],validation_data[1][0:test_length])\nelse:\n test_length=numpy.shape(test_data[0])[0]\n# print(trainning_data_list[0].shape)\n# print(trainning_data_list[1].shape)\n# print(trainning_data_list[1][0])\n# print(test_data[0].shape)\n# print(test_data[1].shape)\n# print(validation_data[0].shape)\n# print(validation_data[1].shape)\nmini_batches=divide_batch(trainning_data_list)\n# print(mini_batches.shape)\nfor e in range(epochs):\n for k in range(len(mini_batches)):\n n.train(mini_batches[k][0],mini_batches[k][1])\n pass\n#————————————————————————————————————————————————————————\n\n\n\n#————————————————————————————————————————————————————————\n#第二个包里的数据测试\nscorecard = []\nfor k in range(len(test_data[0])):\n # all_values = numpy.reshape(test_data[0][k], (784, 1))\n all_values=test_data[0][k]\n correct_label = int(test_data[1][k])\n print(correct_label, \"correct label\")\n # inputs = (numpy.asfarray(all_values[0:]) / 255.0 * 0.99) + 0.01\n inputs=all_values\n outputs = n.query(inputs)\n label = numpy.argmax(outputs)\n print(label, \"network's answer\")\n if (label == correct_label):\n scorecard.append(1)\n else:\n scorecard.append(0)\n pass\n if(k>100):\n break\n pass\nprint(scorecard)\nscorecard_array = numpy.asarray(scorecard)\nprint (\"performance = \", scorecard_array.sum() /\n scorecard_array.size)\n#————————————————————————————————————————————————————————\n\n\n\n\"\"\"\ntrainning_data_file=open(\"../_data/handwrittingNumber/mnist_train.csv\",'r')\ntrainning_data_list=trainning_data_file.readlines()\ntrainning_data_file.close()\nepochs=2\nfor e in range(epochs):\n for record in trainning_data_list:\n all_values= record.split(',')\n inputs=(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01\n targets=numpy.zeros(output_nodes)+0.01\n targets[int(all_values[0])]=0.99\n n.train(inputs,targets)\n pass\n pass\n\n\ntest_data_file=open(\"../_data/handwrittingNumber/mnist_test.csv\",'r')\ntest_data_list=test_data_file.readlines()\ntest_data_file.close()\n\n# #但个的测试代码函数\n# testNumber=5\n# all_values=test_data_list[testNumber].split(',')\n# image=numpy.asfarray(all_values[1:]).reshape(28,28)\n# 
matplotlib.pyplot.imshow(image,cmap='Greys',interpolation='None')\n# pylab.show()\n# final=n.query(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01\n# print(final)\n\n\n\n#得分的测试\n# test the neural network\n# scorecard for how well the network performs, initially empty\nscorecard = []\n# go through all the records in the test data set\nfor record in test_data_list:\n # split the record by the ',' commas\n all_values = record.split(',')\n # correct answer is first value\n correct_label = int(all_values[0])\n print(correct_label, \"correct label\")\n # scale and shift the inputs\n inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n # query the network\n outputs = n.query(inputs)\n # the index of the highest value corresponds to the label\n label = numpy.argmax(outputs)\n print(label, \"network's answer\")\n # append correct or incorrect to list\n if (label == correct_label):\n # network's answer matches correct answer, add 1 to scorecard\n scorecard.append(1)\n else:\n # network's answer doesn't match correct answer, add 0 to scorecard\n scorecard.append(0)\n pass\n pass\nprint(scorecard)\n# 正确率显示\n# calculate the performance score, the fraction of correct answers\nscorecard_array = numpy.asarray(scorecard)\nprint (\"performance = \", scorecard_array.sum() /\n scorecard_array.size)\n\"\"\"\n\n" }, { "alpha_fraction": 0.5696814656257629, "alphanum_fraction": 0.6660978198051453, "avg_line_length": 35.25773239135742, "blob_id": "e1229a066d0f537c686272188dfaee78f5ec6d55", "content_id": "c657594b987f61291cdba7d27c8bdb580b0f9ebd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3516, "license_type": "no_license", "max_line_length": 131, "num_lines": 97, "path": "/KerasBigDataLesson/RainFall.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom keras import models\nfrom keras import layers\nimport matplotlib.pyplot as plt\nfrom keras import optimizers\nfrom keras import losses\nfrom keras import metrics\n\nrainData = pd.read_csv('../../_data/weatherAUS.csv')\nrainData= rainData.drop(columns=['Cloud3pm','Cloud9am','Location','RISK_MM','Date'],axis=1)\nrainData= rainData.dropna(how='any')\nrainData['RainToday'].replace({'No': 0, 'Yes':1},inplace = True)\nrainData['RainTomorrow'].replace({'No': 0, 'Yes': 1},inplace = True)\nrainData['WindGustDir'].replace({'E':0.06125,'ENE':0.1225,'ESE':0.18375,'N':0.245,'NE':0.30625,'NNE':0.3675,'NNW':0.42875,\n 'NW':0.49,'S':0.55125,'SE':0.6125,'SSE':0.67375,'SSW':0.735,'SW':0.79625,'W':0.8575,'WNW':0.91875,\n 'WSW':0.98},inplace = True)\nrainData['WindDir3pm'].replace({'E':0.06125,'ENE':0.1225,'ESE':0.18375,'N':0.245,'NE':0.30625,'NNE':0.3675,'NNW':0.42875,\n 'NW':0.49,'S':0.55125,'SE':0.6125,'SSE':0.67375,'SSW':0.735,'SW':0.79625,'W':0.8575,'WNW':0.91875,\n 'WSW':0.98},inplace = True)\nrainData['WindDir9am'].replace({'E':0.06125,'ENE':0.1225,'ESE':0.18375,'N':0.245,'NE':0.30625,'NNE':0.3675,'NNW':0.42875,\n 'NW':0.49,'S':0.55125,'SE':0.6125,'SSE':0.67375,'SSW':0.735,'SW':0.79625,'W':0.8575,'WNW':0.91875,\n 'WSW':0.98},inplace = True)\nrainToday=rainData['RainToday']\nrainTomorrow=rainData['RainTomorrow']\nrainData= rainData.drop(columns=['RainTomorrow','RainToday'],axis=1)\ntarget=rainTomorrow\nmean = rainData.mean(axis=0)\nrainData -= mean\nstd = rainData.std(axis=0)\nrainData /= std\nrainData['RainToday']=rainToday\nprint(rainData[:20])\n\n\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(38, activation='softmax',\n 
input_shape=(rainData.shape[1],)))\nmodel.add(layers.Dense(38, activation='softmax'))\nmodel.add(layers.Dense(1,activation='sigmoid'))\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n loss=losses.binary_crossentropy,\n metrics=[metrics.binary_accuracy])\n\nprint(rainData.shape)\nprint(target.shape)\n\ntrain_data=rainData[:45000]\ntest_data=rainData[45000:55000]\nvaliation_data=rainData[55000:]\ntrain_target=target[:45000]\ntest_target=target[45000:55000]\nvalidation_target=target[55000:]\n\nprintDataAmount=10\nprintData=rainData[55000:55000+printDataAmount]\nprintTarget=target[55000:55000+printDataAmount]\n\n\n\n\nnum_epochs = 10\n\nhistory = model.fit(train_data, train_target,\n validation_data=(valiation_data, validation_target),\n epochs=num_epochs, batch_size=512)\n\nhistory_dict = history.history\nprint(history_dict)\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\nepochs = range(1, len(loss_values) + 1)\nplt.plot(epochs, loss_values, 'bo', label='Training loss')\nplt.plot(epochs, val_loss_values, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\nplt.clf()\nacc = history_dict['binary_accuracy']\nval_acc = history_dict['val_binary_accuracy']\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\nresults = model.evaluate(test_data, test_target)\nprint(results)\n\nprint(model.predict(printData))\nprint(printTarget)" }, { "alpha_fraction": 0.6191707253456116, "alphanum_fraction": 0.6433017253875732, "avg_line_length": 35.38327407836914, "blob_id": "2a3f68607698a917c4a113e64dd82fa370bacc65", "content_id": "73a5e6228294046b2db562ff58c17e7dd883f511", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11057, "license_type": "no_license", "max_line_length": 111, "num_lines": 287, "path": "/firstNN_2_MBGD.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "\"\"\"\n对firstNN_2_MGBD 进行修改\n把每个的小批量改成随机\n把一个小批量的数据用矩阵的形式输入计算\n\"\"\"\nimport numpy\nimport scipy.special\nimport matplotlib.pyplot\nimport pylab\n\nclass neuralNetwork:\n def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate,batch_size):\n self.inodes=inputnodes\n self.onodes=outputnodes\n self.hnodes=hiddennodes\n self.wih=numpy.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.inodes))\n self.who=numpy.random.normal(0.0,pow(self.onodes,-0.5),(self.onodes,self.hnodes))\n self.lr=learningrate\n self.batch_size=batch_size\n self.trainning_length=trainning_length\n self.test_length=test_length\n self.activation_function=lambda x:scipy.special.expit(x)\n self.acount=batch_size\n self.arrih=numpy.zeros((self.hnodes,self.inodes),float)\n self.arrho=numpy.zeros((self.onodes,self.hnodes),float)\n pass\n def train(self,inputs_list,targets_list):\n self.acount-=1\n inputs = numpy.array(inputs_list, ndmin=2).T\n targets = numpy.array(targets_list, ndmin=2).T\n hidden_inputs = numpy.dot(self.wih, inputs)\n hidden_outputs = self.activation_function(hidden_inputs)\n final_inputs = numpy.dot(self.who, hidden_outputs)\n final_outputs = self.activation_function(final_inputs)\n output_errors = targets - final_outputs\n hidden_errors = numpy.dot(self.who.T, output_errors)\n self.arrho += self.lr * numpy.dot((output_errors *\n final_outputs * (1.0 - 
final_outputs)),\n numpy.transpose(hidden_outputs))\n self.arrih += self.lr * numpy.dot((hidden_errors *\n hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose\n (inputs))\n if(self.acount==0):\n self.acount=self.batch_size\n self.who+=self.arrho/self.batch_size\n self.wih+=self.arrih/self.batch_size\n self.arrih=numpy.zeros((self.hnodes,self.inodes),float)\n self.arrho=numpy.zeros((self.onodes,self.hnodes),float)\n return final_outputs\n def query(self,inputs_list):\n inputs = numpy.array(inputs_list, ndmin=2).T\n hidden_inputs = numpy.dot(self.wih, inputs)\n hidden_outputs =self.activation_function(hidden_inputs)\n final_inputs = numpy.dot(self.who, hidden_outputs)\n final_outputs = self.activation_function(final_inputs)\n return final_outputs\ninput_nodes=784\nhidden_nodes=100\noutput_nodes=10\nlearning_rate=0.12\nbatch_size=10\ntrainning_length=5000\ntest_length=100\nepochs=10\nn=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate,batch_size)\n\n\n\ndef vectorized_result(j):\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the jth\n position and zeroes elsewhere. This is used to convert a digit\n (0...9) into a corresponding desired output from the neural\n network.\"\"\"\n e = numpy.zeros((10, 1))\n e[j] = 1.0\n return e\n\n\n\n\n\n\nimport _pickle as cPickle\nimport gzip\ndef load_data():\n \"\"\"Return the MNIST data as a tuple containing the training data,\n the validation data, and the test data.\n\n The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\n\n The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\n\n The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\n\n This is a nice data format, but for use in neural networks it's\n helpful to modify the format of the ``training_data`` a little.\n That's done in the wrapper function ``load_data_wrapper()``, see\n below.\n \"\"\"\n f = gzip.open('./mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f,encoding='bytes')\n f.close()\n\n # 单个的测试代码函数\n # testNumber=5\n # all_values=test_data_list[testNumber].split(',')\n # for k in range(0,len(training_data[0])):\n # pict=training_data[0][k]\n # image=numpy.asfarray(pict).reshape(28,28)\n # matplotlib.pyplot.imshow(image,cmap='Greys',interpolation='None')\n # pylab.show()\n # print(training_data[1][k])\n # pylab.time.sleep(2)\n\n return (training_data, validation_data, test_data)\n\ndef load_data_wrapper():\n \"\"\"Return a tuple containing ``(training_data, validation_data,\n test_data)``. Based on ``load_data``, but the format is more\n convenient for use in our implementation of neural networks.\n\n In particular, ``training_data`` is a list containing 50,000\n 2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray\n containing the input image. ``y`` is a 10-dimensional\n numpy.ndarray representing the unit vector corresponding to the\n correct digit for ``x``.\n\n ``validation_data`` and ``test_data`` are lists containing 10,000\n 2-tuples ``(x, y)``. 
In each case, ``x`` is a 784-dimensional\n numpy.ndarry containing the input image, and ``y`` is the\n corresponding classification, i.e., the digit values (integers)\n corresponding to ``x``.\n\n Obviously, this means we're using slightly different formats for\n the training data and the validation / test data. These formats\n turn out to be the most convenient for use in our neural network\n code.\"\"\"\n\n tr_d, va_d, te_d = load_data()\n training_inputs = [numpy.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n validation_inputs = [numpy.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n test_inputs = [numpy.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n return (training_data, validation_data, test_data)\n\n\n\n\n#————————————————————————————————————————————————————————\n# 第二个包里的数据训练\ntrainning_data_list, validation_data, test_data=load_data()\nif(trainning_length>0):\n trainning_data_list=(trainning_data_list[0][0:trainning_length],trainning_data_list[1][0:trainning_length])\nif(test_length>0):\n test_data=(test_data[0][0:test_length],test_data[1][0:test_length])\n validation_data=(validation_data[0][0:test_length],validation_data[1][0:test_length])\n# print(trainning_data_list[0].shape)\n# print(trainning_data_list[1].shape)\n# print(trainning_data_list[1][0])\n# print(test_data[0].shape)\n# print(test_data[1].shape)\n# print(validation_data[0].shape)\n# print(validation_data[1].shape)\nfor e in range(epochs):\n # for k in range(len(trainning_data_list[0])):\n for k in range(len(trainning_data_list[0])):\n # inputs=(numpy.asfarray(trainning_data_list[0][k][0:])/255.0*0.99)+0.01\n inputs=trainning_data_list[0][k]\n targets=numpy.zeros(output_nodes)+0.01\n targets[int(trainning_data_list[1][k])]=0.99\n # # print(trainning_data_list[0][k])\n # print(inputs)\n # pylab.time.sleep(5)\n n.train(inputs,targets)\n if(k>5000):\n break\n pass\n pass\n#————————————————————————————————————————————————————————\n\n\n\n#————————————————————————————————————————————————————————\n#第二个包里的数据测试\nscorecard = []\nfor k in range(len(test_data[0])):\n # all_values = numpy.reshape(test_data[0][k], (784, 1))\n all_values=test_data[0][k]\n correct_label = int(test_data[1][k])\n print(correct_label, \"correct label\")\n # inputs = (numpy.asfarray(all_values[0:]) / 255.0 * 0.99) + 0.01\n inputs=all_values\n outputs = n.query(inputs)\n label = numpy.argmax(outputs)\n print(label, \"network's answer\")\n if (label == correct_label):\n scorecard.append(1)\n else:\n scorecard.append(0)\n pass\n if(k>100):\n break\n pass\nprint(scorecard)\nscorecard_array = numpy.asarray(scorecard)\nprint (\"performance = \", scorecard_array.sum() /\n scorecard_array.size)\n#————————————————————————————————————————————————————————\n\n\n\n\"\"\"\ntrainning_data_file=open(\"../_data/handwrittingNumber/mnist_train.csv\",'r')\ntrainning_data_list=trainning_data_file.readlines()\ntrainning_data_file.close()\nepochs=2\nfor e in range(epochs):\n for record in trainning_data_list:\n all_values= record.split(',')\n inputs=(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01\n targets=numpy.zeros(output_nodes)+0.01\n targets[int(all_values[0])]=0.99\n n.train(inputs,targets)\n pass\n pass\n\n\ntest_data_file=open(\"../_data/handwrittingNumber/mnist_test.csv\",'r')\ntest_data_list=test_data_file.readlines()\ntest_data_file.close()\n\n# #但个的测试代码函数\n# testNumber=5\n# 
all_values=test_data_list[testNumber].split(',')\n# image=numpy.asfarray(all_values[1:]).reshape(28,28)\n# matplotlib.pyplot.imshow(image,cmap='Greys',interpolation='None')\n# pylab.show()\n# final=n.query(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01\n# print(final)\n\n\n\n#得分的测试\n# test the neural network\n# scorecard for how well the network performs, initially empty\nscorecard = []\n# go through all the records in the test data set\nfor record in test_data_list:\n # split the record by the ',' commas\n all_values = record.split(',')\n # correct answer is first value\n correct_label = int(all_values[0])\n print(correct_label, \"correct label\")\n # scale and shift the inputs\n inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n # query the network\n outputs = n.query(inputs)\n # the index of the highest value corresponds to the label\n label = numpy.argmax(outputs)\n print(label, \"network's answer\")\n # append correct or incorrect to list\n if (label == correct_label):\n # network's answer matches correct answer, add 1 to scorecard\n scorecard.append(1)\n else:\n # network's answer doesn't match correct answer, add 0 to scorecard\n scorecard.append(0)\n pass\n pass\nprint(scorecard)\n# 正确率显示\n# calculate the performance score, the fraction of correct answers\nscorecard_array = numpy.asarray(scorecard)\nprint (\"performance = \", scorecard_array.sum() /\n scorecard_array.size)\n\"\"\"\n\n" }, { "alpha_fraction": 0.62116539478302, "alphanum_fraction": 0.6434020400047302, "avg_line_length": 35.24117660522461, "blob_id": "191fc7f7203475fbda9d55b5d2b88b7a8cbd3f2d", "content_id": "4272afe0067f85a163804729d9f7beb118c291c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6667, "license_type": "no_license", "max_line_length": 111, "num_lines": 170, "path": "/firstNN_8_AddOneLayer.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "\"\"\"\n在 firstNN_7的基础上\n尝试添加一层隐藏层\n//准确率可达0.9901960784313726\n\"\"\"\nimport numpy\nimport scipy.special\nimport matplotlib.pyplot\nimport pylab\n\nclass neuralNetwork:\n def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate,batch_size):\n self.inodes=inputnodes\n self.onodes=outputnodes\n self.hnodes=hiddennodes\n self.wih=numpy.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.inodes))\n self.whh=numpy.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.hnodes))\n self.who=numpy.random.normal(0.0,pow(self.onodes,-0.5),(self.onodes,self.hnodes))\n self.bih=numpy.random.normal(0.0,1,(self.hnodes,1))\n self.bhh=numpy.random.normal(0.0,1,(self.hnodes,1))\n self.bho=numpy.random.normal(0.0,1,(self.onodes,1))\n self.lr=learningrate\n self.batch_size=batch_size\n self.activation_function=lambda x:scipy.special.expit(x)\n pass\n def train(self,inputs_list,targets_list):\n inputs = inputs_list\n targets =targets_list\n hidden_inputs = numpy.dot(self.wih, inputs)\n hidden_inputs+=self.bih\n hidden_outputs = self.activation_function(hidden_inputs)\n\n hidden_inputs2=numpy.dot(self.whh,hidden_outputs)\n hidden_inputs2+=self.bhh\n hidden_outputs2=self.activation_function(hidden_inputs2)\n\n final_inputs = numpy.dot(self.who, hidden_outputs2)\n final_inputs+=self.bho\n final_outputs = self.activation_function(final_inputs)\n\n dZ3 = final_outputs-targets\n dwho=self.lr *(( numpy.dot(dZ3,numpy.transpose(hidden_outputs2)))/batch_size+lamda/batch_size*self.who)\n self.who-=dwho\n self.bho-=self.lr *(numpy.sum(dZ3,axis=1,keepdims=True)/batch_size)\n\n 
dZ2=numpy.dot(self.who.T,dZ3)*hidden_outputs2 * (1.0 - hidden_outputs2)\n dwhh=self.lr *((numpy.dot(dZ2,numpy.transpose (hidden_inputs)))/batch_size+lamda/batch_size*self.whh)\n dbhh=self.lr *(numpy.sum(dZ2,axis=1,keepdims=True)/batch_size)\n self.whh-=dwhh\n self.bhh-=dbhh\n\n dZ1=numpy.dot(self.whh.T,dZ2)*hidden_outputs*(1.0-hidden_outputs)\n dwih=self.lr*((numpy.dot(dZ1,numpy.transpose(inputs)))/batch_size+lamda/batch_size*self.wih)\n dbih=self.lr*(numpy.sum(dZ1,axis=1,keepdims=True)/batch_size)\n self.wih-=dwih\n self.bih-=dbih\n\n return final_outputs\n def query(self,inputs_list):\n inputs = numpy.array(inputs_list, ndmin=2).T\n inputs=numpy.array(inputs_list,ndmin=2).T\n hidden_inputs = numpy.dot(self.wih, inputs)\n hidden_inputs+=self.bih\n hidden_outputs = self.activation_function(hidden_inputs)\n\n hidden_inputs2=numpy.dot(self.whh,hidden_outputs)\n hidden_inputs2+=self.bhh\n hidden_outputs2=self.activation_function(hidden_inputs2)\n\n final_inputs = numpy.dot(self.who, hidden_outputs2)\n final_inputs+=self.bho\n final_outputs = self.activation_function(final_inputs)\n return final_outputs\ninput_nodes=784\nhidden_nodes=30\noutput_nodes=10\nlearning_rate=0.1\nbatch_size=10\ntrainning_length=-1\ntest_length=-1\nepochs=30\n# lamda=0\nlamda=0.001\nn=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate,batch_size)\n\n\ndef divide_batch(trainning_data):\n extend=numpy.zeros((10,trainning_length),float)\n for k in range(trainning_length):\n targets=numpy.zeros(output_nodes)+0.01\n targets[int(trainning_data_list[1][k])]=0.99\n extend[:,k]=targets\n pass\n trainning_data=(numpy.transpose(trainning_data[0]),extend)\n permutation = numpy.random.permutation(trainning_length)\n X_shuffle = trainning_data[0][:, permutation]\n Y_shuffle = trainning_data[1][:, permutation]\n\n mini_batches = []\n batch_num = trainning_length//batch_size\n for i in range(batch_num):\n mini_batch_x = X_shuffle[:, i * batch_size: (i + 1) * batch_size]\n mini_batch_y = Y_shuffle[:, i * batch_size: (i + 1) * batch_size]\n mini_batch = (mini_batch_x, mini_batch_y)\n mini_batches.append(mini_batch)\n\n if batch_num * batch_size < trainning_length:\n mini_batch_x = X_shuffle[:, batch_num * batch_size: trainning_length]\n mini_batch_y = Y_shuffle[:, batch_num * batch_size: trainning_length]\n mini_batch = [mini_batch_x, mini_batch_y]\n mini_batches.append(mini_batch)\n\n mini_batches = numpy.array(mini_batches)\n return mini_batches\n\n\n\nimport _pickle as cPickle\nimport gzip\ndef load_data():\n f = gzip.open('./mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f,encoding='bytes')\n f.close()\n return (training_data, validation_data, test_data)\n\n#————————————————————————————————————————————————————————\n# 第二个包里的数据训练\ntrainning_data_list, validation_data, test_data=load_data()\nif(trainning_length>0):\n trainning_data_list=(trainning_data_list[0][0:trainning_length],trainning_data_list[1][0:trainning_length])\nelse:\n trainning_length=numpy.shape(trainning_data_list[0])[0]\nif(test_length>0):\n test_data=(test_data[0][0:test_length],test_data[1][0:test_length])\n validation_data=(validation_data[0][0:test_length],validation_data[1][0:test_length])\nelse:\n test_length=numpy.shape(test_data[0])[0]\nmini_batches=divide_batch(trainning_data_list)\nfor e in range(epochs):\n for k in range(len(mini_batches)):\n n.train(mini_batches[k][0],mini_batches[k][1])\n 
pass\n#————————————————————————————————————————————————————————\n\n\n\n#————————————————————————————————————————————————————————\n\nscorecard = []\nfor k in range(len(test_data[0])):\n all_values=test_data[0][k]\n correct_label = int(test_data[1][k])\n print(correct_label, \"correct label\")\n inputs=all_values\n outputs = n.query(inputs)\n label = numpy.argmax(outputs)\n print(label, \"network's answer\")\n if (label == correct_label):\n scorecard.append(1)\n else:\n scorecard.append(0)\n pass\n if(k>100):\n break\n pass\nprint(scorecard)\nscorecard_array = numpy.asarray(scorecard)\nprint (\"performance = \", scorecard_array.sum() /\n scorecard_array.size)\n#————————————————————————————————————————————————————————\n" }, { "alpha_fraction": 0.4958397448062897, "alphanum_fraction": 0.5195685625076294, "avg_line_length": 32.8125, "blob_id": "bc7c716e6e945f14bc1bdb308fb60c7e35ccf09d", "content_id": "4e4f903121bd0f46aff1d171217f4b94bdd92318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3245, "license_type": "no_license", "max_line_length": 82, "num_lines": 96, "path": "/neuralNetworkOf.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "# coding:utf8\nimport cPickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Network(object):\n def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]] # L(n-1)->L(n)\n self.weights = [np.random.randn(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]\n\n def feedforward(self, a):\n for b_, w_ in zip(self.biases, self.weights):\n a = self.sigmoid(np.dot(w_, a)+b_)\n return a\n\n def SGD(self, training_data, test_data,epochs, mini_batch_size, eta):\n n_test = len(test_data)\n n = len(training_data)\n plt.xlabel('epoch')\n plt.title('cost')\n cy=[]\n cx=range(epochs)\n for j in cx:\n self.cost = 0.0\n np.random.shuffle(training_data) # shuffle\n for k in xrange(0, n, mini_batch_size):\n mini_batch = training_data[k:k+mini_batch_size]\n self.update_mini_batch(mini_batch, eta)\n cy.append(self.cost/n)\n print \"Epoch {0}: {1} / {2}\".format(\n j, self.evaluate(test_data), n_test)\n plt.plot(cx,cy)\n plt.scatter(cx,cy)\n plt.show()\n\n def update_mini_batch(self, mini_batch, eta):\n for x, y in mini_batch:\n delta_b, delta_w,cost = self.backprop(x, y)\n self.weights -= eta/len(mini_batch)*delta_w\n self.biases -= eta/len(mini_batch)*delta_b\n self.cost += cost\n\n def backprop(self, x, y):\n b=np.zeros_like(self.biases)\n w=np.zeros_like(self.weights)\n a_ = x\n a = [x]\n for b_, w_ in zip(self.biases, self.weights):\n a_ = self.sigmoid(np.dot(w_, a_)+b_)\n a.append(a_)\n for l in xrange(1, self.num_layers):\n if l==1:\n # delta= self.sigmoid_prime(a[-1])*(a[-1]-y) # O(k)=a[-1], t(k)=y\n delta= a[-1]-y # cross-entropy\n else:\n sp = self.sigmoid_prime(a[-l]) # O(j)=a[-l]\n delta = np.dot(self.weights[-l+1].T, delta) * sp\n b[-l] = delta\n w[-l] = np.dot(delta, a[-l-1].T)\n cost=0.5*np.sum((b[-1])**2)\n return (b, w,cost)\n\n def evaluate(self, test_data):\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)\n\n def sigmoid(self,z):\n return 1.0/(1.0+np.exp(-z))\n\n def sigmoid_prime(self,z):\n return z*(1-z)\n\nif __name__ == '__main__':\n\n def get_label(i):\n c=np.zeros((10,1))\n c[i]=1\n return c\n\n def get_data(data):\n return [np.reshape(x, (784,1)) for x in data[0]]\n\n f = open('mnist.pkl', 'rb')\n 
training_data, validation_data, test_data = cPickle.load(f)\n training_inputs = get_data(training_data)\n training_label=[get_label(y_) for y_ in training_data[1]]\n data = zip(training_inputs,training_label)\n test_inputs = training_inputs = get_data(test_data)\n test = zip(test_inputs,test_data[1])\n net = Network([784, 30, 10])\n net.SGD(data[:5000],test[:5000],50,10, 3.0,) # 4476/5000 (4347/5000)" }, { "alpha_fraction": 0.6697163581848145, "alphanum_fraction": 0.6806953549385071, "avg_line_length": 33.15625, "blob_id": "c36541a111657153c092f6ba5637952eb9e5c056", "content_id": "3b638e4ef91b8c9b94c856ab02aa3f2aa6c6934c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3279, "license_type": "no_license", "max_line_length": 100, "num_lines": 96, "path": "/KerasBigDataLesson/HeartDisease.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom keras import models\nfrom keras import layers\nimport matplotlib.pyplot as plt\nfrom keras import optimizers\nfrom keras import losses\nfrom keras import metrics\n\nheartData = pd.read_csv('../../_data/heart.csv')\nheartData= heartData.dropna(how='any')\ntarget=heartData.loc[:,'target']\nheartData=heartData.drop(columns=['target'],axis=1)\nmean = heartData.mean(axis=0)\nheartData -= mean\nstd = heartData.std(axis=0)\nheartData /= std\nprint(heartData[:20])\ndef build_model():\n model = models.Sequential()\n model.add(layers.Dense(38, activation='sigmoid',\n input_shape=(heartData.shape[1],)))\n model.add(layers.Dense(38, activation='sigmoid'))\n model.add(layers.Dense(1,activation='sigmoid'))\n model.compile(optimizer=optimizers.RMSprop(lr=0.0005),\n loss=losses.binary_crossentropy,\n metrics=[metrics.binary_accuracy])\n return model\n\n\ntrain_data=heartData[50:]\ntest_data=heartData[:50]\ntrain_target=target[50:]\ntest_target=target[:50]\n\n\n\nnum_epochs = 15\nk = 4\nnum_val_samples = len(train_data) // k\nall_loss_histories = []\nall_Valloss_histories = []\nall_acc_histories = []\nall_Valacc_histories = []\nmodel=build_model()\nfor i in range(k):\n print('processing fold #', i)\n val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]\n val_targets = train_target[i * num_val_samples: (i + 1) * num_val_samples]\n partial_train_data = np.concatenate(\n [train_data[:i * num_val_samples],\n train_data[(i + 1) * num_val_samples:]],\n axis=0)\n partial_target = np.concatenate(\n [train_target[:i * num_val_samples],\n train_target[(i + 1) * num_val_samples:]],\n axis=0)\n history = model.fit(partial_train_data, partial_target,\n validation_data=(val_data, val_targets),\n epochs=num_epochs, batch_size=1)\n print(history.history)\n all_loss_histories.append(history.history['loss'])\n all_Valloss_histories.append(history.history['val_loss'])\n all_acc_histories.append(history.history['binary_accuracy'])\n all_Valacc_histories.append(history.history['val_binary_accuracy'])\n\nall_loss_histories = [np.mean([x[i] for x in all_loss_histories]) for i in range(num_epochs)]\nall_Valloss_histories = [np.mean([x[i] for x in all_Valloss_histories]) for i in range(num_epochs)]\nall_acc_histories = [np.mean([x[i] for x in all_acc_histories]) for i in range(num_epochs)]\nall_Valacc_histories = [np.mean([x[i] for x in all_Valacc_histories]) for i in range(num_epochs)]\n\n\nloss_values = all_loss_histories\nval_loss_values = all_Valloss_histories\nepochs = range(1, len(loss_values) + 1)\nplt.plot(epochs, loss_values, 'bo', label='Training 
loss')\nplt.plot(epochs, val_loss_values, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\nplt.clf()\nacc = all_acc_histories\nval_acc = all_Valacc_histories\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\nresults = model.evaluate(test_data, test_target)\nprint(results)\n" }, { "alpha_fraction": 0.6487476825714111, "alphanum_fraction": 0.6648340225219727, "avg_line_length": 38.91056823730469, "blob_id": "f51fc654b2d28c07fbe48bd9a771e368dde1251f", "content_id": "5b761d617d518c3f57064ac116dae6f58f020d36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4949, "license_type": "no_license", "max_line_length": 98, "num_lines": 123, "path": "/firstNeuralNetwork.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "import numpy\nimport scipy.special\nimport matplotlib.pyplot\nimport pylab\n\nclass neuralNetwork:\n def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate):\n self.inodes=inputnodes\n self.onodes=outputnodes\n self.hnodes=hiddennodes\n self.wih=numpy.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.inodes))\n self.who=numpy.random.normal(0.0,pow(self.onodes,-0.5),(self.onodes,self.hnodes))\n self.lr=learningrate\n self.activation_function=lambda x:scipy.special.expit(x)\n pass\n def train(self,inputs_list,targets_list):\n # convert inputs list to 2d array\n inputs = numpy.array(inputs_list, ndmin=2).T\n targets = numpy.array(targets_list, ndmin=2).T\n # calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, inputs)\n # calculate the signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n # calculate signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # calculate the signals emerging from final output layer\n final_outputs = self.activation_function(final_inputs)\n # output layer error is the (target - actual)\n output_errors = targets - final_outputs\n # hidden layer error is the output_errors, split by weights,recombined at hidden nodes\n hidden_errors = numpy.dot(self.who.T, output_errors)\n # update the weights for the links between the hidden and output layers\n self.who += self.lr * numpy.dot((output_errors *\n final_outputs * (1.0 - final_outputs)),\n numpy.transpose(hidden_outputs))\n # update the weights for the links between the input and hidden layers\n self.wih += self.lr * numpy.dot((hidden_errors *\n hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose\n (inputs))\n pass\n def query(self,inputs_list):\n # convert inputs list to 2d array\n inputs = numpy.array(inputs_list, ndmin=2).T\n # calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, inputs)\n # calculate the signals emerging from hidden layer\n hidden_outputs =self.activation_function(hidden_inputs)\n # calculate signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # calculate the signals emerging from final outputlayer\n final_outputs = self.activation_function(final_inputs)\n return 
final_outputs\ninput_nodes=784\nhidden_nodes=100\noutput_nodes=10\nlearning_rate=0.3\nn=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate)\n\n\ntrainning_data_file=open(\"../_data/handwrittingNumber/mnist_train.csv\",'r')\ntrainning_data_list=trainning_data_file.readlines()\ntrainning_data_file.close()\n\nepochs=2\nfor e in range(epochs):\n for record in trainning_data_list:\n all_values= record.split(',')\n inputs=(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01\n targets=numpy.zeros(output_nodes)+0.01\n targets[int(all_values[0])]=0.99\n n.train(inputs,targets)\n pass\n pass\n\n\ntest_data_file=open(\"../_data/handwrittingNumber/mnist_test.csv\",'r')\ntest_data_list=test_data_file.readlines()\ntest_data_file.close()\n\n# #但个的测试代码函数\n# testNumber=5\n# all_values=test_data_list[testNumber].split(',')\n# image=numpy.asfarray(all_values[1:]).reshape(28,28)\n# matplotlib.pyplot.imshow(image,cmap='Greys',interpolation='None')\n# pylab.show()\n# final=n.query(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01\n# print(final)\n\n\n\n#得分的测试\n# test the neural network\n# scorecard for how well the network performs, initially empty\nscorecard = []\n# go through all the records in the test data set\nfor record in test_data_list:\n # split the record by the ',' commas\n all_values = record.split(',')\n # correct answer is first value\n correct_label = int(all_values[0])\n print(correct_label, \"correct label\")\n # scale and shift the inputs\n inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n # query the network\n outputs = n.query(inputs)\n # the index of the highest value corresponds to the label\n label = numpy.argmax(outputs)\n print(label, \"network's answer\")\n # append correct or incorrect to list\n if (label == correct_label):\n # network's answer matches correct answer, add 1 to scorecard\n scorecard.append(1)\n else:\n # network's answer doesn't match correct answer, add 0 to scorecard\n scorecard.append(0)\n pass\n pass\nprint(scorecard)\n# 正确率显示\n# calculate the performance score, the fraction of correct answers\nscorecard_array = numpy.asarray(scorecard)\nprint (\"performance = \", scorecard_array.sum() /\n scorecard_array.size)\n\n\n" }, { "alpha_fraction": 0.6156172752380371, "alphanum_fraction": 0.6585097312927246, "avg_line_length": 24.794326782226562, "blob_id": "362b75b83a28b53fea57af87923520dab96f6e16", "content_id": "fca3eadde29567e2f02dc18953186c7bf07874e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3637, "license_type": "no_license", "max_line_length": 93, "num_lines": 141, "path": "/KerasBigDataLesson/newsCategory.py", "repo_name": "PigLion/TestAI", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom keras import models\nfrom keras import layers\nimport matplotlib.pyplot as plt\nfrom keras import optimizers\nfrom keras.preprocessing.text import Tokenizer\n\n\namount=60000\nnewsData = pd.read_json('../../_data/News_Category_Dataset_v2.json', lines=True)\nnewsData=newsData.drop(columns=['date','link'],axis=1)\nnewsData= newsData.dropna(how='any')\n\n\nnewsData.category=newsData.category.map(lambda x: \"WORLDPOST\" if x == \"THE WORLDPOST\" else x)\nnewsData['text'] = newsData.headline + \" \" + newsData.short_description\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(newsData.text)\nnewsData['words'] = tokenizer.texts_to_sequences(newsData.text)\n# print(newsData.loc[:100,'words'])\n\ndef vectorize_sequences(sequences):\n dimension=10000\n results = 
np.zeros((amount, dimension))\n for i in range(amount):\n for k in sequences[i]:\n if(k<10000):\n results[i,k] = 1.\n return results\n\ninputData=vectorize_sequences(newsData.words)\nprint(inputData[:4,:10])\nprint(newsData.words[:4])\nprint(\"--------------------\")\n\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(64, activation='sigmoid',\n input_shape=(inputData.shape[1],)))\nmodel.add(layers.Dense(64, activation='sigmoid'))\nmodel.add(layers.Dense(40,activation='sigmoid'))\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nprint(np.unique(newsData.category))\n\ntargetMap={\n 'ARTS':0,\n 'ARTS & CULTURE':1,\n 'BLACK VOICES':2,\n 'BUSINESS':3,\n 'COLLEGE':4,\n 'COMEDY':5,\n 'CRIME':6,\n 'CULTURE & ARTS':7,\n 'DIVORCE':8,\n 'EDUCATION':9,\n 'ENTERTAINMENT':10,\n 'ENVIRONMENT':11,\n 'FIFTY':12,\n 'FOOD & DRINK':13,\n 'GOOD NEWS':14,\n 'GREEN':15,\n 'HEALTHY LIVING':16,\n 'HOME & LIVING':17,\n 'IMPACT':18,\n 'LATINO VOICES':19,\n 'MEDIA':20,\n 'MONEY':21,\n 'PARENTING':22,\n 'PARENTS':23,\n 'POLITICS':24,\n 'QUEER VOICES':25,\n 'RELIGION':26,\n 'SCIENCE':27,\n 'SPORTS':28,\n 'STYLE':29,\n 'STYLE & BEAUTY':30,\n 'TASTE':31,\n 'TECH':32,\n 'TRAVEL':33,\n 'WEDDINGS':34,\n 'WEIRD NEWS':35,\n 'WELLNESS':36,\n 'WOMEN':37,\n 'WORLD NEWS':38,\n 'WORLDPOST':39\n}\n\ndef to_one_hot(sequences):\n dimension=40\n results = np.zeros((amount, dimension))\n for i in range(amount):\n results[i,sequences[i]] = 1.\n return results\ntarget=to_one_hot(newsData.category.map(targetMap))\n\ntrain_data=inputData[:40000]\ntest_data=inputData[40000:50000]\nvaliation_data=inputData[50000:]\ntrain_target=target[:40000]\ntest_target=target[40000:50000]\nvalidation_target=target[50000:]\n\n\n\n\nnum_epochs = 20\n\nhistory = model.fit(train_data, train_target,\n validation_data=(valiation_data, validation_target),\n epochs=num_epochs, batch_size=512)\n\nhistory_dict = history.history\nprint(history_dict)\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\nepochs = range(1, len(loss_values) + 1)\nplt.plot(epochs, loss_values, 'bo', label='Training loss')\nplt.plot(epochs, val_loss_values, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\nplt.clf()\nacc = history_dict['acc']\nval_acc = history_dict['val_acc']\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\nresults = model.evaluate(test_data, test_target)\nprint(results)\n" } ]
8
Birchenkon/GeekBrains_Django1_try2
https://github.com/Birchenkon/GeekBrains_Django1_try2
eb02971a291650ded6684116dc9723ebb776219a
ddb96a9089c519019033e16f9b84b83ca8c7647b
e531d14c3f26a701c05fbdb8ed96f2703b2fcec2
refs/heads/master
2022-09-15T08:30:10.016696
2020-05-25T13:11:29
2020-05-25T13:11:29
263,665,897
0
0
null
2020-05-13T15:14:05
2020-05-20T18:20:43
2020-05-25T13:11:30
Python
[ { "alpha_fraction": 0.575047492980957, "alphanum_fraction": 0.6073464155197144, "avg_line_length": 27.981651306152344, "blob_id": "a26895407a19e1faf0ae055bf8a605959f02bb97", "content_id": "8ab30bd82fef3f485eedad3d86095a4ac07c8633", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3625, "license_type": "no_license", "max_line_length": 95, "num_lines": 109, "path": "/mainapp/migrations/0004_fill_bd.py", "repo_name": "Birchenkon/GeekBrains_Django1_try2", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.12 on 2020-05-17 18:33\n\nfrom django.db import migrations\n\n\ndef forwards_func(apps, schema_editor):\n pro_cat_model = apps.get_model(\"mainapp\", \"ProductCategory\") # Load model for make changes\n pro_model = apps.get_model(\"mainapp\", \"Product\") # Load model for make changes\n con_model = apps.get_model(\"mainapp\", \"Contact\") # Load model for make changes\n\n # Категория 1\n pro_cat_obj = pro_cat_model.objects.create(\n pk=1,\n name=\"стулья\", description=\"Мы поможем вам выбрать самые удобные стулья.\")\n\n # Продукты\n pro_model.objects.create(\n pk=1,\n category=pro_cat_obj, # Fk\n name=\"Для кухни\",\n image=\"product_img/product-22.jpg\",\n description=\"Незаменимая вещь, чтобы быстро позавтракать на кухне.\",\n price=\"1249\",\n quantity=17,\n )\n pro_model.objects.create(\n pk=2,\n category=pro_cat_obj, # Fk\n name=\"Для гостинной\",\n image=\"product_img/product-20.jpg\",\n description=\"Стиль и комфорт для долгих посиделок.\",\n price=\"4999\",\n quantity=102,\n )\n del pro_cat_obj\n\n # Категория 2\n pro_cat_obj = pro_cat_model.objects.create(\n pk=2,\n name=\"кресла\", description=\"Ваш комфорт в наших креслах!\")\n\n # Продукты в категории кресла\n pro_model.objects.create(\n pk=3,\n category=pro_cat_obj, # Fk\n name=\"Для спальни\",\n image=\"product_img/product-71.jpg\",\n description=\"Неизменная классика с новым уровнем уюта.\",\n price=\"8499\",\n quantity=32,\n )\n del pro_cat_obj\n\n # Категория 3\n pro_cat_obj = pro_cat_model.objects.create(\n pk=3,\n name=\"стульчики для кормления\", description=\"Все для самых маленьких.\")\n del pro_cat_obj\n\n # Категория 4\n pro_cat_obj = pro_cat_model.objects.create(\n pk=4,\n name=\"табуреты\", description=\"Компактность по новому.\")\n del pro_cat_obj\n\n # Категория 5\n pro_cat_obj = pro_cat_model.objects.create(\n pk=5,\n name=\"Подушки на стулья\", description=\"Добавим мягкости.\")\n del pro_cat_obj\n\n # Create contacts\n con_model.objects.create(\n pk=1,\n phone=\"+7-888-888-8888\",\n email=\"[email protected]\",\n city=\"Москва\",\n address=\"3-я улица Строителей д. 25\"\n )\n con_model.objects.create(\n pk=2,\n phone=\"+7-777-777-7777\",\n email=\"[email protected]\",\n city=\"Санкт-Петербург\",\n address=\"3-я улица Строителей д. 25\",\n )\n con_model.objects.create(\n pk=3,\n phone=\"+7-999-999-9999\",\n email=\"[email protected]\",\n city=\"Владивосток\",\n address=\"ул. 
Московская д.10\",\n )\n\n\ndef reverse_func(apps, schema_editor):\n pro_cat_model = apps.get_model(\"mainapp\", \"ProductCategory\") # Load model for make changes\n con_model = apps.get_model(\"mainapp\", \"Contact\") # Load model for make changes\n\n # Delete all objects\n pro_cat_model.objects.all().delete()\n con_model.objects.all().delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"mainapp\", \"0003_contact\")]\n\n operations = [migrations.RunPython(forwards_func, reverse_func)]" }, { "alpha_fraction": 0.5111111402511597, "alphanum_fraction": 0.7555555701255798, "avg_line_length": 45, "blob_id": "6e92b0cd60e68ab37ce6c189514ef5180ad397f0", "content_id": "2e39e13b53588634434495d4a841e760a3946aea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 45, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/run.sh", "repo_name": "Birchenkon/GeekBrains_Django1_try2", "src_encoding": "UTF-8", "text": "python3 ./manage.py runserver #127.0.0.1:8000" } ]
2
santiagoesdras/task-cli
https://github.com/santiagoesdras/task-cli
72e3dde16522b785192eb655bd0469d42c871e61
42beb36d6112a8f265a9baacf4c2517fbc8055cb
eb806abeb961542a7559d41ab649cb63d62789f1
refs/heads/main
2023-07-04T11:14:53.920353
2021-08-10T03:51:39
2021-08-10T03:51:39
393,569,041
0
0
null
2021-08-07T03:39:04
2021-08-07T03:38:06
2021-08-07T03:38:04
null
[ { "alpha_fraction": 0.6034843325614929, "alphanum_fraction": 0.6146341562271118, "avg_line_length": 19.5, "blob_id": "924b4e7a696a2c943831947500e077392b5ba8e8", "content_id": "bdb2fc8d186554b17aba2cca866d366b50cb2755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "no_license", "max_line_length": 141, "num_lines": 70, "path": "/task-cli.py", "repo_name": "santiagoesdras/task-cli", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom sys import platform\n\ndbTasks = []\n\n# add new task in array\ndef addTask(task):\n dbTasks.append(task)\n\n# list the tasks in the array\ndef listTask():\n for task in dbTasks:\n print(\"* \", task)\n\n# remove task in the array\ndef removeTask(i):\n dbTasks.pop(i)\n\n# edit task in the array\ndef editTask(i, updateTask):\n dbTasks[i] = updateTask \n\ndef menu(option):\n if option == \"0\":\n quit()\n\n elif option == \"1\":\n taskInput = input(\"\\n\\tTitle for new task: \")\n addTask(taskInput)\n \n elif option == \"2\":\n listTask()\n \n elif option == \"3\":\n try:\n removeInput = input(\"\\n\\tWhat task do you want to delete?: \")\n removeTask(int(removeInput))\n except:\n print(\"\\tNot a number\")\n \n elif option == \"4\":\n try:\n editInput = input(\"\\n\\tWhat task do you want to update?: \")\n updateInput = input(\"\\n\\tWhat title will you put it?: \")\n editTask(int(editInput), updateInput)\n except: \n print(\"\\tNot a number\")\n\n elif option == \"5\":\n print(\"\\n\\tTotal tasks: \", len(dbTasks))\n\n else:\n print(\"\\tOption not valid\")\n\n\ndef imenu():\n print(\"\\n\\tOptions for you task: \\n\\t0.Exit \\n\\t1.Add new Task \\n\\t2.List tasks \\n\\t3.Delete tasks \\n\\t4.Update tasks \\n\\t5.Total tasks\")\n\n menuOption = input(\"\\tEnter an option \")\n menu(menuOption)\n\nwhile(1):\n imenu()\n\n time.sleep(2)\n if platform == \"win32\": \n os.system(\"cls\")\n else:\n os.system(\"clear\")\n" } ]
1
jahn941015/post_training_quantization-
https://github.com/jahn941015/post_training_quantization-
d45ceedadb7674258cb6d4149c25aa2d7e9b4789
13c84d1e006fae81391d22087178a0427e9be1a7
ec9adedfe0d274629836a0f5c17ece4d246d3072
refs/heads/master
2022-11-30T03:01:46.906142
2020-08-06T10:03:37
2020-08-06T10:03:37
285,508,715
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5394057035446167, "alphanum_fraction": 0.5645994544029236, "avg_line_length": 27.66666603088379, "blob_id": "a5baeb62bde7d9f486af65fc92adb48b4ce796c7", "content_id": "66d0521089e07c4e5dc690104600279753603594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1548, "license_type": "no_license", "max_line_length": 88, "num_lines": 54, "path": "/utils.py", "repo_name": "jahn941015/post_training_quantization-", "src_encoding": "UTF-8", "text": "import os\n\nimport torch\nfrom average_meter import AverageMeter\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef calibrate(model, data_loader):\n model.eval()\n with torch.no_grad():\n for image, target in data_loader:\n model(image)\n\n\ndef evaluate(model, criterion, data_loader, neval_batches):\n model.eval()\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n cnt = 0\n with torch.no_grad():\n for image, target in data_loader:\n output = model(image)\n loss = criterion(output, target)\n cnt += 1\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n print('.', end = '')\n top1.update(acc1[0], image.size(0))\n top5.update(acc5[0], image.size(0))\n if cnt >= neval_batches:\n return top1, top5\n\n return top1, top5\n\n\ndef print_size_of_model(model):\n torch.save(model.state_dict(), \"temp.p\")\n print('Size (MB):', os.path.getsize(\"temp.p\")/1e6)\n os.remove('temp.p')\n" }, { "alpha_fraction": 0.6443132758140564, "alphanum_fraction": 0.6670243144035339, "avg_line_length": 36, "blob_id": "69d842259a37146bb678ce653175f3886cad9451", "content_id": "992f45963c46e0e13953adb59457a804994efe30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5592, "license_type": "no_license", "max_line_length": 262, "num_lines": 151, "path": "/main.py", "repo_name": "jahn941015/post_training_quantization-", "src_encoding": "UTF-8", "text": "import os\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, models\nimport torchvision.transforms as transforms\nimport torch.quantization\nfrom torch.quantization import get_default_qconfig, quantize_jit\nfrom utils import accuracy, calibrate, evaluate, print_size_of_model\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description = 'perform post training quantization on models implemented in torchvision')\n parser.add_argument(\n '-m', '--model', type=str, metavar='',\n help='selecting the models implemented in torchvision. 
\\n 1.resnet18 \\n 2.alexnet 3.squeezenet \\n 4.vgg16 \\n 5.densenet \\n 6.inception \\n 7.googlenet \\n 8.shufflenet \\n 9.mobilenet \\n 10.resnext50_32x4d \\n11.wide_resnet50_2 \\n12.mnasnet')\n parser.add_argument('-c', '--checkpoint', type=str, metavar='', help='saved model checkpoint path')\n parser.add_argument('-d', '--data', type=str, metavar='', help='data path')\n parser.add_argument('-n', '--target', type=int, metavar='', help='number of classes')\n return parser \n\n\ndef data_loaders(args):\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'valid')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_batch_size = 30\n eval_batch_size = 30\n\n dataset = torchvision.datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n dataset_test = torchvision.datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ]))\n\n train_sampler = torch.utils.data.RandomSampler(dataset)\n test_sampler = torch.utils.data.SequentialSampler(dataset_test)\n\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=train_batch_size,\n sampler=train_sampler)\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=eval_batch_size,\n sampler=test_sampler)\n\n return data_loader, data_loader_test\n\n\ndef model_setup(args):\n model_path = args.checkpoint\n \n if args.model == 'resnet18':\n model = models.resnet18(pretrained=True)\n elif args.model == 'alexnet':\n model = models.alexnet(pretrained=True)\n elif args.model == 'squeezenet':\n model = models.squeezenet1_0(pretrained=True)\n elif args.model == 'vgg16':\n model = models.vgg16(pretrained=True)\n elif args.model == 'densenet':\n model = models.densenet161(pretrained=True)\n elif args.model == 'inception':\n model = models.inception_v3(pretrained=True)\n elif args.model == 'googlenet':\n model = models.googlenet(pretrained=True)\n elif args.model == 'shufflenet':\n model = models.shufflenet_v2_x1_0(pretrained=True)\n elif args.model == 'mobilenet':\n model = models.mobilenet_v2(pretrained=True)\n elif args.model == 'resnext50_32x4d':\n model = models.resnext50_32x4d(pretrained=True)\n elif args.model == 'wide_resnet50_2':\n model = models.wide_resnet50_2(pretrained=True)\n elif args.model == 'mnasnet':\n model = models.mnasnet1_0(pretrained=True)\n else:\n raise ValueError('please enter a proper model name')\n #model = models.resnet18(pretrained=True)\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, args.target) \n device = torch.device(\"cpu\")\n model = model.to(device)\n model.load_state_dict(torch.load(model_path, map_location=device))\n \n return model\n\n\ndef quantize(model, data_loader, data_loader_test):\n criterion = nn.CrossEntropyLoss()\n num_calibration_batches = 10\n num_eval_batches = 10\n eval_batch_size = 30\n\n myModel = model\n myModel.eval()\n myModel.qconfig = torch.quantization.default_qconfig\n print(myModel.qconfig)\n torch.quantization.prepare(myModel, inplace=True)\n\n ts_model = torch.jit.script(model).eval()\n qconfig = get_default_qconfig('fbgemm')\n qconfig_dict = {'': qconfig}\n quantized_model = quantize_jit(\n ts_model,\n {'': qconfig},\n calibrate,\n [data_loader_test])\n\n print(quantized_model.graph)\n print('Size of model before quantization')\n print_size_of_model(ts_model)\n 
print('Size of model after quantization')\n print_size_of_model(quantized_model)\n top1, top5 = evaluate(ts_model, criterion, data_loader_test, num_eval_batches)\n print('[before serilaization] Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))\n\n saved_model_dir = '/ssd3/jhahn/ptq/model/saved_model/'\n graph_mode_model_file = 'resnet18_graph_mode_quantized.pth'\n torch.jit.save(quantized_model, saved_model_dir + graph_mode_model_file)\n quantized_model = torch.jit.load(saved_model_dir + graph_mode_model_file)\n top1, top5 = evaluate(quantized_model, criterion, data_loader_test, num_eval_batches)\n print('[after serialization/deserialization] Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))\n\n\ndef main():\n #initializing data loader \n args = get_parser().parse_args()\n model = model_setup(args)\n data_loader, data_loader_test = data_loaders(args)\n quantize(model, data_loader, data_loader_test)\n\n\nif __name__ == '__main__':\n main()\n \n" }, { "alpha_fraction": 0.7144818305969238, "alphanum_fraction": 0.7501716017723083, "avg_line_length": 39.44444274902344, "blob_id": "5e56c3c1935a9513241b6ec335318dd717dc637a", "content_id": "dacbbf3e068859cb72bd89dc29882ba36325bd7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1457, "license_type": "no_license", "max_line_length": 139, "num_lines": 36, "path": "/README.md", "repo_name": "jahn941015/post_training_quantization-", "src_encoding": "UTF-8", "text": "# post_training_quantization-\nA module that enables post training quantization for a pytorch deep learning classification model \n## Setup\nSet up the environment for using post training quantization by installing pytorch from the following link\n(https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md) \n\n## Running the Program\n- -m or --model: selecting 1 of 12 pretrained models implemented in torchvision \n- -c or --checkpoint: path for the checkpoint of the model that will go through quantization \n- -d or --data: path for the data which the original model was built upon \n- -n or --target: number of classes \n \nexample of performing the post training quantization of a model built by a certain dataset \n```bash\npython3 main.py -m resnet18 -c /ssd3/jhahn/ptq/model/saved_model/resnet18-model1.pth -d /ssd3/jhahn/ptq/data/imagenet_1k/ -n 1000\n```\n## Caution\n1. In order to use this module the dataset should be in the same format as the following dataset \\n \nhttps://s3.amazonaws.com/pytorch-tutorial-assets/imagenet_1k.zip\n2. This module only works for 12 classification model implemented in torchvision(https://pytorch.org/docs/stable/torchvision/models.html). \nThe 12 models are the following: \n(1)resnet18 \n(2)alexnet \n(3)squeezenet \n(4)vgg16 \n(5)densenet \n(6)inception \n(7)googlenet \n(8)shufflenet \n(9)mobilenet \n(10)resnext50_32x4d \n(11)wide_resnet50_2 \n(12)mnasnet \n\n## Result\n![result](result.PNG)\n\n" } ]
3
kristentan1/SSW567_Hw04_GitHubApi
https://github.com/kristentan1/SSW567_Hw04_GitHubApi
c4c5d48418157a425706fc79369d33e3171eccb1
5d8c7f6dfe8675b4c4bb64ad8f48065a1b0a055e
ade7217944191f70337c8908245f03239a910e67
refs/heads/master
2020-03-29T04:02:54.669230
2018-10-04T21:36:47
2018-10-04T21:36:47
149,513,139
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6627907156944275, "alphanum_fraction": 0.6724806427955627, "avg_line_length": 34.620689392089844, "blob_id": "2f57b774746406d698f798990b4d0c989213ff1d", "content_id": "035ea8700fc362fb2fc6fae34b9bddb73f1058fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1032, "license_type": "no_license", "max_line_length": 117, "num_lines": 29, "path": "/Hw04a.py", "repo_name": "kristentan1/SSW567_Hw04_GitHubApi", "src_encoding": "UTF-8", "text": "'''\nCreated on Sep 18, 2018\n\n@author: Kristen Tan\nI pledge my Honor that I have abided by the Stevens Honor System. Kristen Tan\n'''\n\nimport requests\nimport json\n\ndef getGitHubInfo(gitHubUserId):\n \"\"\"Takes in a username for a GitHub user and returns a list of their repos and the number of commits per repo.\"\"\"\n try:\n urlString = \"https://api.github.com/users/\" + gitHubUserId + \"/repos\"\n except TypeError as error:\n return \"gitHubUserId must be a string\"\n resultList = []\n gitHubInfo = requests.get(urlString)\n gitHubInfoJson = json.loads(gitHubInfo.content.decode('utf-8'))\n for repo in gitHubInfoJson:\n gitCommits = requests.get(\"https://api.github.com/repos/\" + gitHubUserId + \"/\" + repo[\"name\"] + \"/commits\")\n gitCommitsJson = json.loads(gitCommits.content.decode('utf-8'))\n count = 0\n for commitItem in gitCommitsJson:\n count += 1\n resultList.append([repo[\"name\"], count])\n return resultList #repos will be alphabetical\n\n# Attempting to fix Travis" }, { "alpha_fraction": 0.6135578751564026, "alphanum_fraction": 0.6476684212684631, "avg_line_length": 49.369564056396484, "blob_id": "54dcf818e1241cc013841071bca2a6da556844ca", "content_id": "c1037a16a81bd89fcd6574b89227bab7207f629e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2316, "license_type": "no_license", "max_line_length": 165, "num_lines": 46, "path": "/TestHw04a.py", "repo_name": "kristentan1/SSW567_Hw04_GitHubApi", "src_encoding": "UTF-8", "text": "'''\nCreated on Sep 18, 2018\n\n@author: Kristen Tan\nI pledge my Honor that I have abided by the Stevens Honor System. 
Kristen Tan\n'''\n\nimport unittest\nfrom Hw04a import getGitHubInfo\n\nclass TestHw04a(unittest.TestCase):\n \n def testValidInput1(self):\n self.assertEqual(getGitHubInfo('bsb226'), [['GuessingGame', 3], ['GuessingGame2', 2], ['hello-world', 3], ['HelloJava', 4], ['ud851-Exercises', 30]],\n 'bsb226 has the following repos and commits: GuessingGame - 3, GuessingGame2 - 2, hello-world - 3, HelloJava - 4, ud851-Exercises - 30.')\n \n def testValidInput2(self):\n self.assertEqual(getGitHubInfo('richkempinski'), [['hellogitworld', 30], ['helloworld', 2], ['Project1', 2], ['threads-of-life', 1]], \n 'richkempinski has the folllowing repos and commits: hellogitworld - 30, helloworld - 2, Project1 - 2, threads-of-life - 1.')\n \n def testValidInput3(self):\n self.assertEqual(getGitHubInfo('Simoa33'), [['LaundryDetector', 2], ['uunite', 8]], \n 'Simoa33 has the folllowing repos and commits: LaundryDetector - 2, uunite - 8.')\n \n def testInvalidInput1(self):\n self.assertEqual(getGitHubInfo(77), 'gitHubUserId must be a string', 'Only a string may be passed to getGitHubInfo().')\n \n def testInvalidInput2(self):\n self.assertEqual(getGitHubInfo(False), 'gitHubUserId must be a string', 'Only a string may be passed to getGitHubInfo().')\n \n def testInvalidInput3(self):\n self.assertEqual(getGitHubInfo(9.5), 'gitHubUserId must be a string', 'Only a string may be passed to getGitHubInfo().')\n \n def testInvalidInput4(self):\n self.assertEqual(getGitHubInfo([1, 2, 3, 4, 5]), 'gitHubUserId must be a string', 'Only a string may be passed to getGitHubInfo().')\n \n def testInvalidInput5(self):\n self.assertEqual(getGitHubInfo((1, 2, 3, 4, 5)), 'gitHubUserId must be a string', 'Only a string may be passed to getGitHubInfo().')\n\n def testInvalidInput6(self):\n self.assertEqual(getGitHubInfo({'kristen':'junior', 'kevin':'freshman'}), 'gitHubUserId must be a string', 'Only a string may be passed to getGitHubInfo().')\n\n \nif __name__ == '__main__':\n print('Running unit tests')\n unittest.main()" }, { "alpha_fraction": 0.6918919086456299, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 91, "blob_id": "8a05f13ba6cc3cd6c444da200df18bcd6e08f917", "content_id": "bc9a3a15482d0c3360a0a1ac9c861d612a525926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 185, "license_type": "no_license", "max_line_length": 159, "num_lines": 2, "path": "/README.md", "repo_name": "kristentan1/SSW567_Hw04_GitHubApi", "src_encoding": "UTF-8", "text": "# SSW567_Hw04_GitHubApi\n[![build status of master](https://travis-ci.org/kristentan1/SSW567_Hw04_GitHubApi.svg?branch=master)](https://travis-ci.org/kristentan1/SSW567_Hw04_GitHubApi)\n\n" } ]
3
liatsegal/Moral-Love
https://github.com/liatsegal/Moral-Love
6f550207fe0afac728cd5435c09e5994a9507186
647c79dd41a1093f90c78dd033cb558baf2e9ed6
203a97428b2d333d5b7bba5ceefba2d98621fb93
refs/heads/master
2021-01-12T08:33:21.512395
2016-12-16T01:48:57
2016-12-16T01:48:57
76,610,668
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6118188500404358, "alphanum_fraction": 0.6164189577102661, "avg_line_length": 38.78873062133789, "blob_id": "74e4200cb3eb199be7651ad8803498b0d51aea6d", "content_id": "fbd2366be30a13a1bfc0cca426eb29344c6a2e69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2826, "license_type": "no_license", "max_line_length": 157, "num_lines": 71, "path": "/Couple.py", "repo_name": "liatsegal/Moral-Love", "src_encoding": "UTF-8", "text": "from Agent import Agent\nfrom random import randint\n\n\nclass Couple:\n '''\n Args:\n two agents\n\n\n '''\n\n def __init__(self, agent_a, agent_b):\n self.agent_a = agent_a\n self.agent_b = agent_b\n self.is_active = True\n self.couple_n_steps = 0\n self.couple_average_utility = 0\n self.couple_average_morality = 0\n\n def run_step(self):\n self.agent_a.choose_action()\n self.agent_b.choose_action()\n\n def make_or_break(self):\n # Both agents decide whether to continue the relationship or break up\n # A break up occurs if at least one agent decides to break up\n self.agent_make_or_break(self.agent_a, self.agent_b)\n self.agent_make_or_break(self.agent_b, self.agent_a)\n self.couple_average_utility = (self.agent_a.agent_total_utility + self.agent_b.agent_total_utility) / 2\n self.couple_average_morality = (self.agent_a.average_morality + self.agent_b.average_morality) / 2\n self.couple_n_steps += 1\n\n def agent_make_or_break(self, agent_a, agent_b):\n # agent_a decides whether to continue the relationship or break up\n\n self.agent_a.set_agent_total_utility((self.agent_a.average_utility_on_self + self.agent_b.average_utility_on_other) / 2)\n self.agent_b.set_agent_total_utility((self.agent_b.average_utility_on_self + self.agent_a.average_utility_on_other) / 2)\n\n\n if self.agent_a.moral_type == \"kant\":\n # A Kantian\n if self.agent_a.agent_total_utility < self.agent_a.threshold_make_break or self.agent_b.agent_total_utility < self.agent_a.threshold_make_break:\n self.is_active = False\n\n elif self.agent_a.moral_type == \"util\":\n # A Utilitarian\n couple_mean_utility = (self.agent_a.agent_total_utility + self.agent_b.agent_total_utility) / 2\n if couple_mean_utility < self.agent_a.threshold_make_break:\n self.is_active = False\n\n elif self.agent_a.moral_type == \"ego\":\n # An Egoist\n if self.agent_a.agent_total_utility < self.agent_a.threshold_make_break:\n self.is_active = False\n\n elif self.agent_a.moral_type == \"altr\":\n # An Altruist\n if self.agent_b.agent_total_utility < self.agent_a.threshold_make_break:\n self.is_active = False\n\n elif self.agent_a.moral_type == \"psyc\":\n # A Psychopath\n if self.agent_a.agent_total_utility < self.agent_a.threshold_make_break and self.agent_b.agent_total_utility > self.agent_a.threshold_make_break:\n self.is_active = False\n\n\n elif self.agent_a.moral_type == \"rnd\":\n # An agent with no moral that makes decisions by a coin flip\n if randint(0,1) < 0.5:\n self.is_active = False\n\n" }, { "alpha_fraction": 0.701805055141449, "alphanum_fraction": 0.7061371803283691, "avg_line_length": 54.31999969482422, "blob_id": "691d48380a90fb00987e258ba3b4a158f58ec495", "content_id": "c1f75305ab2e294e178f76b4d384f32cac14578e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1385, "license_type": "no_license", "max_line_length": 159, "num_lines": 25, "path": "/Action.py", "repo_name": "liatsegal/Moral-Love", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nclass Action:\n 
\"\"\"\n Args:\n mean_utility_self (float): expected utility of an agent's action to itself\n sd_utility_self (float): standard deviation for generating utility of an agent's action to itself\n mean_utility_other (float): expected utility of an agent's action to other agent\n sd_utility_other (float): standard deviation for generating utility of an agent's action to other agent\n mean_morality (float): expected morality value of an agent's action\n sd_morality (float): standard deviation for generating morality value of an agent's action\n\n utility_self (float): utility value of an agent's action to itself\n utility_other (float): utility value of an agent's action to the other agent\n morality_score (float): morality value of an agent's action\n \"\"\"\n\n def __init__(self, moral_utility_factor, mean_utility_self=0, mean_utility_other=0, mean_morality=0, sd_utility_self=1, sd_utility_other=1, sd_morality=1):\n self.moral_utility_factor = moral_utility_factor\n self.utility_self = np.random.normal(mean_utility_self, sd_utility_self)\n self.utility_other = np.random.normal(mean_utility_other, sd_utility_other)\n self.morality_score = np.random.normal(mean_morality, sd_morality)\n\n self.utility_self += moral_utility_factor * self.morality_score\n\n\n" }, { "alpha_fraction": 0.6638050675392151, "alphanum_fraction": 0.6671282052993774, "avg_line_length": 49.15277862548828, "blob_id": "a097bc3714b2d73844dc426af09020809109ccaf", "content_id": "359d824c9e9a85cd4d967cf76af47a57ba173fc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3611, "license_type": "no_license", "max_line_length": 220, "num_lines": 72, "path": "/Agent.py", "repo_name": "liatsegal/Moral-Love", "src_encoding": "UTF-8", "text": "from Action import Action\nimport numpy as np\nfrom random import randint\n\n\nclass Agent:\n '''\n Args:\n moral_type = \"kant\",\"util\",\"ego\",\"altr\",\"rnd\",\"psyc\"\n n_agents_per_type = number of agents per type\n threshold_make_break = threshold of total utility - staying or breaking up\n moral_utility_factor = how much morallity adds to utility\n ... 
args of actions of agent ...\n '''\n\n def __init__(self, id, moral_type, moral_utility_factor, threshold_make_break, mean_utility_self=0, mean_utility_other=0, mean_morality=0, sd_utility_self=1, sd_utility_other=1, sd_morality=1, n_action_freedom = 10):\n self.id = id\n self.moral_type = moral_type\n self.moral_utility_factor = moral_utility_factor\n self.threshold_make_break = threshold_make_break\n self.mean_utility_self = mean_utility_self\n self.mean_utility_other = mean_utility_other\n self.mean_morality = mean_morality\n self.sd_utility_self = sd_utility_self\n self.sd_utility_other = sd_utility_other\n self.sd_morality = sd_morality\n self.n_action_freedom = n_action_freedom\n self.past_actions = []\n self.agent_total_utility = 0 # given both by self and by other\n self.average_morality = 0\n\n def choose_action(self):\n # set possible action alternative for agent's next step\n action_alternatives = []\n for i in range(self.n_action_freedom):\n action_alternatives.append(Action(self.moral_utility_factor, self.mean_utility_self, self.mean_utility_other, self.mean_morality, self.sd_utility_self, self.sd_utility_other, self.sd_morality))\n\n # choose best action according to agent's morality\n if self.moral_type == \"kant\":\n # A Kantian chooses the action that maximizes universal morality, regardless of utility\n self.current_action = max(action_alternatives, key=lambda x: x.morality_score)\n\n elif self.moral_type == \"util\":\n # A Utilitarian chooses the action that maximizes the couple's total utility\n self.current_action = max(action_alternatives, key=lambda x: (x.utility_self + x.utility_other))\n\n elif self.moral_type == \"ego\":\n # An Egoist chooses the action that maximizes its utility\n self.current_action = max(action_alternatives, key=lambda x: x.utility_self)\n\n elif self.moral_type == \"altr\":\n # An Altruist chooses the action that maximizes its utility\n self.current_action = max(action_alternatives, key=lambda x: x.utility_other)\n\n elif self.moral_type == \"psyc\":\n # A Psychopath chooses the action that\n # maximizes its utility, minimizes the other's utility and minimizes morality\n self.current_action = max(action_alternatives, key=lambda x: x.utility_self - x.utility_other - x.morality_score)\n\n elif self.moral_type == \"rnd\":\n # An agent with no moral that makes decisions by a coin flip\n self.current_action = action_alternatives[randint(0,self.n_action_freedom-1)]\n\n self.past_actions.append(self.current_action)\n\n self.average_utility_on_self = sum(act.utility_self for act in self.past_actions) / len(self.past_actions)\n self.average_utility_on_other = sum(act.utility_other for act in self.past_actions) / len(self.past_actions)\n self.average_morality = sum(act.morality_score for act in self.past_actions) / len(self.past_actions)\n\n\n def set_agent_total_utility(self,agent_total_utility):\n self.agent_total_utility = agent_total_utility\n" }, { "alpha_fraction": 0.6003856062889099, "alphanum_fraction": 0.6183417439460754, "avg_line_length": 33.43153381347656, "blob_id": "d05f7a14c76afc2b768907b0d93cdcbd461bb228", "content_id": "faafa649f8ae01800de5e25421d20e787d3419de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8298, "license_type": "no_license", "max_line_length": 190, "num_lines": 241, "path": "/LoveMoralSimulation.py", "repo_name": "liatsegal/Moral-Love", "src_encoding": "UTF-8", "text": "from Action import Action\nfrom Agent import Agent\nfrom Couple import Couple\nfrom random 
import shuffle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import gmtime\n\n\n###### PARAMETERS ######\n\"\"\"\nsimulation_mode =\n \"real\": couples form out of current singles,\n \"theoretical\": each round one couple is selected out of entire population\nnumber_of_rounds_at_theoretical_simulation = when simulation_mode=\"theoretical\" - the number of couples to test\nnumber_of_steps = (\"real\") steps of simulation OR (\"theoretical\") maximum steps for round\n\nmoral_type = \"kant\",\"util\",\"ego\",\"altr\",\"rnd\",\"psyc\"\nn_agents_per_type = number of agents per type\nthreshold_make_break = threshold of total utility - staying or breaking up\nmoral_utility_factor = how much morallity adds to utility\n\"\"\"\n\nsimulation_mode = \"theoretical\"\nnumber_of_rounds_at_theoretical_simulation = 1000\nnumber_of_steps = 1000\n\nmoral_type = (\"kant\", \"util\", \"ego\", \"altr\", \"rnd\", \"psyc\")\nn_agents_per_type = (100, 100, 100, 100, 100, 100)\nthreshold_make_break = (0.5, 1.5, 0.8, 0.8, 0.5, 0.5)\nmoral_utility_factor = (1.5, 0.5, 0.3, 0.7, 0.5, -0.5)\n\nmoral_title = (\"Kantian\", \"Utilitarian\", \"Egoist\", \"Altruist\", \"CoinFlipper\", \"Psychopath\")\n\n\n###### MAIN ######\n\n\nn_of_types = len(n_agents_per_type)\n\n# Initiate agents\nall_agents = []\nagent_id_counter = 0\n\n# Construct agents:\nfor t in range(n_of_types):\n for i in range(n_agents_per_type[t]):\n all_agents.append(Agent(agent_id_counter, moral_type[t], moral_utility_factor[t], threshold_make_break[t]))\n agent_id_counter += 1\n\n# remove last agent if the total number of agents is odd\nif len(all_agents) % 2 == 1:\n all_agents = all_agents[:len(all_agents)-1]\n\n# Initiate couples\nnumber_of_agents = agent_id_counter\nsingles_ids = list(range(number_of_agents))\nactive_couples = []\nhistory_couples = []\n\n### run simulation ###\nif simulation_mode == \"real\":\n for s in range(number_of_steps):\n print(\"--------- Step %s\\t\" % s)\n # make new couples\n shuffle(singles_ids)\n while len(singles_ids)>0:\n # new couple's ids\n id_a = singles_ids.pop()\n id_b = singles_ids.pop()\n\n # make new couple\n active_couples.append(Couple(all_agents[id_a], all_agents[id_b]))\n\n # run agents' actions\n # (at each step all agents are coupled)\n for ac in active_couples:\n ac.run_step()\n ac.make_or_break()\n\n # if couple broke up\n if not ac.is_active:\n singles_ids.append(ac.agent_a.id)\n singles_ids.append(ac.agent_b.id)\n\n history_couples.append(ac)\n active_couples.remove(ac)\n\n #print(\"id_a=%s [%s]\\tid_b=%s [%s]\\tis_active=%s\\tn_steps=%s\" % (ac.agent_a.id, ac.agent_a.moral_type, ac.agent_b.id, ac.agent_b.moral_type, ac.is_active, ac.couple_n_steps))\n\n # at the end of the simulation add active couples to history log\n for ac in active_couples:\n history_couples.append(ac)\n\n #for hc in history_couples:\n #print(\"%s\\t\\t%s\\t\\t%s\\t\\t%s\\t\" % (hc.agent_a.moral_type, hc.agent_b.moral_type, hc.couple_n_steps , hc.agent_a.agent_total_utility + hc.agent_b.agent_total_utility))\n\nelif simulation_mode == \"theoretical\":\n\n for s in range(number_of_rounds_at_theoretical_simulation):\n shuffle(singles_ids)\n id_a = singles_ids[0]\n id_b = singles_ids[1]\n\n # make new couple\n current_couple = Couple(all_agents[id_a], all_agents[id_b])\n\n print(\"--------- Step %s\\t\" % s)\n\n while current_couple.couple_n_steps <= number_of_steps and current_couple.is_active:\n current_couple.run_step()\n current_couple.make_or_break()\n\n history_couples.append(current_couple)\n\n\n# 
output\n\nmean_utility_mat = np.zeros((n_of_types,n_of_types))\nsd_utility_mat = np.zeros((n_of_types,n_of_types))\nmean_morality_mat = np.zeros((n_of_types,n_of_types))\nsd_morality_mat = np.zeros((n_of_types,n_of_types))\nmean_n_steps_mat = np.zeros((n_of_types,n_of_types))\nsd_n_steps_mat = np.zeros((n_of_types,n_of_types))\n\nfor ta in range(n_of_types):\n for tb in range(n_of_types):\n type2type_utilities = []\n type2type_moralities = []\n type2type_n_steps = []\n for hc in history_couples:\n if (hc.agent_a.moral_type == moral_type[ta] and hc.agent_b.moral_type == moral_type[tb]) or (hc.agent_b.moral_type == moral_type[ta] and hc.agent_a.moral_type == moral_type[tb]):\n type2type_utilities.append(hc.couple_average_utility)\n type2type_moralities.append(hc.couple_average_morality)\n type2type_n_steps.append(hc.couple_n_steps)\n\n # updating \n mean_utility_mat[ta][tb] = np.mean(type2type_utilities)\n sd_utility_mat[ta][tb] = np.std(type2type_utilities)\n mean_morality_mat[ta][tb] = np.mean(type2type_moralities)\n sd_morality_mat[ta][tb] = np.std(type2type_moralities)\n mean_n_steps_mat[ta][tb] = np.mean(type2type_n_steps)\n sd_n_steps_mat[ta][tb] = np.std(type2type_n_steps)\n\n# plots\n\nmin_val_colormap = -0.5\nmax_val_colormap = 1.5\n\n\nX,Y = np.meshgrid(range(n_of_types+1),range(n_of_types+1))\nticks_pos = np.arange(0.,n_of_types+0., 1)\n\nfig, ax = plt.subplots(2,3,figsize=(14,7))\n\nax = ax.flatten()\n\npcm0 = ax[0].pcolormesh(X,Y,mean_utility_mat, vmin=min_val_colormap, vmax=max_val_colormap)\nax[0].set_title('Couple Utility')\nax[0].set_ylabel('Mean')\nax[0].set_yticks(ticks_pos)\nax[0].set_yticklabels(moral_title, rotation=30)\nax[0].set_xticks([])\n\npcm1 = ax[1].pcolormesh(X,Y,mean_morality_mat, vmin=min_val_colormap, vmax=max_val_colormap)\nax[1].set_title('Couple Morality')\nax[1].set_xticks([])\nax[1].set_yticks([])\n\npcm2 = ax[2].pcolormesh(X,Y,mean_n_steps_mat)\nax[2].set_title('Couple N Steps')\nax[2].set_xticks([])\nax[2].set_yticks([])\n\npcm3 = ax[3].pcolormesh(X,Y,sd_utility_mat)\nax[3].set_ylabel('Standard Deviation')\nax[3].set_yticks(ticks_pos)\nax[3].set_yticklabels(moral_title, rotation=30)\nax[3].set_xticks(ticks_pos)\nax[3].set_xticklabels(moral_title, rotation=30)\n\npcm4 = ax[4].pcolormesh(X,Y,sd_morality_mat)\nax[4].set_xticks(ticks_pos)\nax[4].set_xticklabels(moral_title, rotation=30)\nax[4].set_yticks([])\n\npcm5 = ax[5].pcolormesh(X,Y,sd_n_steps_mat)\nax[5].set_xticks(ticks_pos)\nax[5].set_xticks(ticks_pos)\nax[5].set_xticklabels(moral_title, rotation=30)\nax[5].set_yticks([])\n\n\nfig.colorbar(pcm0, ax=ax[0], orientation='vertical')\nfig.colorbar(pcm1, ax=ax[1], orientation='vertical')\nfig.colorbar(pcm2, ax=ax[2], orientation='vertical')\nfig.colorbar(pcm3, ax=ax[3], orientation='vertical')\nfig.colorbar(pcm4, ax=ax[4], orientation='vertical')\nfig.colorbar(pcm5, ax=ax[5], orientation='vertical')\n\nfile_name = 'output/LoveMoralSimulation_' + str(gmtime().tm_yday) + str(gmtime().tm_hour) + str(gmtime().tm_min) + str(gmtime().tm_sec)\nfig.savefig(file_name + \".png\", dpi=300)\nplt.show()\n\n\n# print log\n\nlog_text = \"simulation_mode = \\\"\" + simulation_mode + \"\\\"\\n\"\nlog_text += \"number_of_rounds_at_theoretical_simulation = \" + str(number_of_rounds_at_theoretical_simulation) + \"\\n\"\nlog_text += \"number_of_steps = \" + str(number_of_steps) + \"\\n\\n\"\n\nlog_text += \"n_agents_per_type = (\"\nfor i in range(n_of_types):\n log_text += ( str(n_agents_per_type[i]) + \",\")\nlog_text = log_text[:-1] + \")\\n\"\n\nlog_text += \"moral_type 
= (\\\"\"\nfor i in range(n_of_types):\n log_text += ( str(moral_type[i]) + \"\\\",\\\"\")\nlog_text = log_text[:-2] + \")\\n\"\n\nlog_text += \"threshold_make_break = (\"\nfor i in range(n_of_types):\n log_text += ( str(threshold_make_break[i]) + \",\")\nlog_text = log_text[:-1] + \")\\n\"\n\nlog_text += \"moral_utility_factor = (\"\nfor i in range(n_of_types):\n log_text += ( str(moral_utility_factor[i]) + \",\")\nlog_text = log_text[:-1] + \")\\n\"\n\nlog_text += \"moral_title = (\\\"\"\nfor i in range(n_of_types):\n log_text += ( str(moral_title[i]) + \"\\\",\\\"\")\nlog_text = log_text[:-2] + \")\\n\\n\"\n\n#log_text += \"-------------------------\\n\\n\"\n\n\nwith open(file_name + \".txt\", \"w\") as text_file:\n text_file.write(log_text)\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 19, "blob_id": "a92b4e22278ffc91e2baa8f49ec6a523cb921254", "content_id": "335a5d99d7ca99149a3f7faf997f498cf11875d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 46, "num_lines": 4, "path": "/README.md", "repo_name": "liatsegal/Moral-Love", "src_encoding": "UTF-8", "text": "# Moral-Love\n\nCode for Moral Love Simulation - Final Project\nby Liat Segal & Kaveena Maniam \n" } ]
5
csuf-sed/OpenMV_Stuff
https://github.com/csuf-sed/OpenMV_Stuff
84aa89c97360fb4a7fe699cd62688f4d096f39b8
15ed32c4f4ff735b9c44af7ce5e7c58b1fc9f1b6
094a3c73317158a4077158d11a0a58ddf5a6f467
refs/heads/master
2020-04-01T03:35:22.748876
2019-05-14T02:23:21
2019-05-14T02:23:21
152,827,927
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6518518328666687, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 18.285715103149414, "blob_id": "e87d060889bfd46f9400b7bd91fa7918a11e6a2d", "content_id": "d2ddccf7e6fcd3c8b97dadd4e60fefbc42e0b33e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 135, "license_type": "permissive", "max_line_length": 83, "num_lines": 7, "path": "/README.md", "repo_name": "csuf-sed/OpenMV_Stuff", "src_encoding": "UTF-8", "text": "# OpenMV_Stuff\nSource code for our Computer Engineering senior project for Fall 2018 - Spring 2019\n\n## Team\n* Peter Fink\n* Riad Soliven\n* Daniel Verdugo\n" }, { "alpha_fraction": 0.6110503077507019, "alphanum_fraction": 0.666301965713501, "avg_line_length": 25.83823585510254, "blob_id": "c50ef8282b5f9f2bedc2297ed91223d1d238e807", "content_id": "ff562b7cd0bcba644fe8c1a56c6aa6086b863adf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1828, "license_type": "permissive", "max_line_length": 93, "num_lines": 68, "path": "/main.py", "repo_name": "csuf-sed/OpenMV_Stuff", "src_encoding": "UTF-8", "text": "# AprilTag Detection\n# \n\nimport sensor, image, time, math\nfrom pyb import UART\nfrom pyb import LED\nimport utime\n\nred_led = LED(1)\nred_led.on()\ngreen_led = LED(2)\n\nuart = UART(3, 9600, timeout_char = 1000)\nuart.init(9600, bits=8, parity=None, stop=1, timeout_char=1000)\n\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...\nsensor.skip_frames(time = 2000)\nsensor.set_auto_gain(False) # must turn this off to prevent image washout...\nsensor.set_auto_whitebal(False) # must turn this off to prevent image washout...\nclock = time.clock()\n\ntag_families = 0\ntag_families |= image.TAG16H5\n#tag_families |= image.TAG25H7\n#tag_families |= image.TAG25H9\n#tag_families |= image.TAG36H10\n#tag_families |= image.TAG36H11\n#tag_families |= image.ARTOOLKIT\n\ndef family_name(tag):\n\tif(tag.family() == image.TAG16H5):\n\t\treturn \"TAG16H5\"\n\tif(tag.family() == image.TAG25H7):\n\t\treturn \"TAG25H7\"\n\tif(tag.family() == image.TAG25H9):\n\t\treturn \"TAG25H9\"\n\tif(tag.family() == image.TAG36H10):\n\t\treturn \"TAG36H10\"\n\tif(tag.family() == image.TAG36H11):\n\t\treturn \"TAG36H11\"\n\tif(tag.family() == image.ARTOOLKIT):\n\t\treturn \"ARTOOLKIT\"\n\n\nwhile(True):\n\tclock.tick()\n\timg = sensor.snapshot()\n\tfor tag in img.find_apriltags(families=tag_families):\n\t\timg.draw_rectangle(tag.rect(), color = (255, 0, 0))\n\t\timg.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))\n\t\tprint_args = (family_name(tag), tag.id(), (180 * tag.x_rotation()) / math.pi)\n\t\tuart.write(\"%dt\" % tag.id())\n\t\t#print(\"%dt\" % tag.id())\n\t\tif tag.cx() < (img.width() / 3):\n\t\t\tuart.write(\"ro\")\n\t\t\t#print(\"ro\")\n\t\telif tag.cx() > (img.width() * 2 / 3):\n\t\t\tuart.write(\"lo\")\n\t\t\t#print(\"lo\")\n\t\telse:\n\t\t\tuart.write(\"so\")\n\t\t\t#print(\"so\")\n\t\tgreen_led.on()\n\t\tutime.sleep_ms(200)\n\t\tgreen_led.off()\n\t#utime.sleep_ms(50)\n\n\n\n" }, { "alpha_fraction": 0.5634218454360962, "alphanum_fraction": 0.6076695919036865, "avg_line_length": 13.125, "blob_id": "2087960ab3aae51809871efb3107390362891f8f", "content_id": "ce6c60d1bfb247eca35051ca35de7db137bc3ef1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": 
"permissive", "max_line_length": 23, "num_lines": 24, "path": "/examples/blinking_led0.py", "repo_name": "csuf-sed/OpenMV_Stuff", "src_encoding": "UTF-8", "text": "# Blinking LED\n# 10/12/2018\n# Daniel Verdugo\n# Riad Soliven\n# Peter Fink\n\nimport pyb\nimport utime\n\nred_led = pyb.LED(1)\ngreen_led = pyb.LED(2)\nblue_led = pyb.LED(3)\nir_led = pyb.LED(4)\n\nred_led.off()\ngreen_led.off()\nblue_led.off()\nir_led.off()\n\nwhile True:\n utime.sleep_ms(500)\n red_led.toggle()\n green_led.toggle()\n blue_led.toggle()\n" } ]
3
CedYF/COMP206
https://github.com/CedYF/COMP206
5e661545813dc53e3f048267f39dd7dd86d8ea12
98a4230094af2606a7105a9e559b6d176fa97fce
a9866253df88cd8ebbeb6c40803473b5d5d0bdd5
refs/heads/master
2021-01-24T19:37:07.332551
2015-04-02T22:21:49
2015-04-02T22:21:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6726943850517273, "alphanum_fraction": 0.6745027303695679, "avg_line_length": 25.33333396911621, "blob_id": "cbdc0c868698f445c6112f0f414ec06e059aebb6", "content_id": "aef6d930bd2ee79bce74ce0287b9ccca1fcaa367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 40, "num_lines": 21, "path": "/cgi-bin/AddFriend.py", "repo_name": "CedYF/COMP206", "src_encoding": "UTF-8", "text": "username=form.getvalue(\"username\")\nfriend=form.getvalue(\"friend\")\n\nwith open(\"members.csv\") as members:\n\tmemberslist=members.read().splitlines()\n\tnumLines=len(memberslist)\n\tfor i in range(len(memberslist)):\n\t\tlineUser=\"\"\n\t\tline=memberslist[i]\n\t\tfor j in range(len(line)):\n\t\t\tif line[j]!=\" \":\n\t\t\t\tlineUser=lineUser+line[j]\n\t\t\t\tlineMatch=i\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\tif lineUser==username:\n\t\t\t\tif line.find(friend)==-1:\n\t\t\t\t\tmemberslist[i]=line + \" \" + friend\nmembers=open(\"members\", \"w\")\t\nfor i in range(len(memberslist)):\n\tmembers.write(memberslist[i]+\"\\n\")\n" }, { "alpha_fraction": 0.5374677181243896, "alphanum_fraction": 0.5400516986846924, "avg_line_length": 20.55555534362793, "blob_id": "1070706c941902e4f84e9bb6baf5f8f4ed1edf3d", "content_id": "dc0fda5aa68c3806fd89381d97803bbe21dd359f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/cgi-bin/listing.py", "repo_name": "CedYF/COMP206", "src_encoding": "UTF-8", "text": "###########################################\n# Returns a list of members to be printed #\n###########################################\n\ndef memberList(username):\n\n\tlisting = []\n\n\twith open(\"members.csv\") as members:\n\t\tmemberslist=members.read().splitlines()\n\n\t\tfor line in memberslist:\n\t\t\tlineUser = line.split()[0]\n\n\t\t\tif lineUser != username:\n\t\t\t\tlisting.append(lineUser)\n\n\treturn listing" } ]
2
APL-AnArPa/Dictionary
https://github.com/APL-AnArPa/Dictionary
80a36704b6f9f8a2759dade0a192c177c371365b
66b0b8e1834694326302932d5a6d1fe33a16ee28
e6e7393e27c83f5a9a12ed1c9f7ab221dd4839ea
refs/heads/master
2021-01-22T11:51:20.025537
2013-09-12T08:06:45
2013-09-12T08:06:45
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6330613493919373, "alphanum_fraction": 0.638589084148407, "avg_line_length": 30.47008514404297, "blob_id": "b07c682b53e8b5b41bda4a71fc17177f4f365007", "content_id": "436ec8cd97e0c0cbc70b53918f326c6d15dece38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3799, "license_type": "no_license", "max_line_length": 133, "num_lines": 117, "path": "/src/DictionaryADT.java", "repo_name": "APL-AnArPa/Dictionary", "src_encoding": "UTF-8", "text": "import java.io.BufferedReader;\r\nimport java.io.BufferedWriter;\r\nimport java.io.FileReader;\r\nimport java.io.FileWriter;\r\nimport java.io.PrintWriter;\r\nimport java.lang.Math;\r\n\r\n\r\npublic abstract class DictionaryADT \r\n{\r\n\tpublic void PopulateDictionary(String InputFileName, String TimeFileInsert) //Populate dictionary using given input text file\r\n\t{\r\n\t\ttry\r\n\t\t{\r\n\t\t\tFileReader fileReader = new FileReader(InputFileName);\r\n\t\t\tBufferedReader bufferedReader = new BufferedReader(fileReader);\r\n\t\t\tString sInputList = bufferedReader.readLine();\r\n\t\t\tbufferedReader.close();\r\n\t\t\tString[] splitString = new String[sInputList.length()];\r\n\t\t\tint len = 0;\r\n\t\t\tfor(int i=0; i < sInputList.length(); i++)\r\n\t\t\t{\r\n\t\t\t\t//Checking whether the character at the current position is a space or not\r\n\t\t\t\tif(sInputList.charAt(i) != ' ')\r\n\t\t\t\t{\r\n\t\t\t\t\t//Extracting each element from the string and storing them in a string array\r\n\t\t\t\t\tsplitString[len] = sInputList.substring(i, sInputList.indexOf(' ', i) == -1 ? sInputList.length() : sInputList.indexOf(' ', i));\r\n\t\t\t\t\t//Updating the value of i\r\n\t\t\t\t\ti = sInputList.indexOf(' ', i) == -1 ? sInputList.length() : sInputList.indexOf(' ', i);\r\n\t\t\t\t\t//Incrementing len\r\n\t\t\t\t\tlen++;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tint [] inputArray=new int[len];\r\n\t\t\tfor(int i = 0; i < len; i++)\r\n\t\t\t{\r\n\t\t\t\tinputArray[i] = Integer.parseInt(splitString[i]);\r\n\t\t\t}\r\n\t\t\tlong totalInsertTime = 0;\r\n\t\t\tfor(int i=0;i<len;i++)\r\n\t\t\t{\r\n\t\t\t\tlong startTime = System.nanoTime();\r\n\t\t\t\tthis.Insert(inputArray[i]);\r\n\t\t\t\tlong endtTime = System.nanoTime();\r\n\t\t\t\ttotalInsertTime += (endtTime - startTime);\r\n\t\t\t}\r\n\t\t\tif(TimeFileInsert != null)\r\n\t\t\t{\r\n\t\t\t PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(TimeFileInsert, true)));\r\n\t\t\t out.println(len + \" \" + totalInsertTime/Math.pow(10, 9));\r\n\t\t\t out.close();\r\n\t\t\t}\r\n\t\t}\r\n\t\tcatch(Exception e)\r\n\t\t{\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t}\r\n\tpublic void LocateInDictionary(String SearchFileName, String TimeFileSearch) \r\n\t{\r\n\t\ttry\r\n\t\t{\r\n\t\t\tFileReader fileReader = new FileReader(SearchFileName);\r\n\t\t\tBufferedReader bufferedReader = new BufferedReader(fileReader);\r\n\t\t\tString sInputList = bufferedReader.readLine();\r\n\t\t\tbufferedReader.close();\r\n\t\t\tString[] splitString = new String[sInputList.length()];\r\n\t\t\tint len = 0;\r\n\t\t\tfor(int i=0; i < sInputList.length(); i++)\r\n\t\t\t{\r\n\t\t\t\t//Checking whether the character at the current position is a space or not\r\n\t\t\t\tif(sInputList.charAt(i) != ' ')\r\n\t\t\t\t{\r\n\t\t\t\t\t//Extracting each element from the string and storing them in a string array\r\n\t\t\t\t\tsplitString[len] = sInputList.substring(i, sInputList.indexOf(' ', i) == -1 ? 
sInputList.length() : sInputList.indexOf(' ', i));\r\n\t\t\t\t\t//Updating the value of i\r\n\t\t\t\t\ti = sInputList.indexOf(' ', i) == -1 ? sInputList.length() : sInputList.indexOf(' ', i);\r\n\t\t\t\t\t//Incrementing len\r\n\t\t\t\t\tlen++;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tint [] inputArray=new int[len];\r\n\t\t\tfor(int i = 0; i < len; i++)\r\n\t\t\t{\r\n\t\t\t\tinputArray[i] = Integer.parseInt(splitString[i]);\r\n\t\t\t}\r\n\t\t\tlong totalInsertTime = 0;\r\n\t\t\tint itemsFound = 0;\r\n\t\t\tfor(int i=0;i<len;i++)\r\n\t\t\t{\r\n\t\t\t\tlong startTime = System.nanoTime();\r\n\t\t\t\tboolean found = this.Search(inputArray[i]);\r\n\t\t\t\tlong endtTime = System.nanoTime();\r\n\t\t\t\tif(found)\r\n\t\t\t\t{\r\n\t\t\t\t\ttotalInsertTime += (endtTime - startTime);\r\n\t\t\t\t\titemsFound++;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tif(TimeFileSearch != null)\r\n\t\t\t{\r\n\t\t\t PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(TimeFileSearch, true)));\r\n\t\t\t out.println(itemsFound + \" \" + totalInsertTime/Math.pow(10, 9));\r\n\t\t\t out.close();\r\n\t\t\t}\r\n\t\t}\r\n\t\tcatch(Exception e)\r\n\t\t{\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t}\r\n\tabstract public void Insert(int val); \r\n\tabstract public boolean Delete(int val);\r\n\tabstract public boolean Search(int val);\r\n\tabstract public void ClearADT();\r\n\tabstract public void DisplayADT();\r\n}\r\n" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.6940639019012451, "avg_line_length": 18.81818199157715, "blob_id": "f5c28c93e83b2235ecc08869dde01f3d171835ff", "content_id": "6dcf3371af1cdef66877d131908f91be8bccd3d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 219, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/src/makefile", "repo_name": "APL-AnArPa/Dictionary", "src_encoding": "UTF-8", "text": "SOURCE = -source 1.4\n\nall: List.class MainClass.class \n\nList.class: List.java\n\tjavac $(SOURCE) -d ../class/ List.java\n\nMainClass.class: MainClass.java\n\tjavac $(SOURCE) -d ../class/ MainClass.java\nclean:\n\trm -f *.class \n" }, { "alpha_fraction": 0.7448028922080994, "alphanum_fraction": 0.7591397762298584, "avg_line_length": 81, "blob_id": "e18954f71b69b76b1a7aaa92edc66119afb21be8", "content_id": "edc10f928a629431a35ab6a728a142d2d2e79232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1395, "license_type": "no_license", "max_line_length": 133, "num_lines": 17, "path": "/scripts/runDicLocPerf.sh", "repo_name": "APL-AnArPa/Dictionary", "src_encoding": "UTF-8", "text": "rm -f ../Files/*\npython generateDicLoc.py\ncd ../src/\n\njavac Dictionary.java\n\njava Dictionary ../Files/Dictionary1.txt ../Files/Locate1.txt -t ../Files/TimeFileInsert_RBT.txt ../Files/TimeFileSearch_RBT.txt\njava Dictionary ../Files/Dictionary2.txt ../Files/Locate2.txt -t ../Files/TimeFileInsert_RBT.txt ../Files/TimeFileSearch_RBT.txt\njava Dictionary ../Files/Dictionary3.txt ../Files/Locate3.txt -t ../Files/TimeFileInsert_RBT.txt ../Files/TimeFileSearch_RBT.txt\njava Dictionary ../Files/Dictionary4.txt ../Files/Locate4.txt -t ../Files/TimeFileInsert_RBT.txt ../Files/TimeFileSearch_RBT.txt\njava Dictionary ../Files/Dictionary5.txt ../Files/Locate5.txt -t ../Files/TimeFileInsert_RBT.txt ../Files/TimeFileSearch_RBT.txt\n\njava Dictionary -bst ../Files/Dictionary1.txt ../Files/Locate1.txt -t ../Files/TimeFileInsert_BST.txt ../Files/TimeFileSearch_BST.txt\njava Dictionary -bst 
../Files/Dictionary2.txt ../Files/Locate2.txt -t ../Files/TimeFileInsert_BST.txt ../Files/TimeFileSearch_BST.txt\njava Dictionary -bst ../Files/Dictionary3.txt ../Files/Locate3.txt -t ../Files/TimeFileInsert_BST.txt ../Files/TimeFileSearch_BST.txt\njava Dictionary -bst ../Files/Dictionary4.txt ../Files/Locate4.txt -t ../Files/TimeFileInsert_BST.txt ../Files/TimeFileSearch_BST.txt\njava Dictionary -bst ../Files/Dictionary5.txt ../Files/Locate5.txt -t ../Files/TimeFileInsert_BST.txt ../Files/TimeFileSearch_BST.txt\n\n" }, { "alpha_fraction": 0.5372549295425415, "alphanum_fraction": 0.572549045085907, "avg_line_length": 30.875, "blob_id": "573db889f0dcb7cabf2b2de90af65d567713c698", "content_id": "c29a36737589cee65c1345c2486e7313bf58afd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 91, "num_lines": 8, "path": "/scripts/generateDicLoc.py", "repo_name": "APL-AnArPa/Dictionary", "src_encoding": "UTF-8", "text": "import shutil\n\nfor i in range(5):\n    with open('../Files/Dictionary'+str(i+1)+'.txt','w') as f:\n        for j in range(5*pow(10,i+1)):\n            f.write(\"%s \" % str(j+1))\n\n    shutil.copyfile('../Files/Dictionary'+str(i+1)+'.txt','../Files/Locate'+str(i+1)+'.txt')\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6875, "avg_line_length": 15, "blob_id": "d554820a9f3740e0e50a85028bb51810dac1258a", "content_id": "148257a52e1bb688ab20730becb228e1d58b482e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 40, "num_lines": 4, "path": "/README.md", "repo_name": "APL-AnArPa/Dictionary", "src_encoding": "UTF-8", "text": "Dictionary\n==========\n\nBST and RBT used to implement Dictionary\n" } ]
5
ipavel83/Python
https://github.com/ipavel83/Python
31e0733b83ecfa87086900a51f7dbc731ec4384c
a50951cfe334c2f24b8edb92accf0a4e65f5679e
a684fcd56e2beab44ae0f6519ef5b440e2aef2db
refs/heads/master
2020-04-13T23:13:11.280901
2020-02-28T16:47:04
2020-02-28T16:47:04
163,500,616
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6220472455024719, "alphanum_fraction": 0.625, "avg_line_length": 25.763158798217773, "blob_id": "d896448bd9315dba9985c4b640f26154420dd17e", "content_id": "2e2ecac7e249fa6bc645b28a8db3047c6b331151", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1016, "license_type": "permissive", "max_line_length": 98, "num_lines": 38, "path": "/029scope.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7 #https://docs.python.org/3/tutorial/classes.html#scopes-and-namespaces-example\n\ndef scopeFunction():\n \n def localScope():\n test = 'localScope'\n \n def nonlocalScope():\n nonlocal test\n test = 'nonlocalFuction'\n \n def globalScope():\n global test\n test = 'globalFunction'\n \n test = 'scope'\n localScope()\n print('localScope()', test) #localScope() scope\n \n nonlocalScope()\n print('nonlocalScope()', test) #nonlocalScope() nonlocalFuction \n \n globalScope()\n print('globalScope()', test) #globalScope() nonlocalFuction #####it changed in global scope\n\nprint('text variable after:')\nscopeFunction()\nprint('scopeFunction()', test) #scopeFunction() globalFunction\n\n#def both():\n# def GlobalNonlocal():\n# global test\n# nonlocal test ########SyntaxError: name 'test' is nonlocal and global\n# test = 'nonlocalGlobal'\n# GlobalNonlocal()\n# print('GlobalNonlocal()', test)\n#both()\n#print('both()', test)" }, { "alpha_fraction": 0.7029288411140442, "alphanum_fraction": 0.7252440452575684, "avg_line_length": 38.83333206176758, "blob_id": "96510f0a94e07d462c0934dbcc24fe76cc560c12", "content_id": "010f88bad53f64e34a755224714858f423d03b54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "permissive", "max_line_length": 144, "num_lines": 18, "path": "/005import.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7 \nfrom sys import argv\nscript, first, second, third = argv #if no 3 additional params to script then error: ValueError: not enough values to unpack (expected 4, got 1)\n\n#myLabelModule = __import__('123drawSplitedColorLabelClass') #import from fileName started with number\n\n#TODO test Absolute and Relative Imports #https://docs.python.org/2.5/whatsnew/pep-328.html\n\n\nprint (f\"Arguments of called script: {argv}, and it length: {len(argv)}\")\nprint(\"script called:\", script)\nprint(\"first arg is:\", first)\nprint(\"second arg is:\", second)\nprint(\"third arg is:\", third)\n\n##########relative import\nhelloModule = __import__('001hello')\nprint('imported relatively:', helloModule.a, helloModule.b, helloModule.c)\n" }, { "alpha_fraction": 0.49466192722320557, "alphanum_fraction": 0.5480427145957947, "avg_line_length": 15.588234901428223, "blob_id": "cf7029da6c4d04ff1b7789ba5e9e54d076b6db53", "content_id": "09fd06725c8f608985192b7d58b08505422c0e3c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "permissive", "max_line_length": 28, "num_lines": 17, "path": "/054class_method_as_variable.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "class NumAdd():\n def __init__(self, num):\n self.num = num\n \n def add(self, num2):\n self.num+=num2\n return self.num\n \nn1 = NumAdd(2)\nprint(n1.num)\nn2 = NumAdd(3)\n#print(n1.num)\naddN1 = n1.add\naddN1(5)\nassert n1.num == 7\nprint(n1.num)\ninput()" }, { "alpha_fraction": 0.6181818246841431, "alphanum_fraction": 
0.6590909361839294, "avg_line_length": 14.714285850524902, "blob_id": "c1745644e70cb125bbc55830d109588a75334fca", "content_id": "bc57bfcfd895658d9d1160b06c62279e24c17a08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "permissive", "max_line_length": 46, "num_lines": 14, "path": "/026time.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n#https://docs.python.org/3.8/library/time.html\n\nimport time\n\nt = time.time()\nprint(t)\nprint(time.gmtime(t))\nprint(time.localtime(t))\nread = input()\nt2 = time.time()\nprint(t2)\nprint(f'{t2-t}')\nprint(f'{t2-t:.2f}')\n" }, { "alpha_fraction": 0.6435555815696716, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 26.463415145874023, "blob_id": "bb037b86b1004f41803a1acf6f41873499f53595", "content_id": "f8b46dd363245709ee989ca48dace21d592d563b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1132, "license_type": "permissive", "max_line_length": 143, "num_lines": 41, "path": "/035sendKeysToWindows.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n###############for keys\nimport win32com.client as comctl\nwsh=comctl.Dispatch(\"WScript.Shell\")\n\n\n###############for mouse\nimport win32api, win32con\n#https://stackoverflow.com/questions/1181464/controlling-mouse-with-python\n#https://docs.microsoft.com/en-us/windows/desktop/api/winuser/nf-winuser-mouse_event\ndef moveTo(x, y):\n win32api.SetCursorPos((x,y))\n #win32api.mouse_event(win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE, int(x/SCREEN_WIDTH*65535.0), int(y/SCREEN_HEIGHT*65535.0))\n\n \ndef moveRelative(x, y):\n win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, x, y)\n \n\ndef clickRight():\n win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,0,0,0,0)\n win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,0,0,0,0)\n\ndef clickLeft():\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0,0,0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0,0,0)\n\n \nimport time\n#moveTo(250, 45)\n#clickLeft()\nwsh.AppActivate(\"Блокнот\")\ntime.sleep(1)\nmoveRelative(-50, 50)\nwsh.SendKeys(\"a\")\ntime.sleep(0.5)\nwsh.SendKeys(\"a\")\ntime.sleep(0.5)\nwsh.SendKeys(\"a\")\n#clickRight()\n#wsh.SendKeys(\"{F1}\")" }, { "alpha_fraction": 0.5502347350120544, "alphanum_fraction": 0.6760563254356384, "avg_line_length": 25.625, "blob_id": "a6762d9f4b87fb8fcc6577e0745fbc7b1bd86900", "content_id": "1dbf0acabab8a68ff74c45b40cb0fd07968e8acd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1065, "license_type": "permissive", "max_line_length": 81, "num_lines": 40, "path": "/021array.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\ntuple1 = ('one', 'andTwo', 3, 'four')\n#del tuple1[1] #TypeError: 'tuple' object does not support item deletion\nprint(tuple1)\n\nlist1 = [el for el in tuple1]\nlist2 = list(tuple1)\nprint(list1)\nprint(list2)\ndel list2[1]\nprint(list2)\n\nprint(tuple1)\n\nnumtuple1 =(1.0, 0.7, 1.5, 220, 2.5)\n#############Array mutable\nimport array\narr1 = array.array('f', numtuple1) #220 converted to float\nprint(arr1) #0.7 converted to float also\narr1[2] = 0.2\nprint(arr1)\n\narr2 = array.array('d', numtuple1) #220 converted to double\nprint(arr2) \narr2[2] = 0.2\nprint(arr2)\nprint(numtuple1)\n\nfrom sys import getsizeof\nnumtuple2 = (0, 1, 2, 3, 5, 9, 15, 255)\n############# 
immutable bytes\nbArr1 = bytes(numtuple2)\nbArr2 = b'\\x00\\x01\\x02\\x05\\x09\\x15\\xff'\nbArr3 = b'x00x01x02x05x09x15xff' #without backslashes it just [x,0,0,x,0, ...etc]\nprint(bArr1, bArr1[2]) #ok 2\nprint(bArr2, bArr2[2]) #ok 2\nprint(bArr3, bArr3[2]) #48 is Ascii code of symbol 0 (zero) \nprint('size of bytes:', getsizeof(bArr1), getsizeof(bArr2), getsizeof(bArr3))\nprint(b'0123'[0],b'0123'[1],b'0123'[2],b'0123'[3])\n" }, { "alpha_fraction": 0.7526881694793701, "alphanum_fraction": 0.7526881694793701, "avg_line_length": 31.230770111083984, "blob_id": "fc3136915f0e3b8fd470c1b691cdf936201a92d8", "content_id": "c8e4b396d76e8011cfbd2aa0a945ddbce4dc2442", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "permissive", "max_line_length": 122, "num_lines": 26, "path": "/050logging.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "import logging\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s- %(message)s')\n#logging.basicConfig(filename=__file__+'log.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n#logging.disable(logging.DEBUG)\n#logging.disable(logging.INFO)\n#logging.disable(logging.WARNING)\n#logging.disable(logging.ERROR)\n#logging.disable(logging.CRITICAL)\n\ndebug = logging.debug\n\ndef customDebug(*args, **kwargs):\n print('custom Debug:',*args, **kwargs) #comment print call to disable custom debug messages\n pass\n\ncustomDebug('self made ducktape analog of logging')\n\ndebug('allmost begining of program')\n\nlogging.debug('debug message')\nlogging.info('info message')\nlogging.warning('warning message')\nlogging.error('error message')\nlogging.critical('critical message')\n\ndebug('end of program?')" }, { "alpha_fraction": 0.6286248564720154, "alphanum_fraction": 0.6361085176467896, "avg_line_length": 30.880596160888672, "blob_id": "041e55b4026034bf63ba509d49b3b8551e2459ee", "content_id": "281ce9046c0b86941b303259912bf16277951a39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2138, "license_type": "permissive", "max_line_length": 148, "num_lines": 67, "path": "/034exception.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\ntry:\n print(len(None))\nexcept BaseException as e:\n print (e, ' _ ', type(e), ' _ ', dir(e)) #object of type 'NoneType' has no len() _ <class 'TypeError'> , #######output of dir#######\n\n#except ValueError as e ##to use short exception var e\n#except (RuntimeError, TypeError, NameError): if 3 different type in one\n####### BADBADBAD except RuntimeError, TypeError: ---- is OLD syntax and EQUIVOLENT to except RuntimeError as TypeError: which is NOT what YOU want\n\n \nprint('Now run tryEvalOperation')\n\ndef tryEvalOperation(operation, *args):\n print()\n try:\n if len(args)<1:\n print (operation, eval(operation))\n except ValueError:\n print(ValueError) #####\n except TypeError as t:\n \n print('TypeError happen: ', t) #TypeError happen: object of type 'NoneType' has no len()\n print(t.args) #(\"object of type 'NoneType' has no len()\",)\n \n print(TypeError) #<class 'TypeError'>\n print(TypeError.args) #<attribute 'args' of 'BaseException' objects> ####without as is just class <class 'ZeroDivisionError'>\n \n except ZeroDivisionError:\n print(ZeroDivisionError) \n except:\n print('exception for anything')\n finally:\n print('finally')\n \n \ntryEvalOperation('1/2')\ntryEvalOperation('1/0') 
#ZeroDivisionError\ntryEvalOperation('len(None)') #TypeError\n \n \n# except Exception as inst:\n# print type(inst) # the exception instance\n# print inst.args # arguments stored in .args\n# print inst # __str__ allows args to be printed directly\n \n#also from: https://docs.python.org/2/tutorial/errors.html#handling-exceptions\n#import sys\n#\n#try:\n# f = open('myfile.txt')\n# s = f.readline()\n# i = int(s.strip())\n#except IOError as e:\n# print \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n#except ValueError:\n# print \"Could not convert data to an integer.\"\n#except:\n# print \"Unexpected error:\", sys.exc_info()[0]\n# raise\n \n \n#for with (predefined clean up) look at:\n#https://docs.python.org/2/tutorial/errors.html#predefined-clean-up-actions\n#014with.py\n#\n\n\n" }, { "alpha_fraction": 0.6782115697860718, "alphanum_fraction": 0.7112720608711243, "avg_line_length": 32.43157958984375, "blob_id": "12cd027c7a22aa7e3aae8f00f2d7a7014505ad24", "content_id": "e82659dbb1f5ebd2ec435dc71912958360b70ed6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3176, "license_type": "permissive", "max_line_length": 123, "num_lines": 95, "path": "/003strings.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\nintVar1 = 12\nprint(type(intVar1),intVar1)\nstrIntVar1 =str(intVar1)\nprint(len(strIntVar1)) #length strIntVar1 is 2\n\nprint(type(strIntVar1),strIntVar1,'just text', sep=':-:', end= '; ')\nprint(str(intVar1)+strIntVar1, end= '; \\n\\n')\n\nstrIntVar1+='abc'\nprint(\"text and variable inside it {} and {} - will be printed\".format(intVar1,strIntVar1))\nprint(f\"python 3.6 formated text and variable inside it {intVar1} and {strIntVar1}\")\nstrVarOption ='VarA'\nstrResult= (f'strResult= text1 with {strVarOption} ' f'text2 with {intVar1} '\nf'text with {strVarOption}' f'text with {strVarOption} '\nf'text with {strIntVar1}')\nstrVarOption = 'b'\nprint(strResult)\nprint()\n\nprint ('%02d-%02d: %s' % (15, 23, 'HELLO')) #Python2 way to format strings\nprint()\n\nformater = '{} {} and {} one more {} and maybe {}'\n#print(formater.format(1,2,'3m',4)) #format must have enougth elements# IndexError: tuple index out of range\nprint(formater.format(1,2,'3m',4,'empty text'))\nformater = '{} {} and {}'\nprint(formater.format(1,2,'3m',4,'empty text')) #format more elements is OK - #output: 1 2 and 3m\n\nprint()\ni = 10\ns = '10'\nprint(f'fstring with repr inside for int - {i!r}, for string - {s!r}') #https://www.python.org/dev/peps/pep-0498/#abstract\n\n\nprint()\nprint('NOW WE SLICE:')\nstrOnlyLetters = \"This is only letters\"\n#slicing syntax [START:STOP:STEP]\nprint(1,strOnlyLetters, strOnlyLetters[3]) #letter at 3 index is 's'\nprint(2,strOnlyLetters[1:3]) #cut start at index 1 and end before 3\nprint(3,strOnlyLetters[2:]) #cut start at index 2\n#if we need to slice to end with step, just omit STOP:\nprint(4,strOnlyLetters[2::3]) #cut start at index 2 with step3\nprint(5,strOnlyLetters[::-1])\nprint(6,strOnlyLetters[8::-1]) #cut start at index 2 with step-1\n#print(7,strOnlyLetters[len(strOnlyLetters)-1::-1]) #not pythonic 5\nprint(8,strOnlyLetters[len(strOnlyLetters)-1::-2])\n#print(strOnlyLetters[len(strOnlyLetters)]) #IndexError: string index out of range\nprint(9,strOnlyLetters[100::-1]) #no IndexError, buy why?\nprint()\n\n#slice can be used in LIST\n#especialy useful to clean LIST without destroying it: del LISTVAR[:]\n#LISTVAR[:] is also shalow copy of LISTVAR\n#LISTVAR[:] is LISTVAR #returns False 
because we create shallow but still copy\n\nprint(\"text\\nNext string\\nAnd next one\\n And Another One \\n1ne more time\\n\")\n\nprint(\"\"\"The big one text\non multiple lines\\n:\nit ends.\nbut only after last one\nline\n\"\"\")\n\ntabbed=\"\\tTabbed text\"\nslashes=\"text\\\\ has \\\\ mul\\\\tip\\\\le\\\\\\\\slashes\"\nlist=\"\"\"\nfat text with list:\n\\t* one\n\\t* two\n\\t* three\\t or\\t some/thing\"\"\"\nprint(tabbed, slashes, list)\n\n#multiline comment in notepad plus plus\n#In the position where you want to add text, do:\n#Shift + Alt + down arrow\n\n#Escape What it does.\n#\\\\ Backslash (\\)\n#\\' Single-quote (')\n#\\\" Double-quote (\")\n#\\a ASCII bell (BEL)\n#\\b ASCII backspace (BS)\n#\\f ASCII formfeed (FF)\n#\\n ASCII linefeed (LF)\n#\\N{name} Character named name in the Unicode database (Unicode only)\n#\\r Carriage return (CR)\n#\\t Horizontal tab (TAB)\n#\\uxxxx Character with 16-bit hex value xxxx\n#\\Uxxxxxxxx Character with 32-bit hex value xxxxxxxx\n#\\v ASCII vertical tab (VT)\n#\\000 Character with octal value 000\n#\\xhh Character with hex value hh\n" }, { "alpha_fraction": 0.5625510811805725, "alphanum_fraction": 0.6508585214614868, "avg_line_length": 39.45161437988281, "blob_id": "3b871feff618bad2e8fe5e8606ee726ef16229c4", "content_id": "6b94a290b973d83ec56f422b421554e80739d92f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "permissive", "max_line_length": 113, "num_lines": 31, "path": "/041time24to12amPmConverter.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#https://en.wikipedia.org/wiki/12-hour_clock\n#\"a.m.\" and \"p.m.\" are abbreviations of the Latin ante meridiem (before midday) and post meridiem (after midday)\n#example calculator and table can be found at - https://www.timecalculator.net/12-hour-to-24-hour-converter\n# in 12AmPm format 12 is acting as zero\n#0 o'clock and 12 o'clock in 24 format are exceptions and shown as 12am for 0 hour, and 12pm for 12 hour (midday)\n\ndef timeConverter24toAmPm(time):\n    splTime = time.split(\":\")\n    hours, minutes = int(splTime[0]), int(splTime[1])\n\n    if hours == 12:\n        return f'12:{minutes:02} p.m.'\n    if hours > 12:\n        return f'{hours-12}:{minutes:02} p.m.'\n    if hours == 0:\n        return f'12:{minutes:02} a.m.'\n    return f'{hours}:{minutes:02} a.m.' 
\n\nif __name__ == '__main__':\n    print(timeConverter24toAmPm('12:30'))\n    assert timeConverter24toAmPm('12:30') == '12:30 p.m.'\n    \n    print(timeConverter24toAmPm('09:00'))\n    assert timeConverter24toAmPm('09:00') == '9:00 a.m.'\n    \n    print(timeConverter24toAmPm('23:30'))\n    assert timeConverter24toAmPm('23:30') == '11:30 p.m.'\n    \n    print(timeConverter24toAmPm('00:15'))\n    assert timeConverter24toAmPm('00:15') == '12:15 a.m.'\n" }, { "alpha_fraction": 0.7349137663841248, "alphanum_fraction": 0.7392241358757019, "avg_line_length": 34.61538314819336, "blob_id": "361d96bdaa50e44bb21f20fa941e5335bb233c81", "content_id": "8fb5bfbe3c7da12d70aebaf9e6667f2daf113440", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "permissive", "max_line_length": 77, "num_lines": 13, "path": "/044typing_mypy.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.8\n#All errors in this file happen ONLY in static analysis utils like MyPy\n#This file should execute just fine :)\n\nfrom typing import NoReturn, List, Union, Optional\n\ndef noRet() -> NoReturn:\n    #annotation NoReturn used for functions that should NOT return any values\n    #also can be used for showing that function throws an EXCEPTION\n    print('no return function:')\n    return 'this is should be ERROR' #should make MyPy typing error\n    \nprint(noRet())\n\n" }, { "alpha_fraction": 0.6238244771957397, "alphanum_fraction": 0.6601880788803101, "avg_line_length": 25.705883026123047, "blob_id": "d9b9cf62bced32551acbfcba19f4adad88459bb0", "content_id": "a75f2b6a5991e27f07fd17bc83a88aba9f6d5247", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "permissive", "max_line_length": 68, "num_lines": 34, "path": "/048jsonDumpLoad.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#run in local path!\nimport sys \nimport os \nimport json\n\nfile_name = os.path.basename(sys.argv[0])\nprint(file_name)\nfile_name = os.path.splitext(file_name)[0]\nprint(file_name)\njson_file_name = file_name + '_data.json'\nprint(json_file_name)\n\na = {'name': 'John Doe', 'age': 24}\nprint(a)\n\ntry:\n    with open(json_file_name, 'r') as initialFile:\n        print('file already exist:')\n        already = json.load(initialFile)\n        print( already)\n        print('will add +1 to loaded age')\n        a['age'] = already['age']+1\nexcept FileNotFoundError as e:\n    print('file initially not exist, its fine', e, e.errno)\n\njs = json.dumps(a, sort_keys=True, indent=4, separators=(',', ': '))\nprint(js)\nwith open(json_file_name, 'w+') as fw:\n    fw.write(js)\n    \nwith open(json_file_name, 'r') as fr:\n    b = json.load(fr)\n    \nprint( b, b['name'], b['age'], type(b['age']) )" }, { "alpha_fraction": 0.6048110127449036, "alphanum_fraction": 0.612829327583313, "avg_line_length": 38.9487190246582, "blob_id": "9a4929be826bc380eb0b1d616890635e283329b0", "content_id": "6b5cc9059c84aea8601aa9f463a37a7950122013", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1595, "license_type": "permissive", "max_line_length": 117, "num_lines": 39, "path": "/016argsKwargs.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\r\n#py3.7\r\nimport inspect\r\n\r\ndef printArgs(one, two=2, *args, **kwargs):\r\n    print(f'\\nprint {inspect.currentframe().f_code.co_name}:')\r\n    print(one)\r\n    print(two)\r\n    print(args)\r\n    
print(kwargs)\r\n\r\n#printArgs() #TypeError: printArgs() missing 1 required positional argument: 'one'\r\n#printArgs(,) #TypeError: printArgs() missing 1 required positional argument: 'one'\r\nprintArgs('1_call',)\r\nprintArgs('2_call','Two',)\r\nprintArgs('3_call','Two','three',)\r\nprintArgs('4_call','Two',par1='three',)\r\n#printArgs('5_call','Two',par1='three','fourt',) #SyntaxError: positional argument follows keyword argument\r\nprintArgs('5_call','Two','fourt', par1='three',)\r\nprintArgs('5_call','andFive','Two','fourt', par1='three',)\r\n#printArgs('5_call', ,'andFive','Two','fourt', par1='three',) #SyntaxError: invalid syntax\r\nprintArgs('5_call', None ,'andFive','Two','fourt', par1='three',)\r\nprintArgs('5_call', '' ,'andFive','Two','fourt', par2= 'seven' ,par1='three',par3='eight')\r\nprintArgs(1, 2, 3, 4, 9, 8, 7, par2=222, par1=1, par3=3)\r\n\r\ndict = { 'hi':'hii' , 'nice':'niiice',}\r\n#printArgs(*[1,2,3,4,5,6,7,], hi='hii', **dict) #TypeError: printArgs() got multiple values for keyword argument 'hi'\r\nprintArgs(*[1,22,3,4,5,6,7,], hi2='hii', **dict)\r\n\r\nprint('genexpr:')\r\ngenexpr = (x * x for x in range(5))\r\nprint(genexpr)\r\nprint(list(genexpr))\r\nprint(list(genexpr)) #genexpr now empty: []\r\n#TODO find out more about generator expressions and posibility of its rewind(reuse)\r\n\r\n#for now just redeclare generator expression\r\ngenexpr = (x * x for x in range(5))\r\nprintArgs(*genexpr)" }, { "alpha_fraction": 0.6883942484855652, "alphanum_fraction": 0.6931637525558472, "avg_line_length": 23.230770111083984, "blob_id": "056f03a85d56d640467011d2b3c91005867ebf4b", "content_id": "e84752bc854a1382248f4f6ceb485b2e8dbccced", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "permissive", "max_line_length": 77, "num_lines": 26, "path": "/031TypeCheck.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\nimport types #from types import MethodType, FunctionType\n\n#i = 2\n#type(i) is int #not recommended\n#isinstance(i, int)\n\nclass SomeClass:\n def fun():\n pass\n\nprint('what type is SomeClass.fun?', type(SomeClass.fun)) #<class 'function'>\n\nif isinstance( SomeClass.fun, types.MethodType): #False\n print('fun is types.MethodType')\n \nif isinstance( SomeClass.fun, types.FunctionType): #True\n print('fun is types.FunctionType')\n \nprint()\ndef justFun():\n pass\nprint('what type is justFun?', type(justFun))\nif isinstance( justFun, types.FunctionType): #True\n print('justFun is types.FunctionType')" }, { "alpha_fraction": 0.5827814340591431, "alphanum_fraction": 0.639072835445404, "avg_line_length": 21.230770111083984, "blob_id": "aa7e184a02865524e7a510e96253560a00c1c2e1", "content_id": "0e0847ee2ba26f8b9df68d8af46d4c9bfaadec37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "permissive", "max_line_length": 63, "num_lines": 13, "path": "/013stringFilterLambda.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\nimport string\r\nprint(string.printable)\r\nprint([c for c in string.printable])\r\n\r\ns1 = \"\\n\\t\\nHE\\aLLO\\n\\t\\n\\17\\b\"\r\nprint('s1', s1)\r\nprocd1 = [c for c in s1]\r\nprint(procd1)\r\ns2 = ''.join(list(filter(lambda x: x in string.printable, s1)))\r\nprint('s2', s2)\r\nprocd2 = [c for c in s2]\r\nprint(procd2)\r\n" }, { "alpha_fraction": 0.6322517395019531, "alphanum_fraction": 0.6558505296707153, "avg_line_length": 
21.159090042114258, "blob_id": "f8bbe991d995681f5029aac9dd47d767ed64ff73", "content_id": "782bba5bf47edc710cfd7635a6642989f6867fef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "permissive", "max_line_length": 141, "num_lines": 44, "path": "/008functions.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\n\r\ndef strFromOperation(operation,a, b):\r\n return f'{operation} {a} and {b} result'\r\n\r\ndef add(a, b):\r\n return a + b\r\n\r\ndef chooseBiggerOrZero(a, b):\r\n if a > b:\r\n return a\r\n elif a < b:\r\n return b\r\n else:\r\n return 0\r\n\r\ndef logOperarion(operation, a, b):\r\n print(strFromOperation(operation.__name__, a, b))\r\n return operation(a, b)\r\n\r\nprint(logOperarion(add, 7,8))\r\nprint(logOperarion(chooseBiggerOrZero, 7,8))\r\nprint(chooseBiggerOrZero(1, 2))\r\nprint(chooseBiggerOrZero(4, 5))\r\nprint(chooseBiggerOrZero(4, 4))\r\n\r\ndef Плюс(переменнаяА, переменнаяБ): #https://stackoverflow.com/questions/17043894/what-unicode-symbols-are-accepted-in-python3-variable-names\r\n return переменнаяА + переменнаяБ\r\n\r\nprint(logOperarion(Плюс, 7,8))\r\n\r\ndef noReturn():\r\n print('no return - return:')\r\nprint(noReturn())\r\n#assert noReturn() == None\r\n\r\ndef explicitNone():\r\n print('explicit none:')\r\n return None\r\nprint(explicitNone())\r\n\r\ndef doNothing():\r\n pass\r\nprint(doNothing())" }, { "alpha_fraction": 0.5503144860267639, "alphanum_fraction": 0.5786163806915283, "avg_line_length": 19.33333396911621, "blob_id": "8906e5aa117ffd492664a55b8c5da907060b82d9", "content_id": "c4af781c997ffbe8899927e4f00239023e4fafcf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "permissive", "max_line_length": 96, "num_lines": 15, "path": "/015decorators.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\r\n#py3.7\r\n\r\ndef retDeco(func1):\r\n def dec(text2):\r\n return 'start '+func1(text2)+' end'\r\n return dec\r\n\r\n@retDeco\r\ndef funText(text):\r\n return ''.join([text[i].upper() if i%2 == 0 else text[i].lower() for i in range(len(text))])\r\n \r\n \r\na= funText('hello everyone!')\r\nprint(a)" }, { "alpha_fraction": 0.6560747623443604, "alphanum_fraction": 0.6672897338867188, "avg_line_length": 28.72222137451172, "blob_id": "c653766fa98ea9b4c1332e6635debe683cf2f353", "content_id": "a64da2ca4b9cfa5f5fcd5fe4f5f4f2c97e9fc1a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "permissive", "max_line_length": 83, "num_lines": 36, "path": "/030classOverridesDiffList.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\nclass VeryBase(object):\n classvar ='1somevar'\n classvarToOverride ='2somevar'\n def __init__(self):\n self.instvar ='3somevar'\n self.instvarToOverride ='4somevar'\n def method(self):\n print('5method')\n def methodToOverride(self):\n print('6methodToOverride')\n \n \nclass WillOverride(VeryBase):\n classvarToOverride = '7Overriden_classVar'\n classvarSomeOther = '8someotherVarOfChildClass' \n def childMethod(self):\n print('9childMethod') \n def methodToOverride(self):\n print('9Overriden_methodToOverride')\n\nprint('differences:')\ntest = WillOverride()\nbaseDir = dir(VeryBase)\nchildDir = dir(WillOverride)\nfor m in baseDir:\n method = getattr(VeryBase, m)\n if method != 
getattr(WillOverride, m):\n print (f'{m} is {type(method)} and different in VeryBase and WillOverride')\nprint('child unique:')\nchildDir = dir(WillOverride)\nfor m in childDir:\n method = getattr(WillOverride, m)\n if m not in baseDir:\n print(f'{m} is {type(method)} and unique to WillOverride')\n" }, { "alpha_fraction": 0.7661290168762207, "alphanum_fraction": 0.7903226017951965, "avg_line_length": 30, "blob_id": "d3e1a41e143dbd6278bfb9dc30973da9c6abe9a2", "content_id": "1a87a8a9d675e5e908a4c5066fcc4de5921223b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 124, "license_type": "permissive", "max_line_length": 66, "num_lines": 4, "path": "/README.md", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "## Currently used to fiddle Python 3\nPython version 3.8\n\nSource files located in root directory named in order of creation.\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6797235012054443, "avg_line_length": 28.965517044067383, "blob_id": "ecf159e292e75b2834cf11b90966ac2328fe23bc", "content_id": "a02209bdf8b9f2e9f97d75965a4c24e8f99e7e71", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "permissive", "max_line_length": 76, "num_lines": 29, "path": "/038selfCountingStringToClipboard.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n#pip install pyperclip\nprint('Program will make self counting string and copy it to clipboard')\n\ntry:\n import pyperclip\nexcept ImportError:\n pyperclip = None\n print(\"pyperclip module not found, result won't be copyed to clipboard\")\n\nfiller = '*'\nprint('Filler between numbers is - ', filler)\nhowLong = int(input('Input length of desirable self counting string >>> '))\n\nresult = ''\nremainder = howLong\nwhile remainder >= (len(str(remainder))):\n result = str(remainder) + result\n remainder -= len(str(remainder))\n #print(result, remainder)\n if remainder >= len(filler):\n result = filler +result\n remainder -= len(filler)\n #print(result, remainder)\n \nprint('Result is:', result, ',it length is:', len(result))\nif pyperclip:\n pyperclip.copy(result)\n input('Result was copied to clipboard, press ENTER to exit')" }, { "alpha_fraction": 0.5755131840705872, "alphanum_fraction": 0.5931084752082825, "avg_line_length": 20.700000762939453, "blob_id": "bb0cedf5307d70e5dc8bbc712c890dccc0e7dd37", "content_id": "db15515e2eb9d0e57615a36f1fa15df8b55b9786", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1364, "license_type": "permissive", "max_line_length": 77, "num_lines": 60, "path": "/014with.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\r\n#py3.7\r\n\r\nwith open('hello_foo.txt', 'w') as f:\r\n f.write('hello, world!')\r\n#same as:\r\n# f = opent('hello_foo.txt', 'w')\r\n# try:\r\n# f.wtite('hello, world')\r\n# finally:\r\n# f.close()\r\n \r\nwith open('hello_foo.txt', 'r') as f:\r\n print(f.read())\r\n \r\nprint()\r\n\r\n \r\n#more advanced:\r\n \r\nfrom contextlib import contextmanager\r\nimport os\r\n\r\n@contextmanager # https://stackoverflow.com/a/3012921\r\ndef working_directory(path):\r\n current_dir = os.getcwd()\r\n os.chdir(path)\r\n try:\r\n yield\r\n except:\r\n print(f\"directory {path} not found\")\r\n finally:\r\n os.chdir(current_dir)\r\n \r\n#Pavel: enclosed with block to get rid of error if directory is not 
found\r\n #should think later if possible to handle such things inside \"with\" block\r\ntry:\r\n with working_directory(\"data/stuff\"):\r\n # do something within data/stuff\r\n print('Hi')\r\nexcept:\r\n print('problem with with of directory data/stuff')\r\nfinally:\r\n print('first try final')\r\n# here I am back again in the original working directory\r\n\r\nprint()\r\n\r\n#Another way: https://stackoverflow.com/a/5205878/5233335\r\ntry:\r\n f = open('foo.txt')\r\nexcept IOError:\r\n print('error open \"foo.txt\"')\r\nelse:\r\n with f:\r\n print('foo opened')\r\n print(f.readlines())\r\n #\r\n # some_code\r\n #\r\n\r\n" }, { "alpha_fraction": 0.5722599625587463, "alphanum_fraction": 0.6013578772544861, "avg_line_length": 28.485713958740234, "blob_id": "4105088beaa396d16646183740a805338f644529", "content_id": "856edce231fc6c066ff8753f6bab0537b7217f9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "permissive", "max_line_length": 94, "num_lines": 35, "path": "/052csv_DictWriter.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "import csv\n\nsample1 = {'a1': 1, 'b2': 2, 'c4': 3, 'name': 'abba'}\nsample2 = {'name': 'Bob', 'b2': 'beeTwoo', 'other': 'something'}\nsample3 = {'b2': 'b2', 'c4': True, 'name': 'name'}\nsample4 = {}\nsample5 = {'a1': None, 'b2': ''}\n\nexampleFile = __file__+'_out.csv'\n\ntry:\n with open(exampleFile, 'r') as fr:\n odr = outDictReader = csv.DictReader(fr)\n print(f'file {exampleFile} found:')\n for i,row in enumerate(odr):\n print(f'row number: {i}', row)\n if i==0:\n sample1['a1']=int(row['a1'])+1\nexcept:\n print(f'file {exampleFile} not found, creating new')\n\nprint()\n\nwith open(exampleFile, 'w', newline='') as fw:\n\todw = outDictWriter = csv.DictWriter(fw, ['name', 'a1', 'b2', 'c4'], extrasaction ='ignore') \n #extrasaction='ignore' used to ignore values that not in in fieldlist\n\todw.writeheader()\n\todw.writerow(sample1)\n\todw.writerow(sample2)\n\todw.writerow(sample3)\n\todw.writerow(sample4)\n\todw.writerow(sample5)\n\nwith open(exampleFile, 'r') as fr:\n\tprint(fr.read())" }, { "alpha_fraction": 0.6582568883895874, "alphanum_fraction": 0.6857798099517822, "avg_line_length": 38.54545593261719, "blob_id": "69bf0d8d069f6967717d9f2d8b49c60a30e3e4dc", "content_id": "2f3c9ba39d1e18880c089703c5dbb21e2cca4679", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "permissive", "max_line_length": 121, "num_lines": 11, "path": "/024json.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\nimport json\n\nmapping = {'a': 23, 'b': 42, 'c': 0xc0ffee,}\nprint(str(mapping))\nprint( json.dumps(mapping, indent=4, sort_keys=True) )\n#print( json.dumps({sum:'summ'}) ) #TypeError: keys must be str, int, float, bool or None, not builtin_function_or_method\n\n#mapping['complex'] = { 1, 2, 3, } set is not serializable by dumps, but List [] is ok\n#print(json.dumps(mapping)) #TypeError: Object of type set is not JSON serializable\n\n" }, { "alpha_fraction": 0.6326335668563843, "alphanum_fraction": 0.6431297659873962, "avg_line_length": 25.769229888916016, "blob_id": "207c1f585a334d9736f256ffdf06865d5141892b", "content_id": "1adf650021ba1e9c7dce92eaea05fe21db17f680", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "permissive", "max_line_length": 100, "num_lines": 39, 
"path": "/039classProperty_TextWithPointer.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\nclass TextWithPointer(object):\n \n def __init__(self, text):\n self.text = text;\n self._position = 0;\n \n @property\n def charAtCurrentPointer(self):\n return self.text[self._position];\n \n @property\n def pointer(self):\n return self._position;\n \n @pointer.setter\n def pointer(self, value):\n ##if (value > len(self.text)) || (value < 0):\n #raise IndexError(f\"value {value} is not pointing in text (range 0 - {len(self.text)})\")\n self._position = value\n \n\ndef printTextClass(textClass):\n print(textClass)\n print(\"text:\", someText.text)\n print(\"pointer:\", someText.pointer)\n print(\"_position:\", someText._position)\n print(\"charAtCurrentPointer:\", someText.charAtCurrentPointer)\n\nsomeText = TextWithPointer(\"some text to fiddle with\")\nprintTextClass(someText)\nprint(someText.text[0], someText.text[1], someText.text[2], someText.text[3])\n\nsomeText.pointer =2\nprintTextClass(someText)\n\nsomeText.pointer =3\nprintTextClass(someText)\n\n\n\n\n" }, { "alpha_fraction": 0.5559905767440796, "alphanum_fraction": 0.6327329874038696, "avg_line_length": 25.081632614135742, "blob_id": "36aa32121b5ed8940bb84c2e153fa24b62406e9e", "content_id": "c9919da374de3687656ec9092d0d6c8aeb2ab02d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1277, "license_type": "permissive", "max_line_length": 89, "num_lines": 49, "path": "/043RomanArabicConverter.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#small Roman numbers converter (up to 4000)\n\ndef toRoman(arabic):\n romanDict = {1000: 'M', 900: 'CM', 500: 'D', 400: 'CD', \n 100: 'C', 90: 'XC', 50: 'L', 40: 'XL', 10: 'X', 9: 'IX', 5: 'V', 4: 'IV', 1: 'I'}\n result = []\n for ara, rom in romanDict.items():\n result.append( arabic // ara * rom)\n arabic %= ara\n return ''.join(result)\n\t\ndef toArabic(roman):\n romanVal = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}\n result = 0\n for i in range(len(roman)):\n if i > 0 and romanVal[roman[i]] > romanVal[roman[i - 1]]:\n result += romanVal[roman[i]] - 2 * romanVal[roman[i - 1]]\n else:\n result += romanVal[roman[i]]\n return result\n \n \ndef convertAndPrint(number):\n print(f'number: {number}', toRoman(number), toArabic(toRoman(number)))\n\n \n\nconvertAndPrint(1)\nconvertAndPrint(3)\nconvertAndPrint(4)\nconvertAndPrint(5)\nconvertAndPrint(9)\nconvertAndPrint(10)\nconvertAndPrint(11)\nconvertAndPrint(18)\nconvertAndPrint(25)\nconvertAndPrint(42)\nconvertAndPrint(53)\nconvertAndPrint(72)\nconvertAndPrint(87)\nconvertAndPrint(99)\nconvertAndPrint(100)\nconvertAndPrint(101)\nconvertAndPrint(151)\nconvertAndPrint(171)\nconvertAndPrint(199)\nconvertAndPrint(804)\nconvertAndPrint(1999)\n#input()" }, { "alpha_fraction": 0.5464926362037659, "alphanum_fraction": 0.584013044834137, "avg_line_length": 28, "blob_id": "d78cfccb2cb8175708bf8ae73816fd723c0aace0", "content_id": "4cbebf1e876fa88f57971c726bbfef5058414d24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "permissive", "max_line_length": 58, "num_lines": 21, "path": "/053counter.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "from collections import Counter\n\ndef is_list_a_contains_list_b(main, sub):\n cMain = Counter(main)\n cSub = Counter(sub)\n cMain.subtract(cSub)\n for i in cMain.values():\n if i<0:\n 
return False\n return True\n\n\nif __name__=='__main__':\n assert is_list_a_contains_list_b([], [])\n assert is_list_a_contains_list_b([1,2,3], [])\n assert is_list_a_contains_list_b([1,2,3], [3,2])\n assert is_list_a_contains_list_b([1,2,3,2], [2,2])\n assert not is_list_a_contains_list_b([1], [2])\n assert not is_list_a_contains_list_b([1,2,3], [2,3,2])\n \n print(\"all asserts ok\")\n " }, { "alpha_fraction": 0.5697538256645203, "alphanum_fraction": 0.6160609722137451, "avg_line_length": 24.44776153564453, "blob_id": "ff1399c28f22a04ac2d30ebec9cb4009ac8ad731", "content_id": "f5db33976e9c0c896feac04e7050a30986d2f456", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1706, "license_type": "permissive", "max_line_length": 90, "num_lines": 67, "path": "/042numberToBinaryDecToBin.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.8\nfrom inspect import currentframe, getframeinfo\nframeinfo = getframeinfo(currentframe())\nprint (f'{frameinfo.filename}, {frameinfo.lineno}')\n\n\nvar42 = 42\n\nprint (f'codeline: {getframeinfo(currentframe()).lineno} ::')\n\nprint ('number to binary string:')\n\nprint (var42, bin(var42))\nprint (var42, bin(var42)[2:])\n\nprint (var42, f\"{var42:#b}\")\n\nprint (var42, f\"{var42:b}\") #for now (2019) its will be main method\n\nprint (0, f\"{0:b}\")\nprint (255, f\"{255:b}\")\nprint (-1, f\"{-1:b}\")\nprint (-42, f\"{-42:b}\")\n\nprint (f'codeline: {getframeinfo(currentframe()).lineno} ::')\nprint ('old way using format: ',var42, \"{0:b}\".format(var42))\n\nprint ()\n\ndef toBin(val): #throws exception on negative numbers\n arr = []\n if val< 0:\n raise ValueError ('to convert to binary using this function value should be >= 0')\n while val > 1:\n arr.append(val % 2)\n val = val // 2\n arr.append(val % 2)\n return ''.join([str(x) for x in list(reversed(arr))])\n\ndef toBinRecur(num): #fails on negative numbers like -42\n s = ''\n if num > 1: \n s = toBinRecur(num // 2) \n return s + str(num % 2) \n\ndef tryRunAndPrintCustomBinary(func):\n print ('function to test:', func.__name__)\n print (var42, func(var42))\n print (0, func(0))\n print (1, func(1))\n print (4, func(4))\n print (255, func(255))\n try:\n print (-1, func(-1))\n except Exception as e:\n print (e, str(e))\n try:\n print (-42, func(-42))\n except Exception as e:\n print (e, str(e))\n print()\n \nprint (f'codeline: {getframeinfo(currentframe()).lineno} ::')\nprint ('now cutom functions:')\nprint ()\ntryRunAndPrintCustomBinary(toBin)\ntryRunAndPrintCustomBinary(toBinRecur)\n\n" }, { "alpha_fraction": 0.6578631401062012, "alphanum_fraction": 0.6690676212310791, "avg_line_length": 28.761905670166016, "blob_id": "e525afcfbfe5c29e952bc5fd35d71958a797059b", "content_id": "9f9d1fd7245718fc936aed4cd27b4a1655f631c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2499, "license_type": "permissive", "max_line_length": 103, "num_lines": 84, "path": "/049pathlib.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#https://docs.python.org/3/library/pathlib.html\nfrom pathlib import Path\n\nprint(__file__)\n#1 [Python-Dev] __file__ is not always an absolute path\n#https://mail.python.org/pipermail/python-dev/2010-February/097461.html\n\n#2 Module __file__ attributes (and related values) should now always contain absolute paths by default,\n# with the sole exception of __main__.__file__ when a script has been\n# executed directly using a relative 
path. \n#(Contributed by Brett Cannon in https://bugs.python.org/issue18416)\n\n\ncwd = Path.cwd()\nprint('0', cwd)\nprint('1', Path(__file__))\n\np = Path(__file__).resolve()\n#Path.resolve(strict=False)\n#Make the path absolute, resolving any symlinks. A new path object is returned\n\nprint('2', p, type(p))\nprint(p, str(p)) #getting string from Path object\nprint(p, 'exists:', p.exists(), ', is_directory:', p.is_dir())\nprint(p, 'is_file:', p.is_file())\nprint()\n\np_parent = Path(__file__).resolve().parent ##script dir\nprint('3', p_parent, type(p_parent)) ##script dir\nprint(p_parent, 'exists:', p_parent.exists(), ', is_directory:', p_parent.is_dir())\nprint(p_parent, 'is_file:', p_parent.is_file())\nprint()\n\nnep = non_existent_path = Path('sooome path that should not.be/existed')\nprint(non_existent_path)\nprint(nep, 'exists:', nep.exists(), ', is_directory:', nep.is_dir())\nprint(nep, 'is_file:', nep.is_file())\nprint()\n\np_anchor = p.anchor\nprint('4', p_anchor, type(p_anchor))\np_name = p.name\nprint('5', p_name, type(p_name))\np_stem = p.stem\nprint('6', p_stem, type(p_stem))\np_suffix = p.suffix\nprint('7', p_suffix, type(p_suffix))\np_drive = p.drive\nprint('8', p_drive, type(p_drive))\n\np_parent_parent = Path(__file__).resolve().parent.parent\nprint('parent', p_parent_parent)\nprint()\n\n#p.open(mode='r', buffering=-1, encoding=None, errors=None, newline=None)\n#Open the file pointed to by the path, like the built-in open() function does:\n\nwith open(p, 'r') as f:\n print('opened using standart open')\n print('script file contents:')\n print(f.read())\n print()\n\nwith p.open(\"r\") as f:\n print('opened using Path.open')\n print('script file contents:')\n print(f.read())\n print()\n \nlicense = p_parent.joinpath('LICENSE')\nprint('license', license, type(license))\nlicense = p_parent / 'LICENSE' ###good way\nprint('license', license, type(license))\nprint()\n\ntry:\n with license.open(\"r\") as f:\n print(license, 'file contents:')\n print(f.read())\nexcept:\n print(license, \"can't, be read\")\n\n\ninput('>>Thats end of script, input anything to exit')" }, { "alpha_fraction": 0.5662350654602051, "alphanum_fraction": 0.576195240020752, "avg_line_length": 20.75, "blob_id": "5f74000c983ec5fdb5b23d8328af377829d5bef1", "content_id": "9a1a2f33ff94779d50f0b43eebf9f66e41afd09a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2008, "license_type": "permissive", "max_line_length": 88, "num_lines": 88, "path": "/022myIterator.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\n\r\nintegers = range(5)\r\nsquared=(i*i for i in integers)\r\nplus3= (i + 3 for i in squared)\r\nprint( *(plus3) )\r\n\r\ndef repeaterGenerator(value, maxReps):\r\n count = 0\r\n while True:\r\n if count >= maxReps:\r\n return\r\n yield value[count]\r\n count +=1\r\n\r\n\r\nfor item in repeaterGenerator('HelloGen',4):\r\n print(item)\r\n \r\n#difference with listComprehensions is that Generator expression return values on demand\r\n#Generator expression not constructing list object \r\n# list comprh [], gen expr ()\r\niterExpr = ( 'iterExprLikeListComprehens' for i in range(4) )\r\nfor item in iterExpr:\r\n print(item)\r\n \r\n\r\n###########class iterator\r\n \r\nclass BoundedRepIterator:\r\n def __init__(self, value, maxRepeats):\r\n self.value = value;\r\n self.maxRepeats = maxRepeats\r\n self.count = 0\r\n \r\n def __iter__(self):\r\n return self\r\n \r\n def __next__(self):\r\n if self.count 
>=self.maxRepeats:\r\n raise StopIteration\r\n self.count +=1\r\n return self.value\r\n\r\nbrep = BoundedRepIterator('HelloCount', 8)\r\nfor item in brep:\r\n print(item)\r\n\r\nbrep = BoundedRepIterator('wHelloCount', 8)\r\niterator = iter(brep)\r\nwhile True:\r\n try:\r\n item = next(iterator)\r\n except StopIteration:\r\n break\r\n print('while', item)\r\n \r\n###############very basic class iterator with NO stop, just for understanding:\r\nclass Repeater:\r\n def __init__(self, value):\r\n self.value = value\r\n \r\n def __iter__(self):\r\n return RepeaterIterator(self)\r\n\r\n \r\nclass RepeaterIterator:\r\n def __init__(self, source):\r\n self.source = source\r\n \r\n def __next__(self):\r\n return self.source.value\r\n\r\nrepeater = Repeater('HelloFor')\r\ni = 0\r\nfor item in repeater:\r\n if i>4:\r\n break\r\n print(item)\r\n i+=1\r\n\r\nrepeater = Repeater('HelloWhile')\r\niterator = repeater.__iter__()\r\ni = 0\r\nwhile i < 5:\r\n item = iterator.__next__()\r\n print(item)\r\n i+=1\r\n \r\n" }, { "alpha_fraction": 0.5986670255661011, "alphanum_fraction": 0.6198203563690186, "avg_line_length": 30.575471878051758, "blob_id": "60adcef2284c6dd3582581743488d4d9d5f23887", "content_id": "d6410a9409d65f8e2d31129c7e81ad65be6ab377", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3451, "license_type": "permissive", "max_line_length": 116, "num_lines": 106, "path": "/018class.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/var python3\r\n#py3.7\r\n\r\nclass PineTree:\r\n x = 0\r\n def __init__(self, color, height, pX=None):\r\n self.color = color\r\n self.height = height\r\n print('self.x before __init__:', self.x) #at first run: 0\r\n self.__class__.x+=1\r\n print('self.x:', self.x) #at first run: 1\r\n if pX != None:\r\n self.x = pX #if we have pX then shadow class variable \"x\" by it\r\n \r\n def __repr__(self):\r\n return '__repr__ for '+self.printName()\r\n \r\n def __str__(self):\r\n return '__str__ for '+self.printName()\r\n \r\n def printName(self):\r\n return f'PineTree id({id(self)})'+f' color:{self.color!r} {self.color}, height:{self.height}'\r\n \r\n def returnSelfX(self):\r\n return self.x\r\n \r\n @classmethod\r\n def returnClassX(cls):\r\n return cls.x\r\n \r\n @classmethod\r\n def makeYellowTree(cls, height, pX=None):\r\n return cls('Yellow', height, pX)\r\n \r\n @staticmethod\r\n def justSomeCalculation(x, y):\r\n return x + y\r\n \r\nmyTree = PineTree('green', 42)\r\nprint(PineTree.x, myTree.x,) #1 1\r\nprint(id(myTree))\r\nprint(f'plain conversion to string by {myTree}')\r\nprint(f'in list conversion to string going by {[myTree]}')\r\nprint(' ; ' , str(myTree), ' ; ' , repr(myTree))\r\nmyTree2 =PineTree('green', 42, 4)\r\nprint(PineTree.x, myTree.x, myTree2.x) #2 2 4\r\nprint('anonther tree for test: ',id(myTree2))\r\nprint('and first tree again: ',id(myTree))\r\nmyTree3 =PineTree('red', 25)\r\nprint(PineTree.x, myTree.x, myTree2.x, myTree3.x) #3 3 4 3\r\nprint('anonther tree for test: ',id(myTree3), myTree3)\r\n\r\nmyTree4 = PineTree.makeYellowTree(10,9)\r\nmyTree5 = PineTree.makeYellowTree(10)\r\n#####Attention!\r\n#Class method does NOT haveaccess to istance variables!\r\nprint(myTree4.returnSelfX(), myTree5.returnSelfX()) #PineTree.returnSelfX() - error because Class does not have self\r\nprint(PineTree.x, myTree4.x, myTree5.x) #5 9 5 #five nine five\r\nprint(PineTree.returnClassX(), myTree4.returnClassX(), myTree5.returnClassX()) #5 5 5 #five !!!five!!! 
five\r\n\r\n#just static method test\r\nprint(PineTree.justSomeCalculation(38, 11)) #49\r\n\r\nprint('Method binding start:')\r\n#bind method to PineTree Class\r\ndef printBindedFunctionSelf(self):\r\n return f'Self id({id(self)})'+f' color:{self.color!r} {self.color}, height:{self.height}'\r\nPineTree.wowMethod = printBindedFunctionSelf #class binding is easy\r\nprint(myTree2.wowMethod())\r\nprint(myTree4.wowMethod())\r\n\r\n#bind new method ONLY to myTree4\r\nfrom types import MethodType\r\nmyTree4.pr = MethodType(printBindedFunctionSelf, myTree4) #instance binding is ok too\r\nprint(myTree4.pr())\r\n#print(myTree3.pr()) #AttributeError: 'PineTree' object has no attribute 'pr'\r\n\r\n\r\n#Reusing other objects\r\nclass Person:\r\n def __init__(self, height):\r\n self.height = height\r\n def bio(self):\r\n print(f\"I'm a person with height: {self.height}\")\r\n\r\nclass Student(Person):\r\n def bio(self):\r\n Person.bio(self)\r\n print (f\"I'm studying and my height is {self.height}\")\r\n\r\nclass Cat:\r\n def __init__(self, height):\r\n self.height = height\r\n def icat(self,mew):\r\n print(f'I cat, my height is {self.height}',mew)\r\n def likeAMan(self):\r\n Person.bio(self)\r\n \r\np1 = Person(15)\r\np1.bio()\r\np2 = Student(32)\r\np2.bio()\r\nCat.icat(p1,'meeeow')\r\nnewCat = Cat(3)\r\nnewCat.icat('gfg')\r\nnewCat.likeAMan()" }, { "alpha_fraction": 0.7011308670043945, "alphanum_fraction": 0.7011308670043945, "avg_line_length": 31.605262756347656, "blob_id": "57509d0b4077eeb1d187b721b7a6eb0c338762da", "content_id": "5fe55f2f912f3a2baaf680d5a15809bfddc66a96", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1238, "license_type": "permissive", "max_line_length": 87, "num_lines": 38, "path": "/046observerPattern.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "class Subject:\n def __init__(self):\n self.__observers = []\n \n def attach(self, observer):\n self.__observers.append(observer)\n print(f'New observer: {observer}, total observers: {len(self.__observers)}')\n \n def detach(self, observer):\n self.__observers.remove(observer)\n \n def notify(self, *args, **kwargs):\n for observer in self.__observers:\n observer.update(self, *args, **kwargs)\n ## Subject internal state should be changed via methods that call notify on changes\n \nclass Observer:\n def update(self, subject, *args, **kwargs):\n print(f'update() {self} was called by: {subject}', args, kwargs)\n \n\nsomethingObservable = Subject()\nsomeObserver = Observer()\nsomethingObservable.attach(someObserver)\n\nsomethingObservable.notify()\nsomethingObservable.notify('second update')\nsomethingObservable.notify('and another one update')\n\nsomethingObservable.detach(someObserver)\nsomethingObservable.notify('no one will hear this')\n\nsomethingObservable.attach(someObserver)\nsomethingObservable.notify('hello again')\n\nsecondObserver = Observer()\nsomethingObservable.attach(secondObserver)\nsomethingObservable.notify('two observers will get this')" }, { "alpha_fraction": 0.6166347861289978, "alphanum_fraction": 0.633843183517456, "avg_line_length": 31.71875, "blob_id": "3172b954b3d032b19255a833e6f115bef996429a", "content_id": "d88af45621a181f445cd895d796af9a617eaa225", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1046, "license_type": "permissive", "max_line_length": 69, "num_lines": 32, "path": "/040mostCommonLetterInString.py", "repo_name": "ipavel83/Python", "src_encoding": 
"UTF-8", "text": "import operator\n\ncountDict = dict.fromkeys([x for x in range(ord(\"a\"),ord(\"z\")+1)], 0)\ndef resetDict():\n countDict.update({x:0 for x in range(ord(\"a\"),ord(\"z\")+1)})\n\ndef updateLetterCount(letter: str):\n temp = ord(letter.lower())\n if temp in countDict:\n countDict.update({temp: countDict[temp]+1})\n\ndef mostCommonLetter(text: str) -> str:\n for letter in text:\n updateLetterCount(letter)\n maxLetter = max(countDict.items(), key=operator.itemgetter(1))[0]\n #print(maxLetter, chr(maxLetter))\n #print(countDict)\n #print(text, maxLetter, chr(maxLetter))\n resetDict()\n return chr(maxLetter)\n\nif __name__ == '__main__':\n #print(countDict)\n #print(chr(122))\n\n assert mostCommonLetter(\"Hello World!\") == \"l\"\n assert mostCommonLetter(\"How do you do?\") == \"o\"\n assert mostCommonLetter(\"One\") == \"e\"\n assert mostCommonLetter(\"Aabbc!\") == \"a\"\n assert mostCommonLetter(\"cAaBBb!!!!\") == \"b\"\n assert mostCommonLetter(\"abcdefg\") == \"a\"\n assert mostCommonLetter(\"a\" * 9999 + \"b\" * 5000) == \"a\"" }, { "alpha_fraction": 0.6050870418548584, "alphanum_fraction": 0.6198126077651978, "avg_line_length": 23.129032135009766, "blob_id": "76a78b31f9dceba5ed8caa487a756f23bd00d061", "content_id": "946c3f523455bce0e16ee06b5d725a9fab8146bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "permissive", "max_line_length": 113, "num_lines": 31, "path": "/033pipeAble2way.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n#more complex variants at:\n#https://stackoverflow.com/questions/34459274/how-to-make-python-scripts-pipe-able-both-in-bash-and-within-python\n\nimport sys\n\n\n\ndef processing(data)\n #do some stuff\n return data\n \n \nif __name__ == '__main__':\n def multipipe(data):\n #if __main__ call processing and print result\n print(processing(data))\n \n def parse_args(input):\n #Do some sturr with input\n #like get all elements except script name:\n data = imput[1:]\n #...\n return data\n \n input = parse_args(sys.argv)\n main(input)\nelse:\n def miltipipe(data):\n #if called as function from other file - do processing and return result\n return processing(data)" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6442307829856873, "avg_line_length": 20, "blob_id": "1144e22f773e667cf9856cdc9fe140ea94bbb0ea", "content_id": "7821540a94889267b730abd49f99094af229803a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "permissive", "max_line_length": 59, "num_lines": 5, "path": "/001hello.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "print (\"Hello Python\") #This is the comment and its ignored\na = 1\nb = 2.5\nc = 'some text'\nprint(a, b, c)" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.5733333230018616, "avg_line_length": 14.142857551574707, "blob_id": "e99af75ef0fc129d9f078f59237bb6c6fdf9b470", "content_id": "52345ab207120bd91742d5ccef5032bada4c3ae6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "permissive", "max_line_length": 42, "num_lines": 28, "path": "/017objectEqCopy.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/var python3\r\n#py3.7\r\n\r\na = [1,2,3,]\r\nb = a\r\nc = [1,2,3,]\r\nprint(a == c, a is b)\r\nprint(a == c, a is c)\r\nprint(None == None, None is 
None)\r\n\r\nimport copy\r\nd =[a,b,c,3]\r\ncopyE = copy.copy(d)\r\ndeepF = copy.deepcopy(d)\r\nprint(d)\r\nprint(copyE)\r\nprint(deepF)\r\nprint('change d[0][1]=7')\r\nd[0][1]=7\r\nprint(d)\r\nprint(copyE)\r\nprint(deepF)\r\nprint('change d[1]=8, deepF[2][1]=\"deep\"')\r\nd[1]=8\r\ndeepF[2][1]= 'deep'\r\nprint(d)\r\nprint(copyE)\r\nprint(deepF)" }, { "alpha_fraction": 0.7126948833465576, "alphanum_fraction": 0.7505567669868469, "avg_line_length": 33.61538314819336, "blob_id": "874f9d693bbba83c4d23069e251ac92cfc0fd274", "content_id": "542ac66dc3fd4218c76f699bad3d5c9b7ce6839e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "permissive", "max_line_length": 81, "num_lines": 13, "path": "/037GetWindowTitleEverySecond.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "import win32gui\nimport time\n\ntempWindowName=win32gui.GetWindowText (win32gui.GetForegroundWindow())\nprint(\"first Window Title: \", tempWindowName)\n\nwhile True:\n if tempWindowName == win32gui.GetWindowText (win32gui.GetForegroundWindow()):\n pass #Window name not changed\n else:\n tempWindowName=win32gui.GetWindowText (win32gui.GetForegroundWindow())\n print(time.time(), tempWindowName) #log changed name\n time.sleep(1.01)" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.599662184715271, "avg_line_length": 35.9375, "blob_id": "8519aa86738154a1b94f51fe3b7fcc06a7fa0345", "content_id": "0ab68421531ab8437d0bf3cb031d3c2eb94fd8af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "permissive", "max_line_length": 106, "num_lines": 16, "path": "/023dictionaryFunctionCall.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\ndef functionCalc(operation, arg1, arg2):\n d = {'add': lambda x, y: x + y , \n 'sub': lambda x, y: x - y, }\n d['mul'] = lambda x, y: x * y\n d['div'] = lambda x, y: arg1 / arg2 #using external arg1 and arg2 in lambda\n #print(d) #for logging\n return d.get(operation, lambda x, y: None)(arg1, arg2) #error if just \"pass\" in lambda, so return None\n \nprint( functionCalc('add', 3, 7) )\nprint( functionCalc('adddddd', 3, 7) )\nprint( functionCalc('sub', 3, 7) )\nprint( functionCalc('mul', 3, 7) )\nprint( functionCalc('div', 3, 7) )\nprint( functionCalc('div', 8, 4) )\n\n" }, { "alpha_fraction": 0.7263681888580322, "alphanum_fraction": 0.7587064504623413, "avg_line_length": 43.77777862548828, "blob_id": "4ef911360520a65d84bc649cf717d4ca74225d4d", "content_id": "9d5c9ba370e911285b252c04100c6552c2ca2991", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "permissive", "max_line_length": 115, "num_lines": 9, "path": "/032random.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\n#random(), uniform(startFloatIncl, endFloatIncl), randint(startIntIncl, endIntIncl)\n# and much much more (like seed(), intervals or population choise) in https://docs.python.org/3/library/random.html\nfrom random import random, uniform, randint \n\nprint('just random',random())\nprint('random floating point number in interval', uniform(5.5, 25.8))\nprint('random int in interval', randint(10,100))" }, { "alpha_fraction": 0.6978798508644104, "alphanum_fraction": 0.71378093957901, "avg_line_length": 39.35714340209961, "blob_id": "4eaabf16c63cafd6c9228c0d84d69b3352424f36", "content_id": 
"b559cb5069fd82128fb83aac2f14531d73d512d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "permissive", "max_line_length": 100, "num_lines": 14, "path": "/004inputIF3.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\nprint('input something:')\nwhat=input()\nstrWhat = str(what)\nprint( what, type(what), len(what),'and converted to string:', strWhat, type(strWhat), len(strWhat))\n\n#python.org FAQ: Is there an equivalent of C’s ”?:” ternary operator?\n#something = [on_true] if [expression] else [on_false]\na = 'longer than 2 chars' if len(strWhat)>2 else 'shorter than 3 chars'\n\nprint('You typed',strWhat)\nprint(f'you typen something {a}')\nprint('You again inputed -',input('input something again!'))\n#for more info type in terminal: C:\\Python\\Python37-32\\python -m pydoc input \n" }, { "alpha_fraction": 0.5960384011268616, "alphanum_fraction": 0.6194477677345276, "avg_line_length": 29.320755004882812, "blob_id": "2808a8c61d896344a11cde502d0338d3afa3f223", "content_id": "b84b8b42fbf7ed1f5445a95c576248f00bfe33ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1666, "license_type": "permissive", "max_line_length": 104, "num_lines": 53, "path": "/009loops.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\n\r\ntheInts = [1, 2, 3]\r\ntheStrings = ['a one', 'a two', 'a three', 'a four']\r\nvarLc = 'lc'\r\ntheMixed = [1, 'la', 2, 'lb', 3, varLc]\r\n\r\nfor num in theInts:\r\n print('Pyhonic way:', num)\r\n\r\nfor i, item in enumerate(theInts):\r\n print(f'Pyhonic way: {i} {item}')\r\n \r\nfor i in range(len(theInts)):\r\n print('not very pythonic way of loop through items:', i, theInts[i])\r\n\r\n#for i in range(a, n, s):\r\n# ... # in java it like:\r\n#for (int i = a; i < n; i += s) { \r\n# ... }\r\n\r\nfor i in range(0,len(theMixed)):\r\n print('theMixed:',theMixed[i])\r\nfor i in range(0,len(theMixed),2):\r\n print('theMixed with step of 2:',theMixed[i])\r\nfor i in range(0,len(theMixed),3):\r\n print('theMixed with step of 3:',theMixed[i])\r\n \r\n\r\nprint('list comprehensions')\r\n######list comprehensions is VERY Pythonic\r\n#values = [ EXPRESSION for ITEM in COLLECTION if CONDITION]\r\n\r\n#can also be expressed not pythonic way like:\r\n# values = []\r\n# for item in collection:\r\n# if condition:\r\n# values.append(expression)\r\nfrom numbers import Number\r\nprint([x * 10 for x in theMixed if isinstance(x, Number) ])\r\nprint( *( x * 100 for x in theMixed if isinstance(x, Number) ) ) # * is needed to print generator object\r\n#without * print will be: #<generator object <genexpr> at 0x034C29B0>\r\n#also can do same with range:\r\nprint(*range(8))\r\ncDict = {x: x * x + 10 for x in range(5)}\r\nprint(cDict)\r\n \r\n######while used very rarely, useful if loop is going forever. 
So, almost never\r\nprint('while almost always not pythonic:')\r\ni = 0\r\nwhile i < 10: #use while for loops that runs forever - so almost never (LearnP3 HW p126)\r\n print(i)\r\n i+=2\r\n\r\n " }, { "alpha_fraction": 0.6246649026870728, "alphanum_fraction": 0.6268096566200256, "avg_line_length": 28.080644607543945, "blob_id": "352421b179e0ab67daaa9e27dd72723b136e1d17", "content_id": "ae0d57df3bdbdbcf442e0809513a2e74a15aadc7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1865, "license_type": "permissive", "max_line_length": 88, "num_lines": 62, "path": "/010fileDirs.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#Module usage:\r\n# 1) set \"pathname\" to directory where need to acquire file paths\r\n# 2) call makeListsOfFiles()\r\n\r\nimport os\r\n\r\npathname = \"\"\r\nfileList = None # global, will be initialized in makeListsOfFiles #[]\r\npathList = None #[]\r\nfileFullPathList = None #[]\r\n\r\ndef makeListsOfFiles():\r\n global fileList\r\n fileList = []\r\n global pathList\r\n pathList = []\r\n global fileFullPathList\r\n fileFullPathList = []\r\n for root, dirs, files in os.walk(pathname):\r\n for file in files:\r\n fileList.append(file)\r\n pathList.append(root)\r\n fileFullPathList.append(root + \"\\\\\" + file)\r\n \r\ndef writeListToFile(fileName, list):\r\n with open(fileName, \"w\", encoding=\"utf-8\") as fileToWrite:\r\n for item in list:\r\n fileToWrite.write(item + \"\\n\") \r\n \r\n#if module was executed as standalone write lists to files and print fileList and paths \r\nif __name__ == \"__main__\": \r\n #import sys\r\n from sys import argv\r\n pathname = os.path.dirname(argv[0]) \r\n \r\n makeListsOfFiles()\r\n makeListsOfFiles()\r\n \r\n writeListToFile(\"exFileList.txt\", fileList) \r\n writeListToFile(\"exFilePathList.txt\", fileFullPathList)\r\n \r\n #def openAndPrintAllFiles(list):\r\n # for item in list:\r\n # print(open(item).read())\r\n #openAndPrintAllFiles(fileFullPathList)\r\n \r\n for i in range(len(fileList)):\r\n print(fileList[i], pathList[i])\r\n \r\n \r\n\r\n \r\n#fileList.sort() #changes list inplace\r\n#print(\"N. 
of files\", len(fileList))\r\n#fileListSorted = sorted(fileList)\r\n#writeListToFile(writeNameSorted, fileListSorted) \r\n \r\n# see all the methods of os\r\n# print(*dir(os), sep=\", \")\r\n#os.system(\"lista_file.txt\") #exFileList.txt\r\n#os.system(\"lista_file_ordinata.txt\") #exFileListSorted.txt\r\n#os.system(\"filePathList.txt\") #exFilePathList.txt\r\n" }, { "alpha_fraction": 0.5711183547973633, "alphanum_fraction": 0.5808903574943542, "avg_line_length": 29.66666603088379, "blob_id": "b7decd74d0ae05b040151f6673bdfdbcfe5c22f0", "content_id": "2c526b719b2b6b82e01e5397d3967d3f38e4b0c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "permissive", "max_line_length": 111, "num_lines": 30, "path": "/027wordsExtractor.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\nimport sys\n\nif len(sys.argv) < 3:\n print('use at least 2 parameters: fileNameToExtract, lettersToFind')\n sys.exit()\nscriptName = sys.argv[0]\nfilenameToExtract = sys.argv[1]\nlettersToFind = sys.argv[2]\ntextToExtract = ''\nsplited=[]\nresult = ''\nif __name__ == '__main__':\n try:\n with open(filenameToExtract) as txt:\n textToExtract =txt.read()\n splited = textToExtract.split()\n check = set(lettersToFind)\n for w in splited:\n if all([x in check for x in w]):\n result = result+w+' '\n #print(splited)\n #print(result)\n \n with open('extractedFrom_'+filenameToExtract, 'w') as txt2:\n txt2.write(result)\n print(f'extracted {len(result.split())} words and writen them to {\"extractedFrom_\"+filenameToExtract}')\n except IOError:\n print(f\"can't open file {filename}\")\n\n" }, { "alpha_fraction": 0.540217399597168, "alphanum_fraction": 0.5554347634315491, "avg_line_length": 37.35416793823242, "blob_id": "18f3690b8ceaa8166904022cc952ece7ba8d2e31", "content_id": "a9af35fd1daea4fd859bcb925dd8449d11d00614", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1840, "license_type": "permissive", "max_line_length": 113, "num_lines": 48, "path": "/028PGnPletsExtractor.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\n\nimport sys\nfrom decimal import Decimal\nfrom collections import Counter\n\nif len(sys.argv) < 3:\n print('use at least 2 parameters: fileNameToExtract, NumberPletsToExtract')\n sys.exit()\nscriptName = sys.argv[0]\nfilenameToExtract = sys.argv[1] #filenameToExtract = 'pg100.txt'\nnumPlets = int(sys.argv[2]) #numPlets = 1\ntextToExtract = ''\nsplited=[]\nresult = Counter()\nif __name__ == '__main__':\n try:\n with open(filenameToExtract) as txt:\n for line in txt:\n splited = line.split()\n wordToExtract =splited[1]\n count= Decimal(splited[2])\n npletsInWord=len(wordToExtract) - numPlets + 1\n for i in range(npletsInWord): #TODO modificate from 1 letter to numPlets\n #strOnlyLetters[1:3]) #cut start at index 1 and end before 3\n result.update({wordToExtract[i:i+numPlets]: count})\n #print(wordToExtract, splited[2], count)\n #\n #check = set(lettersToFind)\n #for w in splited:\n # if all([x in check for x in w]):\n # result = result+w+' '\n #print(splited)\n #print(result)\n \n with open(f'extracted{numPlets}pletsFrom_'+filenameToExtract, 'w') as txt2:\n sortRes = sorted(result.items(), key=lambda x:x[1], reverse=True)\n i=0\n #print(len(sortRes))\n for k, v in sortRes: #for k, v in result.items():\n txt2.write(f'{k} {v}')\n if i<(len(sortRes)-1):\n txt2.write('\\n')\n i+=1\n #txt2.write(str(result))\n 
print(f'extracted {len(result)} {numPlets}plets and written them to {\"extractedFrom_\"+filenameToExtract}')\n except IOError:\n print(f\"can't open file {filenameToExtract}\")" }, { "alpha_fraction": 0.6865285038948059, "alphanum_fraction": 0.696891188621521, "avg_line_length": 21.705883026123047, "blob_id": "d004b64afb686aa77878e871a9a0ccbafcb21414", "content_id": "88e9915fd8ba7dca2cb72c3d0d91dad41c0f3250", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "permissive", "max_line_length": 67, "num_lines": 17, "path": "/006fileRead.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\nfrom sys import argv\n\nscript, filename = argv[0], argv[1]\nprint(f\"args: {argv}\")\n\ntxt = open(filename)\nprint(f'file named \"{filename}\" contents:')\nprint(txt.read())\n\nprint(f'file named \"{filename}\" contents by line by line in list:')\ntxt = open(filename)\nprint(txt.readlines())\n\nprint(\"type the filename to read again:\")\nanotherFile = input(\"> \")\nprint(open(anotherFile).read())\n" }, { "alpha_fraction": 0.30571427941322327, "alphanum_fraction": 0.49714285135269165, "avg_line_length": 24, "blob_id": "af27031994f0a941c4a1f8a3892a4c43638131c9", "content_id": "eb3f3b8329fd1f4836694eac694cec5138d67a0d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "permissive", "max_line_length": 31, "num_lines": 14, "path": "/002math.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#Py3.5\nprint (\"Math operators\")\nprint (\"1+1=\", 1+1) #2\nprint (\"2-1=\", 2-1) #1\nprint (\"3/2=\", 3/2) #1.5\nprint (\"3//2=\", 3//2) #1\nprint (\"3*2=\", 3*2) #6\nprint (\"19%5=\", 19%5) #4\nprint (\"19<5=\", 19<5) #False\nprint (\"19<=19=\", 19<=19) #True\nprint (\"19>5=\", 19>5) #True\nprint (\"8>=9=\", 8>=9) #False\nprint (\"8==9=\", 8==9) #False\nprint ('0==0', 0==0) #True\n" }, { "alpha_fraction": 0.6227158904075623, "alphanum_fraction": 0.7046632170677185, "avg_line_length": 41.42856979370117, "blob_id": "7c362dc49653190d24821649a7a39567ec21c6ae", "content_id": "396a3482a7ad4724a1bcd545c99e7dea6cc68270", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2797, "license_type": "permissive", "max_line_length": 149, "num_lines": 85, "path": "/025consoleNCurses.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#https://docs.python.org/3/howto/curses.html\n#https://docs.python.org/2/library/curses.html\n\nimport curses #python -m pip install windows-curses\n#import curses.textpad\n#import time\n\nstdscr = curses.initscr()\n#curses.noecho() #To turn off mirroring pressed keys\n#curses.echo()\n\nGRAY_BASE = 15 #15 is max color without error\nGRAY_pair = 50\nRED_pair = 40\n\ncurses.start_color()\nstdscr.addstr(\"Type q to exit \", curses.color_pair(1)) #by default all color pairs are default \nstdscr.addstr(\"Type p to print string, and c to print char \", curses.color_pair(1)) #by default all color pairs are default \nstdscr.addstr(\"Type s to suppress display pressed buttons\", curses.color_pair(1)) #by default all color pairs are default \n\n\ncurses.init_color( GRAY_BASE, 500, 500, 500 ) #color (0-15), R,G,B (0-1000)\ncurses.init_pair( GRAY_pair, GRAY_BASE, curses.COLOR_BLACK )\nstdscr.addstr(2,0, \"RED ALERT!\", curses.color_pair(GRAY_pair))\n\ncurses.init_pair( RED_pair, GRAY_BASE, curses.COLOR_RED 
)\nstdscr.addstr(3,0, \"RED ALERT! 2\", curses.color_pair(RED_pair))\n\ncurses.curs_set(0) #make cursor invisible\n\n#Applications will also commonly need to react to keys instantly, without requiring the Enter key to be pressed;\n# this is called cbreak mode, as opposed to the usual buffered input mode.\n#curses.cbreak()\n\n#begin_x = 20\n#begin_y = 7\n#height = 5\n#width = 40\n#win = curses.newwin(height, width, begin_y, begin_x)\n#tb = curses.textpad.Textbox(win)\n#text = tb.edit()\nstdscr.addstr(8,1,'initial')#text.encode('utf_8')\n\n#hw = \"Hello world!\"\n\nwHeight= 8\nwWidth =40\nbegY = 4\nbegX = 20\nwin = curses.newpad(wHeight*10, wWidth) #newwin is fixed and gives err if text is bigger than window\n\ni = 0\nwhile 1:\n c = stdscr.getch()\n if c == ord('o'):\n win.addstr(f'{i}some Str ')\n \n win.refresh(i //20, 0, begY, begX, begY+wHeight, begX+wWidth) #[PadStartRow, PadStartCol, DispStartRow, DispStartcol, DispEndRow, DispEndCol]\n #if window is not a pad, just windowwin.refresh()\n i+=1\n elif c == ord('c'):\n win.clear()\n i=0\n win.refresh(i //20, 0, begY, begX, begY+wHeight, begX+wWidth)\n elif c == ord('s'):\n stdscr.addch(1,20,curses.ACS_HLINE)\n stdscr.addch(2,20,curses.ACS_PLUS)\n stdscr.addch(3,20,curses.ACS_LRCORNER)\n stdscr.addch(4,20,curses.ACS_DARROW)\n \n stdscr.addch(5,20,curses.ACS_LARROW)\n stdscr.addch(5,21,curses.ACS_LRCORNER)\n elif c == ord('p'):\n stdscr.addstr(10 +(i*2), 30, 'hel_\\u21B2_lo')\n stdscr.addstr(10 +(i*2), 40, 'При\\nвет')\n i+=1\n elif c == ord('c'):\n stdscr.addch('H')\n elif c == ord('s'):\n curses.noecho()\n elif c == ord('q'): break # Exit the while()\n elif c == curses.KEY_HOME: x = y = 0\n\ncurses.endwin()\n\n" }, { "alpha_fraction": 0.5230536460876465, "alphanum_fraction": 0.5491307377815247, "avg_line_length": 32.3636360168457, "blob_id": "6c0293e2e6ce556705d8b149646bdd379ff42c41", "content_id": "1db41e21c6d48e727f1f9c62e9d4106c114f4834", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2646, "license_type": "permissive", "max_line_length": 188, "num_lines": 77, "path": "/_01moduleNPLETS.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\nimport os\r\n\r\ndef makeListsOfFiles(DirPath): #from 010fileDirs.py\r\n fileFullPathList = []\r\n for root, dirs, files in os.walk(DirPath):\r\n for file in files:\r\n fileFullPathList.append(root + \"\\\\\" + file)\r\n return fileFullPathList\r\n \r\ndef filterText(text):\r\n def filterOnlyPrintable(str):\r\n return filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~ \\t\\n\\r\\x0b\\x0c', str) #import string #string.printable\r\n def filterOnlySeenableAndSpace(str):\r\n return filter(lambda x: x not in '\\t\\n\\r\\x0b\\x0c', str) #characters taken from end of string.printable\r\n \r\n filtered1 = filterOnlyPrintable(text)\r\n filtered2 = filterOnlySeenableAndSpace(filtered1)\r\n return filtered2\r\n \r\ndef fileReadFiltered(filePath):\r\n txt = open(filePath)\r\n return filterText(txt.read())\r\n \r\n \r\ndef combineNPletDicts(dict1, dict2):\r\n ret = dict1.copy()\r\n for i in dict2:\r\n if i in ret:\r\n ret[i]+=dict2[i]\r\n else:\r\n ret[i]=dict2[i]\r\n return ret\r\n \r\ndef nPlets(text, n): #from 012nPletsFromString.py\r\n ret = {}\r\n if n<1: return ret\r\n \r\n for i in range(len(text)-n+1):\r\n cut = text[i:i+n]\r\n if cut in ret:\r\n ret[cut] += 1\r\n else:\r\n ret[cut] = 1\r\n #print(cut)\r\n return ret\r\n 
\r\n#########################################################################\r\nimport unittest\r\n\r\nclass TestStringMethods(unittest.TestCase):\r\n def test_combineDicts(self):\r\n duDict1 = {'a ': 1, ' s': 2, 'sa': 3, 'am': 2, 'mp': 1}\r\n duDict2 = {'sa': 1, ' b': 3, 'am': 5, 'nd': 3, ' a': 1 ,'ot': 2}\r\n duDict3 = combineNPletDicts(duDict1, duDict2)\r\n self.assertEqual(duDict3, {'a ': 1, ' s': 2, 'sa': 4, 'am': 7, 'mp': 1, ' b': 3, 'nd': 3, ' a': 1, 'ot': 2})\r\n self.assertEqual(duDict1['am'], 2)\r\n self.assertEqual(duDict2['am'], 5)\r\n\r\n \r\n def test_isupper(self):\r\n self.assertTrue('FOO'.isupper())\r\n self.assertFalse('Foo'.isupper())\r\n\r\n def test_split(self):\r\n s = 'hello world'\r\n self.assertEqual(s.split(), ['hello', 'world'])\r\n # check that s.split fails when the separator is not a string\r\n with self.assertRaises(TypeError):\r\n s.split(2)\r\n\r\nif __name__ == \"__main__\":\r\n from sys import argv\r\n if len(argv)>1 and argv[1] == 'runTests':\r\n unittest.main(argv=['FakeArgvToRemoveErrorsIfCalledWithParams'])\r\n else:\r\n print('Runned just like a program without \"runTests\" parameter, argv: ', argv)\r\n" }, { "alpha_fraction": 0.5704125165939331, "alphanum_fraction": 0.5867709517478943, "avg_line_length": 21.433332443237305, "blob_id": "a71b28b7aa9da4430e5cb4c6120cc65fc265ff40", "content_id": "9f41b3a8b282e48a50259241249a70a14fb56b81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1406, "license_type": "permissive", "max_line_length": 103, "num_lines": 60, "path": "/012nPletsFromString.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\n\r\ndef nPlets(text, n):\r\n ret = {}\r\n if n<1: return ret\r\n \r\n for i in range(len(text)-n+1):\r\n cut = text[i:i+n]\r\n if cut in ret:\r\n ret[cut] += 1\r\n else:\r\n ret[cut] = 1\r\n #print(cut)\r\n return ret\r\n\r\ndef filterAllThatLess(dict, n):\r\n ret = {}\r\n for x in dict:\r\n if dict[x]>=n:\r\n ret[x] = dict[x]\r\n return ret\r\n\r\nst = '''a sample, and another(one)? 
And another one.{\r\n sasaAnass =!\r\n }\r\n \r\n text Ends'''\r\n#one = nPlets(st,1)\r\n#print(one)\r\ntwo = nPlets(st,2)\r\nprint(two)\r\ntwo2 = filterAllThatLess(two,2)\r\nprint(two2)\r\nthree = nPlets(st,3)\r\nprint(three)\r\nthree2 = filterAllThatLess(three,2)\r\nprint(three2)\r\n#four = nPlets(st,4)\r\n\r\nimport re\r\npattern = r'\\w+|[^\\w\\s]+'\r\nflags=re.UNICODE | re.MULTILINE | re.DOTALL\r\nregexp = re.compile(pattern, flags)\r\ntReg = regexp.findall(st)\r\nprint(tReg)\r\n#TODO try .finditer - https://docs.python.org/2/library/re.html#finding-all-adverbs-and-their-positions\r\n\r\nlistStr = []\r\nlistPos = []\r\nfor m in re.finditer(pattern, st):\r\n listStr.append(m.group(0))\r\n listPos.append(m.start())\r\n print (f'{m.start()}-{m.end()}: {m.group(0)}')\r\n \r\nlistZip = list(zip(listStr,listPos))\r\nprint(listZip)\r\nlistincl1 = [(listStr[i], listPos[i]) for i in range(len(listStr))]\r\nprint(listincl1)\r\nlistincl2 = [[listStr[i], listPos[i]] for i in range(len(listStr))]\r\nprint(listincl2)\r\n" }, { "alpha_fraction": 0.6984802484512329, "alphanum_fraction": 0.7069908976554871, "avg_line_length": 30.056604385375977, "blob_id": "7ecd3308c4b92ebe97b4db39dc1ae15267afdcce", "content_id": "6c08265634dd45d5bc4d084a3a00c19075cb16fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1645, "license_type": "permissive", "max_line_length": 77, "num_lines": 53, "path": "/036structReadTTFheader.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7 #https://docs.python.org/2/library/struct.html\n\nimport struct\n\nwith open('mplus-2p-medium.ttf', 'rb') as fp:\n\tfontCont = memoryview(fp.read()) \n\t\nheader = fontCont[:12]\nprint(bytes(header))\n\nfmt = '>HHHHHH' #> because TTF is big endian\n#https://docs.python.org/2/library/struct.html#format-characters\nprint(struct.unpack(fmt, header))\n\n#http://formats.kaitai.io/ttf/ttf.svg\n#https://github.com/wget/ttf2eot\n#https://docs.microsoft.com/ru-ru/typography/opentype/spec/\n#https://www.codeproject.com/Articles/2293/Retrieving-Font-Name-from-TTF-File\n#//This is TTF file header\n#typedef struct _tagTT_OFFSET_TABLE{\n# USHORT uMajorVersion;\n# USHORT uMinorVersion;\n# USHORT uNumOfTables;\n# USHORT uSearchRange;\n# USHORT uEntrySelector;\n# USHORT uRangeShift;\n#}TT_OFFSET_TABLE;\n#\n#//Tables in TTF file and there placement and name (tag)\n#typedef struct _tagTT_TABLE_DIRECTORY{\n# char szTag[4]; //table name\n# ULONG uCheckSum; //Check sum\n# ULONG uOffset; //Offset from beginning of file\n# ULONG uLength; //length of the table in bytes\n#}TT_TABLE_DIRECTORY;\n#\n#//Header of names table\n#typedef struct _tagTT_NAME_TABLE_HEADER{\n# USHORT uFSelector; //format selector. 
Always 0\n# USHORT uNRCount; //Name Records count\n# USHORT uStorageOffset; //Offset for strings storage, \n# //from start of the table\n#}TT_NAME_TABLE_HEADER;\n#\n#//Record in names table\n#typedef struct _tagTT_NAME_RECORD{\n# USHORT uPlatformID;\n# USHORT uEncodingID;\n# USHORT uLanguageID;\n# USHORT uNameID;\n# USHORT uStringLength;\n# USHORT uStringOffset; //from start of storage area\n#}TT_NAME_RECORD;" }, { "alpha_fraction": 0.5083194971084595, "alphanum_fraction": 0.518302857875824, "avg_line_length": 28.317073822021484, "blob_id": "1ebdba1d961b29a26696f20c2d921fee25aecd9a", "content_id": "5569a1da3adc3743325c86c8166978125924518d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1202, "license_type": "permissive", "max_line_length": 102, "num_lines": 41, "path": "/007fileReadWrite.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\nfrom sys import argv\n\nscript, filename = argv[0:2]\n\n\nprint(f\"if you want to erase {filename} type: yes\")\nquestion = str(input('>'))\n#print(question, question.upper(), question.capitalize(), question.upper(), question.upper() == \"YES\")\nif question.upper() == \"YES\":\n print('You choose to erase file')\n try:\n \n txt = open(filename)\n try:\n print(f\"file {filename} contents: {txt.read()}\")\n print(\"and at position 4 folows 8 chars\")\n txt.seek(4)\n print(txt.read(8))\n print()\n finally:\n txt.close()\n \n try:\n txt = open(filename, 'w')\n print('Truncating file')\n txt.truncate()\n print('now input 3 strings')\n def input_and_writen(file, prompt):\n file.write(input(prompt))\n file.write('\\n')\n input_and_writen(txt, '1 string> ')\n input_and_writen(txt, '2 string> ')\n input_and_writen(txt, '3 string> ')\n finally:\n print('closing file')\n txt.close()\n except IOError:\n print(f\"can't open file {filename}\")\n\n #target = open(filename\n" }, { "alpha_fraction": 0.575497031211853, "alphanum_fraction": 0.5969908833503723, "avg_line_length": 21.884614944458008, "blob_id": "d3d4f238cba24f0af180985c4f13d5661a0685b5", "content_id": "577e84d1d1ccc857792236757b90d48c30f2cabe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1861, "license_type": "permissive", "max_line_length": 87, "num_lines": 78, "path": "/011dictionary.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\r\n\r\nprint( {True: 'yes', 1: 'no', 1.0: 'maybe'} ) #prints {True: 'maybe'} \r\n#dictionary creation overwrites values of same key, key is not updated due optimization\r\n#bool is subclass of int, so True is 1\r\nprint(True == 1 == 1.0 ) #True\r\nprint( (hash(True), hash(1), hash(1.0)) ) #(1, 1, 1)\r\n\r\n####back to dict :)\r\nd = {}\r\nprint(d)\r\nstr = 'myKey'\r\nstr2 = 'myKey2'\r\nd[str]=1\r\nd[str2]=5\r\nd[str]+=2\r\nd['another'] = 'string of *&^%'\r\nd['myKey5']=0\r\nprint(d)\r\ne =d.pop('another')\r\nprint(e)\r\nprint(d)\r\n\r\n######combining dictionarys\r\nd2 = {'wow': 8, 'myKey5': 99, 'myKey4':44}\r\nprint(d2)\r\ndComb = {}\r\ndComb.update(d)\r\ndComb.update(d2)\r\nprint('one way to combine dictionary: ', dComb)\r\nprint('another way to combine dictionary: ', {**d, **d2})\r\n\r\n\r\n###### access if keys not in dict\r\n#one way\r\ntry:\r\n print(d['noSuchKey'])\r\nexcept KeyError:\r\n print ('Key error happen, but it was catched')\r\n#and another\r\nprint(d.get('keyNotInDict', 'default If Not foutd'))\r\n\r\n###sort by value using lambda\r\ndTupSorted= sorted(d.items(), 
key=lambda x: x[1])\r\ndfromSorted = dict(dTupSorted)\r\nprint('sorted by value tuple from dict:',dTupSorted, 'And dict from it:', dfromSorted)\r\n\r\n###### check if key is in dict:\r\nif str in d:\r\n print (f'{str} is in {d}')\r\nelse:\r\n print (f'ERR {str} not found is in {d}')\r\nif 'myKey2' in d:\r\n print (f'myKey2 is in {d}')\r\nelse:\r\n print (f'ERR myKey2 not found is in {d}')\r\nif 'another' not in d:\r\n print('another not in d')\r\nelse:\r\n print('ERR another is FOUND in d')\r\n \r\n#also can check item using method get\r\n#dict.get(key[, default])\r\nif d.get(\"test\") != None:\r\n print(\"Yes 'test' key exists in dict\") \r\nelse:\r\n print(\"No 'test' key does not exists in dict\") \r\n\r\nprint('loop1')\r\nfor i in d:\r\n print(i, d[i])\r\n \r\nprint('loop2')\r\nfor k, v in d.items():\r\n print(k, v)\r\n \r\nd.clear()\r\nprint(d)" }, { "alpha_fraction": 0.47074010968208313, "alphanum_fraction": 0.489672988653183, "avg_line_length": 27.365854263305664, "blob_id": "6077c8258b13cd6dd3eae13954efbdd3a86661eb", "content_id": "b11881fb24a44c8ce1b289a74ac259a9efd95a70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "permissive", "max_line_length": 73, "num_lines": 41, "path": "/045stringRecursiveParse.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "def parseString (expression):\n\n print(expression)##########\n\n openPar = ['('] \n closePar = [')'] \n queue = [] \n result = []\n \n def doSomethingUseful(string):\n result.append(string)\n \n \n for pos, char in enumerate(expression): \n #print(pos, char)\n if char in openPar: \n queue.append([pos, char]) \n elif char in closePar: \n if len(queue) == 0: #or i != queue.pop():\n #print('error', expression, result, queue, pos, char)\n return f\"no open parentheses for {pos}\" \n temp = queue.pop()\n #print('closePar', temp, queue)\n if len(queue) == 0:\n doSomethingUseful(parseString(expression[temp[0]+1:pos]))\n elif len(queue) == 0:\n doSomethingUseful(char)\n \n if len(queue)>0:\n #print('error at end', expression, result, queue)\n return \"unBalanced\"\n \n print(result)\n \n return '{' + ','.join(result) + '}'\n \n \nif __name__=='__main__':\n print(parseString('1+2*(8+3+9+1-(1*2)) +()-34'))\n print('----------------')\n print(parseString('(1+2+3+4-(5*6))'))" }, { "alpha_fraction": 0.6346704959869385, "alphanum_fraction": 0.6390423774719238, "avg_line_length": 20, "blob_id": "344687b99e8ac0a5a10a52a2983eccaceb34d67b", "content_id": "59d497e804faacd16f0189f86935a72a7f6f4bd1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "permissive", "max_line_length": 64, "num_lines": 25, "path": "/051csv.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "import csv\n\noutFileName = __file__+'_out.csv'\n\noutFile = open(outFileName, 'w', newline='')\noutWriter = csv.writer(outFile)\noutWriter.writerow(['a', 'b', 'cccc', 'dee', 'e', '2f', '3'])\noutWriter.writerow(['say something:', 'who?', 'You!', 'hello!'])\noutWriter.writerow([1,2,3,4.4, 5])\noutFile.close()\n\nwith open(outFileName, 'r') as rf:\n print(rf.read())\n\n\nwith open(outFileName, 'r') as rf:\n outReader = csv.reader(rf)\n outData = list(outReader)\n\nprint(outData)\nprint()\n\nprint('now print by row:')\nfor row in outData:\n print(row)" }, { "alpha_fraction": 0.6071987748146057, "alphanum_fraction": 0.6142410039901733, "avg_line_length": 
22.784313201904297, "blob_id": "82f42e61cfcbdf9c1fc90ddfb56a0821db5fcd6c", "content_id": "a1def051c8de3dcef1053dc6df80b9d8d0338ba4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1278, "license_type": "permissive", "max_line_length": 69, "num_lines": 51, "path": "/019customException.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/var python3\r\n#py3.7\r\nimport traceback\r\n\r\nclass BaseValidationError(ValueError):\r\n pass\r\n\r\nclass NameTooShortError(BaseValidationError):\r\n pass\r\n \r\nclass NameTooLongError(BaseValidationError):\r\n pass\r\n \r\nclass Name8long(BaseValidationError):\r\n pass\r\n \r\ndef validate(name):\r\n if len(name) < 5:\r\n raise NameTooShortError(name)\r\n if len(name) == 8:\r\n raise Name8long(name)\r\n if len(name) >10:\r\n raise NameTooLongError(name)\r\n\r\nprint(traceback.format_exc()) #do no error and print \"NoneType: None\"\r\n\r\ndef tryValidate(name):\r\n print('Try to validate:',name)\r\n try: \r\n validate(name)\r\n except NameTooShortError:\r\n print(traceback.format_exc())\r\n print('name too short')\r\n except NameTooLongError:\r\n print(traceback.format_exc())\r\n print('name too long')\r\n except Exception as err:\r\n print(traceback.format_exc())\r\n print(traceback.print_tb(err.__traceback__))\r\n print('just Exception')\r\n else:\r\n print('validation successful')\r\n finally:\r\n print('this message always prints')\r\n print()\r\n print()\r\n\r\ntryValidate('hi')\r\ntryValidate('aaaabbbb')\r\ntryValidate('hello everyone!')\r\ntryValidate('fiine')\r\n \r\n " }, { "alpha_fraction": 0.5931333899497986, "alphanum_fraction": 0.5978689789772034, "avg_line_length": 31.922077178955078, "blob_id": "c6163143fd80f5aa4202296aa3940751157ac0a5", "content_id": "c0438421bdc80cc077d5eda13532ca73048ba26e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2534, "license_type": "permissive", "max_line_length": 133, "num_lines": 77, "path": "/047stoppableThread.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "import time\nimport threading\nimport queue\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with stop_it() method. \n The thread itself has to checks regularly for the is_stopped() condition.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(StoppableThread, self).__init__(*args, **kwargs)\n self._stoper = threading.Event() #class threading.Event implements event objects. \n #An event manages a flag that can be set to true with the set() method and reset to false with the clear() method. \n #is_set() Return true if and only if the internal flag is true.\n #The wait(timeout=None) ###not used in this example### method blocks until the flag is true. The flag is initially false.\n \n\n def stop_it(self):\n self._stoper.set()\n\n def is_stopped(self):\n return self._stoper.isSet()\n\ntimeBetweenTasks = 4 #seconds\nclass KindOfSheduler(StoppableThread):\n \"\"\"Thread based on StoppableThread. do real work. 
\n Uses queue to pause ( analog of sleep() ) thread on timeout\n when paused thread can be waken up using queue\n thread can be stopped by sending queue string message 'stop' \n any other message just wakes up thread before timeout\"\"\"\n def __init__(self, *args, **kwargs):\n super(KindOfSheduler, self).__init__(*args, **kwargs)\n\n def run(self):\n while True:\n if self.is_stopped():\n return\n ##################\n #do real work here\n \n print(\"Hello, world!\", time.time())\n \n #end of real work!\n ##################\n try:\n task = q.get(True, timeBetweenTasks)#time.sleep(4)\n except queue.Empty:\n print('empty queue')\n else:\n print(task)\n print(type(task))\n if task == 'stop!':\n self.stop_it()\n \n\nq = queue.Queue()\n\nthread = KindOfSheduler()\nprint('thread before start',thread)\nthread.start()\nprint('thread started',thread)\ntime.sleep(15)\nq.put('do something1!')\nq.put('do something2!')\nq.put('do something3!')\nq.put('do something4!')\nq.put('stop!') #stop it using message, comment this line to test non message stop\nq.put('do something6!')\nq.put('do something7!')\nq.put('do something8!')\ntime.sleep(1)\nprint('thread still going?',thread)\nthread.stop_it() #stop it\nprint('thread should be stopped',thread)\nthread.join()\nprint('thread after join',thread)\nprint('end')" }, { "alpha_fraction": 0.6693401336669922, "alphanum_fraction": 0.6893911957740784, "avg_line_length": 32.45121765136719, "blob_id": "a2acf66d21433d3cc95dee2fef542221796e12b1", "content_id": "277577c016ca46c74facc7da683a954b4683b68d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2745, "license_type": "permissive", "max_line_length": 142, "num_lines": 82, "path": "/020collections.py", "repo_name": "ipavel83/Python", "src_encoding": "UTF-8", "text": "#py3.7\nimport collections\n\n#############\nordDict1 = collections.OrderedDict(one=1, andTwo='Two', three=3, four=4)\nprint(ordDict1)\nordDict1['five']=5\nordDict1['two'] = 2\nprint(ordDict1)\nprint(ordDict1.keys())\nprint([(key, ordDict1[key]) for key in ordDict1.keys()])\n\njustDict1 = {'something': 's', 'justAnother': 'ss2'}\n##############\nchainMap1 = collections.ChainMap(ordDict1, justDict1)\nprint(chainMap1)\nprint('from chainMap:', chainMap1['justAnother'])\n\n\n############NamedTuple\ntuple1 = ('one', 'andTwo', 3, 'four')\n#del tuple1[1] #TypeError: 'tuple' object does not support item deletion\nprint('just tuple:', tuple1)\n\nnamedTup = collections.namedtuple('SomeTupleName_noSpaces','first second third fourth')\nsome1 = namedTup(*tuple1)\nprint('named tuple fields:', some1._fields)\nprint('named tuple name one way: ', type(some1).__name__)\nprint('named tuple name other way: ', some1.__class__.__name__) #but don't access the __class__ attribute directly, it's bad practice.\n#print(dir(some1))\nprint(some1, 'Has length of', len(some1), \n ' and get it first element by 3 different ways: ', \n some1[0], some1.first, getattr(some1, 'first'))\n \nsome2 = namedTup(first = 'q',second = 'w', third = 'e', fourth = 'r')\nprint(some2, 'Has length of', len(some2), \n ' and get it first element by 3 different ways: ', \n some2[0], some2.first, getattr(some2, 'first')) \n\nfrom sys import getsizeof\nprint(getsizeof(tuple1), getsizeof(some1), getsizeof(some2))\n\n##############Counter\nstuff = collections.Counter()\nlootDict = {'bread': 1, 'apple':1 }\nstuff.update(lootDict) #Counter is adding elements not replacing it like simple dictionary\nmoreloot = {'towel':1, 
'bread':3}\nstuff.update(moreloot)\nprint(stuff)\nstuff.update(lootDict)\nprint(stuff)\nprint(len(stuff), sum(stuff.values()))\n\n################Python’s deque objects are implemented as doubly-linked lists\ndq = collections.deque()\ndq.append('first')\ndq.append('sec')\ndq.append('thiiird')\n#dq.put('dfs') #AttributeError: 'collections.deque' object has no attribute 'put'\n#print(dq.get()) #AttributeError: 'collections.deque' object has no attribute 'get'\nprint(dq)\nprint(dq.popleft())\nprint(dq)\nprint(dq.pop()) ########just usual pop right\nprint(dq)\nprint(dq.popleft())\n#print(dq.popleft()) #IndexError: pop from an empty deque\n\n#####this queue Python standard library is synchronized and provides locking semantics to support multiple concurrent producers and consumers.\n#for mulitprocessing Queue see Shared Job Queues - #from multiprocessing import Queue\nfrom queue import Queue\nqq = Queue()\nqq.put('q')\nqq.put('r')\nqq.put('qfgd')\nqq.put('a')\nprint(qq)\nprint(qq.get())\nprint(qq.get())\nprint(qq.get())\nprint(qq.get())\n#print(qq.get()) #Blocks / waits forever...\n" } ]
56
pombredanne/pyphp
https://github.com/pombredanne/pyphp
b2323076e7be0cb295132a5b6601f5771ac37203
e7d3dbe295a0b555e134c039a88bc1232dc29476
906c26a928c3b83f070aeddd5d045c084a4d4833
refs/heads/master
2018-03-23T11:54:42.248628
2008-10-13T19:12:39
2008-10-13T19:12:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5949820876121521, "alphanum_fraction": 0.602150559425354, "avg_line_length": 22.16666603088379, "blob_id": "bf4089fae676dc3b25be6f6fb0d2362a31c02c86", "content_id": "76cf71f59ae22f24e4891760572ee6bda4197d61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/bin/php.py", "repo_name": "pombredanne/pyphp", "src_encoding": "UTF-8", "text": "\nfrom pyphp.interpreter import PHPInterpreter\n\ndef php_main():\n import sys\n if len(sys.argv) == 2:\n php = PHPInterpreter(code=open(sys.argv[1]).read())\n else:\n php = PHPInterpreter(interactive=True)\n php.run()\n\nif __name__ == \"__main__\":\n php_main()\n" }, { "alpha_fraction": 0.4423076808452606, "alphanum_fraction": 0.5192307829856873, "avg_line_length": 6.5714287757873535, "blob_id": "ace4db2713148ed9a879fcb22f5074e872f90f27", "content_id": "82f5dc3df5a2278de0ae8110a5b295e2c347e428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 52, "license_type": "no_license", "max_line_length": 17, "num_lines": 7, "path": "/tests/test001.php", "repo_name": "pombredanne/pyphp", "src_encoding": "UTF-8", "text": "<?php\n\necho test_func();\necho 6 + 9;\necho 5 * 6;\n\n?>" }, { "alpha_fraction": 0.4410497546195984, "alphanum_fraction": 0.45045045018196106, "avg_line_length": 24.913705825805664, "blob_id": "69f3fad92717f94a84703c96ea6f8c929f460754", "content_id": "6f42263528bb0999290ec62851a13800a9925454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5106, "license_type": "no_license", "max_line_length": 73, "num_lines": 197, "path": "/pyphp/interpreter.py", "repo_name": "pombredanne/pyphp", "src_encoding": "UTF-8", "text": "\nimport sys\n\ntry:\n import ply\nexcept ImportError:\n print \"error: you need ply\"\n raise SystemExit\nelse:\n import ply.lex as lex\n import ply.yacc as yacc\n\nclass Interpreter(object):\n def __init__(self, code=\"\", interactive=False):\n self.code = code\n self.interactive = interactive\n \n self.vars = {}\n self.constants = {}\n self.functions = {}\n \n self.lexer = lex.lex(module=self)\n self.parser = yacc.yacc(module=self)\n \n def run(self):\n if not self.interactive:\n yacc.parse(self.code)\n else:\n print self.banner\n while 1:\n try:\n s = raw_input(\"php >> \")\n except EOFError:\n break\n if not s:\n continue\n yacc.parse(s)\n print\n\nclass PHPInterpreter(Interpreter):\n \n def __init__(self, *args, **kwargs):\n super(PHPInterpreter, self).__init__(*args, **kwargs)\n if self.interactive:\n self.lexer.begin(\"php\")\n def test_func():\n return \"hey this is pretty cool.\\n\"\n self.functions[\"test_func\"] = test_func\n \n def _get_banner(self):\n return \"pyPHP 0.1\"\n banner = property(_get_banner)\n \n states = (\n (\"php\", \"exclusive\"),\n )\n \n tokens = (\n \"ECHO\",\n \"DOLLAR\",\n \"SEMI\",\n \"LABEL\",\n \"SINGLE_QUOTE\",\n \"DOUBLE_QUOTE\",\n \"INT\",\n \"RPARA\",\n \"LPARA\",\n \"COMMA\",\n \"PLUS\",\n \"MINUS\",\n \"TIMES\",\n \"DIVIDE\",\n )\n \n precedence = (\n (\"left\", \"PLUS\", \"MINUS\"),\n (\"left\", \"TIMES\", \"DIVIDE\"),\n )\n \n t_php_ignore = \" \\t\"\n \n def t_php(self, t):\n r\"<\\?php\"\n t.lexer.begin(\"php\")\n def t_php_CLOSE_TAG(self, t):\n r\"\\?>\"\n t.lexer.begin(\"INITIAL\")\n \n def t_php_NEWLINE(self, t):\n r\"\\n+\"\n t.lexer.lineno += t.value.count(\"\\n\")\n \n t_php_DOLLAR = r\"\\$\"\n t_php_SEMI = 
r\";\"\n t_php_SINGLE_QUOTE = r\"'\"\n t_php_DOUBLE_QUOTE = r'\"'\n t_php_LPARA = r\"\\(\"\n t_php_RPARA = r\"\\)\"\n t_php_COMMA = r\",\"\n t_php_PLUS = r\"\\+\"\n t_php_MINUS = r\"-\"\n t_php_TIMES = r\"\\*\"\n t_php_DIVIDE = r\"/\"\n t_php_ECHO = r\"echo\"\n \n def t_php_INT(self, t):\n r\"\\d+\"\n try:\n t.value = int(t.value)\n except ValueError:\n t.value = 0\n return t\n \n reserved_map = {\n \"echo\": \"ECHO\"\n }\n \n def t_php_LABEL(self, t):\n r\"[A-Za-z_][\\w_]*\"\n t.type = self.reserved_map.get(t.value, \"LABEL\")\n return t\n \n def p_statement_list(self, p):\n \"\"\"statement_list : statement_list statement\n | statement\"\"\"\n pass\n \n def p_statement_expr(self, p):\n \"\"\"statement : expr SEMI\"\"\"\n if self.interactive:\n sys.stdout.write(str(p[1]) + \"\\n\")\n \n def p_statement_echo(self, p):\n \"\"\"statement : ECHO expr SEMI\"\"\"\n sys.stdout.write(str(p[2]))\n \n def p_expr_int(self, p):\n \"\"\"expr : INT\n | function_call\"\"\"\n p[0] = p[1]\n def p_expr_variable(self, p):\n \"\"\"expr : DOLLAR LABEL\"\"\"\n p[0] = self.vars.get(p[2], \"\")\n def p_expr_single_quote(self, p):\n \"\"\"expr : SINGLE_QUOTE SINGLE_QUOTE\"\"\"\n if len(p) == 3:\n p[0] = \"\"\n def p_expr_double_quote(self, p):\n \"\"\"expr : DOUBLE_QUOTE DOUBLE_QUOTE\"\"\"\n if len(p) == 3:\n p[0] = \"\"\n def p_expr_binop(self, p):\n \"\"\"expr : expr PLUS expr\n | expr MINUS expr\n | expr TIMES expr\n | expr DIVIDE expr\"\"\"\n if p[2] == \"+\":\n p[0] = p[1] + p[3]\n elif p[2] == \"-\":\n p[0] = p[1] - p[3]\n elif p[2] == \"*\":\n p[0] = p[1] * p[3]\n elif p[2] == \"/\":\n p[0] = p[1] / p[3]\n \n def p_function_call(self, p):\n \"\"\"function_call : LABEL function_params\"\"\"\n try:\n p[0] = self.functions[p[1]](*p[2])\n except KeyError: # undefined function\n print \"Undefined function: %s\" % p[1]\n except TypeError, e: # bad parameters\n print e\n def p_function_params(self, p):\n \"\"\"function_params : LPARA RPARA\n | LPARA function_argument_list RPARA\"\"\"\n if len(p) == 3:\n p[0] = []\n else:\n p[0] = p[2]\n def p_function_argument_list(self, p):\n \"\"\"function_argument_list : function_argument_list COMMA expr\n | expr\"\"\"\n if len(p) == 4:\n p[0] = p[1] + [p[3]]\n else:\n p[0] = [p[1]]\n \n def t_php_error(self, t):\n print \"illegal character in PHP state: %s\" % repr(t.value[0])\n t.lexer.skip(1)\n \n def t_error(self, t):\n print \"illegal character in INITIAL state: %s\" % repr(t.value[0])\n t.lexer.skip(1)\n \n def p_error(self, p):\n print \"Syntax error on line %d.\" % p.lineno\n" } ]
3
LucaCappelletti94/golomb_coding
https://github.com/LucaCappelletti94/golomb_coding
43e34747e7f5829bf665ea2253a8a05c61e12956
bd71b37616afe2bfe1ba8b4f411ae93811e88ccf
b0226730484e06e0925dcd789952ca8a4249433d
refs/heads/master
2022-05-02T19:29:07.129072
2022-04-05T13:51:23
2022-04-05T13:51:23
207,140,129
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7658536434173584, "alphanum_fraction": 0.7658536434173584, "avg_line_length": 67.66666412353516, "blob_id": "dd06e6f57e22bb8b6653676a71f7902bcb0b4459", "content_id": "197f71f7c28ded854a6a2ebc901836a036ccf31d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 105, "num_lines": 3, "path": "/golomb_coding/__init__.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "from .golomb_coding import golomb_coding, optimal_golomb_coding, bernoulli_golomb_coding, golomb_decoding\n\n__all__ = [\"golomb_coding\", \"optimal_golomb_coding\", \"bernoulli_golomb_coding\", \"golomb_decoding\"]" }, { "alpha_fraction": 0.469453364610672, "alphanum_fraction": 0.6141479015350342, "avg_line_length": 43.57143020629883, "blob_id": "2bd2e9d44dc66aeed01e569feda8dd9ac0aa9d72", "content_id": "29a85936b93117115926a1818136524d39cd14a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 93, "num_lines": 7, "path": "/tests/test_bernoulli_golomb_coding.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "from golomb_coding import bernoulli_golomb_coding\n\ndef test_bernoulli_golomb_coding():\n expected = ['10', '10', '10', '1001', '1010', '011', '011', '011', '011', '0100', '0100']\n sequential = bernoulli_golomb_coding([0,0,0,1,2,3,3,3,3,4,4])\n for e, s in zip(expected, sequential):\n assert e == s" }, { "alpha_fraction": 0.3024793267250061, "alphanum_fraction": 0.4677686095237732, "avg_line_length": 21.407407760620117, "blob_id": "9c7bbc43fb2dbefa042520ce8a9c2f1c579997a3", "content_id": "ae82d75234cbe0c27aaad45e73dc645fe4b223b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/tests/test_golomb_coding.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "\"\"\"Module providing test for the Golomb coding.\"\"\"\nfrom golomb_coding import golomb_coding\n\n\ndef test_golomb_coding():\n \"\"\"Test whether the golomb coding is comparable to ground truth.\"\"\"\n tests = {\n 0: \"10\",\n 1: \"110\",\n 2: \"111\",\n 3: \"010\",\n 4: \"0110\",\n 5: \"0111\",\n 6: \"0010\",\n 7: \"00110\",\n 8: \"00111\",\n 9: \"00010\",\n 10: \"000110\",\n 11: \"000111\",\n 12: \"000010\",\n 13: \"0000110\",\n 14: \"0000111\",\n 15: \"0000010\"\n }\n\n for n, u in tests.items():\n assert u == golomb_coding(n, 3)\n" }, { "alpha_fraction": 0.6549223065376282, "alphanum_fraction": 0.7046632170677185, "avg_line_length": 41.42856979370117, "blob_id": "7c362dc49653190d24821649a7a39567ec21c6ae", "content_id": "396a3482a7ad4724a1bcd545c99e7dea6cc68270", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3860, "license_type": "no_license", "max_line_length": 197, "num_lines": 91, "path": "/README.rst", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "golomb_coding\n=========================================================================================\n|travis| |sonar_quality| |sonar_maintainability| |codacy| |code_climate_maintainability| |pip| |downloads|\n\nDidactical python package implementing Golomb coding and decoding.\n\nAlso available, are optimal golomb coding and 
Bernoulli golomb coding.\n\nHow do I install this package?\n----------------------------------------------\nAs usual, just download it using pip:\n\n.. code:: shell\n\n pip install golomb_coding\n\nTests Coverage\n----------------------------------------------\nSince some software handling coverages sometimes get slightly different results, here's three of them:\n\n|coveralls| |sonar_coverage| |code_climate_coverage|\n\n\nUsage examples\n----------------------------------------------\nThe coding functions available from this package are `golomb_coding`, `bernoulli_golomb_coding` and `optimal_golomb_coding`.\nThe following examples are usages of Golomb coding.\n\n.. code:: python\n\n from golomb_coding import golomb_coding, golomb_decoding\n\n golomb_coding(0, 3) # 10\n golomb_coding(1, 3) # 110\n golomb_coding(2, 3) # 111\n golomb_coding(3, 3) # 010\n golomb_coding(4, 3) # 0110\n golomb_coding(5, 3) # 0111\n golomb_coding(6, 3) # 0010\n golomb_coding(7, 3) # 00110\n golomb_coding(8, 3) # 00111\n golomb_coding(9, 3) # 00010\n golomb_coding(10, 3) # 000110\n golomb_coding(11, 3) # 000111\n golomb_coding(12, 3) # 000010\n golomb_coding(13, 3) # 0000110\n golomb_coding(14, 3) # 0000111\n golomb_coding(15, 3) # 0000010\n\n assert 42 == golomb_decoding(golomb_coding(42, 3), 3)\n\n\n.. |travis| image:: https://travis-ci.org/LucaCappelletti94/golomb_coding.png\n :target: https://travis-ci.org/LucaCappelletti94/golomb_coding\n :alt: Travis CI build\n\n.. |sonar_quality| image:: https://sonarcloud.io/api/project_badges/measure?project=LucaCappelletti94_golomb_coding&metric=alert_status\n :target: https://sonarcloud.io/dashboard/index/LucaCappelletti94_golomb_coding\n :alt: SonarCloud Quality\n\n.. |sonar_maintainability| image:: https://sonarcloud.io/api/project_badges/measure?project=LucaCappelletti94_golomb_coding&metric=sqale_rating\n :target: https://sonarcloud.io/dashboard/index/LucaCappelletti94_golomb_coding\n :alt: SonarCloud Maintainability\n\n.. |sonar_coverage| image:: https://sonarcloud.io/api/project_badges/measure?project=LucaCappelletti94_golomb_coding&metric=coverage\n :target: https://sonarcloud.io/dashboard/index/LucaCappelletti94_golomb_coding\n :alt: SonarCloud Coverage\n\n.. |coveralls| image:: https://coveralls.io/repos/github/LucaCappelletti94/golomb_coding/badge.svg?branch=master\n :target: https://coveralls.io/github/LucaCappelletti94/golomb_coding?branch=master\n :alt: Coveralls Coverage\n\n.. |pip| image:: https://badge.fury.io/py/golomb-coding.svg\n :target: https://badge.fury.io/py/golomb-coding\n :alt: Pypi project\n\n.. |downloads| image:: https://pepy.tech/badge/golomb-coding\n :target: https://pepy.tech/badge/golomb-coding\n :alt: Pypi total project downloads \n\n.. |codacy| image:: https://api.codacy.com/project/badge/Grade/cb6aa47c254948e388b05a5dd8404c84\n :target: https://www.codacy.com/manual/LucaCappelletti94/golomb_coding?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=LucaCappelletti94/golomb_coding&amp;utm_campaign=Badge_Grade\n :alt: Codacy Maintainability\n\n.. |code_climate_maintainability| image:: https://api.codeclimate.com/v1/badges/67cf2724ca33dbcd33c4/maintainability\n :target: https://codeclimate.com/github/LucaCappelletti94/golomb_coding/maintainability\n :alt: Maintainability\n\n.. 
|code_climate_coverage| image:: https://api.codeclimate.com/v1/badges/67cf2724ca33dbcd33c4/test_coverage\n :target: https://codeclimate.com/github/LucaCappelletti94/golomb_coding/test_coverage\n :alt: Code Climate Coverate" }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6617646813392639, "avg_line_length": 33.5, "blob_id": "8a6c0610e0d2ff832c4e666dcc88ddab9c91e994", "content_id": "34ef12cf6d93bf9e6c6d24c540aa094738c86843", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/golomb_coding/__version__.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "\"\"\"Current version of package golomb_coding\"\"\"\n__version__ = \"1.0.6\"" }, { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 33.599998474121094, "blob_id": "98a20ad86008edfc49ed7b7d81107f0ff8179270", "content_id": "0820e31d8512a3c2d0f6a20f119eba05542cfa48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/tests/test_version.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "from validate_version_code import validate_version_code\nfrom golomb_coding.__version__ import __version__\n\ndef test_version():\n assert validate_version_code(__version__)" }, { "alpha_fraction": 0.6346704959869385, "alphanum_fraction": 0.637535810470581, "avg_line_length": 28.91428565979004, "blob_id": "e0244226cf6ca619d73d556fcaabaf9d26d4cf6f", "content_id": "3cbd5024038b57258da158688cb4934ba02ba108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2094, "license_type": "no_license", "max_line_length": 105, "num_lines": 70, "path": "/golomb_coding/golomb_coding.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "\"\"\"Module providing didactical tools to show encoding and decoding of the Golomb format.\"\"\"\nfrom math import ceil, log2\nfrom typing import List\n\nfrom minimal_binary_coding import minimal_binary_coding, decode_minimal_binary\nfrom unary_coding import inverted_unary, get_len_of_leading_inverted_unary, decode_leading_inverted_unary\n\n\ndef golomb_coding(n: int, b: int) -> str:\n \"\"\"Return string representing given number in golomb coding.\n\n Parameters\n ------------------------\n n: int\n Number to convert to golomb coding.\n b: int\n Module.\n \"\"\"\n return inverted_unary(n // b)+minimal_binary_coding(n % b, b)\n\n\ndef golomb_decoding(golomb_code: str, b: int) -> str:\n \"\"\"Return integer represented in provided golomb code.\n\n Parameters\n ------------------------\n golomb_code: str\n Golomb encoding to be converted back.\n b: int\n Module.\n \"\"\"\n inverted_unary_portion_len = get_len_of_leading_inverted_unary(golomb_code)\n decoded_inverted_unary_portion = decode_leading_inverted_unary(golomb_code)\n decoded_minimal_binary = decode_minimal_binary(\n golomb_code[inverted_unary_portion_len:],\n b\n )\n return decoded_inverted_unary_portion*b + decoded_minimal_binary\n\n\ndef optimal_golomb_coding(n: int, p: float) -> str:\n \"\"\"Return string representing given number in optimal golomb coding.\n\n Parameters\n ------------------------\n n: int\n Number to convert to optimal golomb coding.\n p: float\n Probability for given number 
n.\n \"\"\"\n return golomb_coding(n, ceil(-1 / log2(1-p)))\n\n\ndef bernoulli_golomb_coding(numbers: List[int]) -> List[str]:\n \"\"\"Return list of strings representing given numbers in bernoulli golomb coding.\n\n Parameters\n ------------------------\n numbers: List[int]\n List of numbers to convert to bernoulli golomb coding.\n \"\"\"\n frequencies = {}\n N = len(numbers)\n for n in numbers:\n frequencies[n] = frequencies.get(n, 0) + 1\n\n return [\n optimal_golomb_coding(n, frequencies[n]/N)\n for n in numbers\n ]\n" }, { "alpha_fraction": 0.6953125, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 37.400001525878906, "blob_id": "8c8fa4ab89a55e6987280aeb9293b15a2c8b7b6d", "content_id": "0f51fa7fa6ed08bab0185a666b62dbd7f1dbe318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/tests/test_encode_and_decode.py", "repo_name": "LucaCappelletti94/golomb_coding", "src_encoding": "UTF-8", "text": "\"\"\"Module providing test to test encoding and decoding of Golomb code.\"\"\"\nfrom golomb_coding import golomb_coding, golomb_decoding\n\n\ndef test_encode_and_decode():\n \"\"\"Test whether the coding and decoding process works.\"\"\"\n MAXIMAL_VALUES = 1000\n block_size = 3\n for i in range(MAXIMAL_VALUES):\n assert i == golomb_decoding(golomb_coding(i, block_size), block_size)\n" } ]
8
jamespratt41/vickers_graham_pratt_james_dataviz
https://github.com/jamespratt41/vickers_graham_pratt_james_dataviz
3a1c13a657f42951c00e8842b39e34e535d179ec
e7b850f284456a2f9458b095a3edde3a618b4377
9396656d72592fc78c0e2d03680ae5f306c390ae
refs/heads/master
2020-04-08T13:09:15.816666
2018-11-29T20:28:00
2018-11-29T20:28:00
159,377,556
0
0
null
2018-11-27T17:59:23
2018-11-29T20:17:18
2018-11-29T20:20:35
Python
[ { "alpha_fraction": 0.5797468423843384, "alphanum_fraction": 0.6278480887413025, "avg_line_length": 18.268293380737305, "blob_id": "13bbfeda637d39afc666bd6242a76ff9bceab320", "content_id": "8ff139fc6ebdd3e19b91a3a0bb70660bdd8330db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "no_license", "max_line_length": 94, "num_lines": 41, "path": "/Python/canMedalsPie.py", "repo_name": "jamespratt41/vickers_graham_pratt_james_dataviz", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt; plt.rcdefaults()\n\ngold = 0\nsilver = 0\nbronze = 0\n\n\nwith open('../data/canada_medals.csv') as f:\n reader = csv.reader(f)\n\n\n\n for row in reader:\n print(row)\n if row[0] == \"Gold\":\n gold = gold +1\n\n elif row[0] == \"Silver\":\n silver = silver +1\n\n else:\n bronze = bronze +1\n\n\n\n\npie chart for our shiny new data\n\nlabels = \"Gold,\", \"Silver\",\"Bronze\"\nsizes = [gold, silver,bronze]\ncolors = ['#ffD700', '#c0c0c0','#daa520']\nexplode = (0.1, 0.1, 0.15)\n\nplt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\nplt.axis('equal')\nplt.legend(labels, loc=1)\nplt.title(\"Total Canadian Medals\")\nplt.xlabel(\"Canadian Medals All Sports 1894-2014\")\nplt.show()\n" }, { "alpha_fraction": 0.5699481964111328, "alphanum_fraction": 0.6191709637641907, "avg_line_length": 15.54285717010498, "blob_id": "4184cb6c075ed52784101f943dc1ffb5bcf2c87e", "content_id": "7ece3bdef8f35929373ac258e11cb2c4f52f2ec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1158, "license_type": "no_license", "max_line_length": 96, "num_lines": 70, "path": "/Python/canMedals.py", "repo_name": "jamespratt41/vickers_graham_pratt_james_dataviz", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt; plt.rcdefaults()\n\ngold = 0\nsilver = 0\nbronze = 0\n\n\nwith open('../data/canada_medals.csv') as f:\n reader = csv.reader(f)\n\n\n\n for row in reader:\n print(row)\n if row[0] == \"Gold\":\n gold = gold +1\n\n elif row[0] == \"Silver\":\n silver = silver +1\n\n else:\n bronze = bronze +1\n\n\n\n\n\n\nobjects = ('Gold', 'Silver', 'Bronze',)\ny_pos = np.arange(len(objects))\nperformance = [gold,silver,bronze,]\ncolors = ['#ffD700', '#c0c0c0','#daa520']\n\n\nplt.barh(y_pos, performance, align='center', alpha=0.5, color = colors)\n\nplt.yticks(y_pos, objects)\nplt.xlabel('Amount')\nplt.title('Total Canadian Medals 1894-2014')\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# # chart for our shiny new data\n#\n# labels = \"Gold,\", \"Silver\",\"Bronze\"\n# sizes = [gold, silver,bronze]\n# colors = ['#ffD700', '#c0c0c0','#daa520']\n# explode = (0.1, 0.1, 0.15)\n#\n# plt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\n# plt.axis('equal')\n# plt.legend(labels, loc=1)\n# plt.title(\"Total Canadian Medals\")\n# plt.xlabel(\"Canadian Medals All Sports 1894-2014\")\n# plt.show()\n" }, { "alpha_fraction": 0.6002928018569946, "alphanum_fraction": 0.6354319453239441, "avg_line_length": 16.512821197509766, "blob_id": "3ea1a386731455e55bff05e5f5853c110a44ed22", "content_id": "2667c8452c8788125a85775c9078191be29db7dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 683, "license_type": "no_license", "max_line_length": 94, "num_lines": 39, "path": "/Python/canvsusabar.py", "repo_name": 
"jamespratt41/vickers_graham_pratt_james_dataviz", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncancount = 0\nusacount = 0\n\n\n\nwith open('../data/canada_vs_usa.csv') as f:\n reader = csv.reader(f)\n\n\n\n for row in reader:\n print(row)\n if row[0] == 'CAN':\n cancount = cancount +1\n\n else:\n usacount = usacount +1\n\n\n\n\n\n # chart for our shiny new data\n\nlabels = \"CAN,\", \"USA\"\nsizes = [cancount, usacount]\ncolors = ['lightblue', 'pink']\nexplode = (0.1, 0.15)\n\nplt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\nplt.axis('equal')\nplt.legend(labels, loc=1)\nplt.title(\"Can vs USA\")\nplt.xlabel(\"Hockey Medals CAN vs USA (B,S,G) 1894-2014\")\nplt.show()\n" }, { "alpha_fraction": 0.6135495901107788, "alphanum_fraction": 0.6459923386573792, "avg_line_length": 18.054546356201172, "blob_id": "6069847e472544d153172f7e15a31b0e0f2f3a0d", "content_id": "a9512cf9f2fb1e9ca3677dc4f4bd589c6c7f9101", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 94, "num_lines": 55, "path": "/Python/MVWaustraliapie.py", "repo_name": "jamespratt41/vickers_graham_pratt_james_dataviz", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmencount = 0\nwomencount = 0\n\n\n\nwith open('../data/australia_skiing.csv') as f:\n reader = csv.reader(f)\n\n\n\n for row in reader:\n print(row)\n if row[1] == 'Men':\n mencount = mencount +1\n\n else:\n womencount = womencount +1\n\n\n\n\n\n# objects = ('Men', 'Women',)\n# y_pos = np.arange(len(objects))\n# performance = [mencount,womencount,]\n# colors = ['lightblue', 'lightpink',]\n#\n#\n# plt.barh(y_pos, performance, align='center', alpha=0.5, color = colors)\n#\n# plt.yticks(y_pos, objects)\n# plt.xlabel('Amount')\n# plt.title('Australian Medals Men vs Women 1894-2014')\n#\n# plt.show()\n\n\n\n# chart for our shiny new data\n\nlabels = \"Men,\", \"Women\"\nsizes = [mencount, womencount]\ncolors = ['lightblue', 'pink']\nexplode = (0.1, 0.15)\n\nplt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\nplt.axis('equal')\nplt.legend(labels, loc=1)\nplt.title(\"Australian Medal Count M v W\")\nplt.xlabel(\"All Medals (B,S,G) 1894-2014\")\nplt.show()\n" }, { "alpha_fraction": 0.6557068824768066, "alphanum_fraction": 0.6740253567695618, "avg_line_length": 32.265625, "blob_id": "33e8d0671d95254dd144419fa6b5f8dba029e709", "content_id": "cc5f4692b8ae194d546eb19b181de83fa359f177", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2129, "license_type": "no_license", "max_line_length": 94, "num_lines": 64, "path": "/Python/old_template.py", "repo_name": "jamespratt41/vickers_graham_pratt_james_dataviz", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# figure out what data we want to use\ncategories = []\ninstalls = []\nratings = []\n\nwith open('data/googeplaystore.csv') as csvfile:\n reader = csv.reader(csvfile)\n line_count = 0\n\n for row in reader:\n # move the page column headers out of the actual data to get a clean dataset\n if line_count is 0: # this will be text, not data\n # print(\"pushing categories into a seperate array\")\n categories.append(row) # push the text into this array\n line_count += 1 # increment the line count fo rthe next loop\n else:\n # 
print(\"pushing ratings data into the ratings array\")\n ratingsData = row[2]\n ratingsData = ratingsData.replace(\"NaN\",\"0\")\n ratings.append(float(ratingsData))\n\n installs.append(np.char.strip(ratingsData, \"+\"))\n line_count += 1\n\n# get some ratings we can work with\n# how many ratings are above 4\n# how many ratings are below 2\n# how many ratings are in the middle\n\n\nnp_ratings = np.array(ratings) #turn a regular array into a numpy array\npopular_apps = np_ratings > 4\nunpopular_apps = np_ratings < 2\nprint(\"popular apps\", len(np_ratings[popular_apps]))\npercent_popular = len(np_ratings[popular_apps]) / len(np_ratings) * 100\nprint(percent_popular)\npercent_unpopular = len(np_ratings[unpopular_apps]) / len(np_ratings) * 100\nprint(percent_unpopular)\n\nkinda_popular = int(100 - (percent_popular + percent_unpopular))\nprint(kinda_popular)\n\n# print('processed', line_count, 'lines of data')\n# print(categories)\n# print('first row of data:', installs[0])\n# print('last row of data:', installs[-1])\n\n# chart for our shiny new data\n\nlabels = \"Sucks,\", \"Meh\", \"I LOVE IT\"\nsizes = [percent_unpopular, kinda_popular, percent_popular]\ncolors = ['red', 'yellow', 'green']\nexplode = (0.1, 0.1, 0.15)\n\nplt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\nplt.axis('equal')\nplt.legend(labels, loc=1)\nplt.title(\"Do we love us some apps?\")\nplt.xlabel(\"User Ratings - App Installs (10000+ apps)\")\nplt.show()\n" }, { "alpha_fraction": 0.6135495901107788, "alphanum_fraction": 0.6459923386573792, "avg_line_length": 18.054546356201172, "blob_id": "923d7e689aa6fd722fc27e76adf3b3ad4cb46f65", "content_id": "b525592ac15ff26d0f313f4db822e709dd2c54a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 96, "num_lines": 55, "path": "/Python/MVWaustralia.py", "repo_name": "jamespratt41/vickers_graham_pratt_james_dataviz", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmencount = 0\nwomencount = 0\n\n\n\nwith open('../data/australia_skiing.csv') as f:\n reader = csv.reader(f)\n\n\n\n for row in reader:\n print(row)\n if row[1] == 'Men':\n mencount = mencount +1\n\n else:\n womencount = womencount +1\n\n\n\n\n\nobjects = ('Men', 'Women',)\ny_pos = np.arange(len(objects))\nperformance = [mencount,womencount,]\ncolors = ['lightblue', 'lightpink',]\n\n\nplt.barh(y_pos, performance, align='center', alpha=0.5, color = colors)\n\nplt.yticks(y_pos, objects)\nplt.xlabel('Amount')\nplt.title('Australian Medals Men vs Women 1894-2014')\n\nplt.show()\n\n\n\n# chart for our shiny new data\n#\n# labels = \"Men,\", \"Women\"\n# sizes = [mencount, womencount]\n# colors = ['lightblue', 'pink']\n# explode = (0.1, 0.15)\n#\n# plt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\n# plt.axis('equal')\n# plt.legend(labels, loc=1)\n# plt.title(\"Australian Medal Count M v W\")\n# plt.xlabel(\"All Medals (B,S,G) 1894-2014\")\n# plt.show()\n" } ]
6
architnarang/Twitter_Sentiment_Analysis
https://github.com/architnarang/Twitter_Sentiment_Analysis
4e88fbb9246edb03394737d919b8056ca658ef09
0d2640b44490a7fc2bc8c0b7234ae3ca4906a9b4
4662307a2564045d2be05948f4c1bc761fecf1c9
refs/heads/master
2021-01-06T16:41:30.830498
2020-02-19T11:56:28
2020-02-19T11:56:28
241,402,309
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5879354476928711, "alphanum_fraction": 0.5952987670898438, "avg_line_length": 28.33241081237793, "blob_id": "2eeaad8ba58141c714f0273c11150379b515e3f7", "content_id": "de44374e9b4792d083e58e5b30a8efd14f50f418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10593, "license_type": "no_license", "max_line_length": 222, "num_lines": 361, "path": "/Twitter_Sentiment_Analysis.py", "repo_name": "architnarang/Twitter_Sentiment_Analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tweepy\nimport jsonpickle\nimport csv\nimport time\nst = time.time()\nAPI_KEY=\"z5fwBpqXKReGZdR9qI2VyYsvs\"\nAPI_SECRET=\"5GaSPBTvEyKvv4G2rku1d13Lw05iz6MbnwKCprfjatYClkmsBq\"\nACCESS_TOKEN=\"1479289146-kvsxXbGz5mWKmunsKpeoeJwOqwZ7VZUdeNXggx4\"\nACCESS_TOKEN_SECRET=\"kAk1y1Jk75FFU8jnILW57qXoiRT4bTTFikbTYEZBMDdob\"\nauth = tweepy.OAuthHandler(API_KEY, API_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)\nprint(api.me().name)\n\n\n# In[2]:\n\n\ntweetsPerQuery = 100#this is the maximum provided by API\nmax_tweets = 100 # just for the sake of While loop\nfName = 'sharomo.txt' # where i save the tweets\n\n\n# In[3]:\n\n\nsince_id = None\nmax_id = -1\ntweet_count = 0\nprint(\"Downloading the tweeets..takes some time..\")\n\nsearch_query=\"\\\"#SwachhBharat\\\"\"\n\n\n# In[4]:\n\n\nsearch_query=\"\\\"#SwachhBharat\\\"\"\nx=0\nwith open(fName,'w') as f:\n print(\"Downloading hashtag\" + search_query)\n \n while(tweet_count<max_tweets):\n try:\n if(max_id<=0):\n if(not since_id):\n new_tweets = api.search(q=search_query,count=tweetsPerQuery,lang=\"en\",tweet_mode='extended')\n else:\n new_tweets = api.search(q=search_query,count=tweetsPerQuery,lang=\"en\",tweet_mode='extended',since_id=since_id)\n \n else:\n if(not since_id):\n new_tweets = api.search(q=search_query,count=tweetsPerQuery,lang=\"en\",tweet_mode='extended',max_id=str(max_id-1))\n else:\n new_tweets = api.search(q=search_query,count=tweetsPerQuery,lang=\"en\",tweet_mode='extended',max_id=str(max_id-1),since_id=since_id)\n \n # Tweets Exhausted\n if(not new_tweets):\n print(\"No more tweets found!!\")\n break\n # write all the new_tweets to a json file\n for tweet in new_tweets:\n f.write(jsonpickle.encode(tweet._json,unpicklable=False)+'\\n')\n tweet_count+=1\n print(\"Successfully downloaded {0} tweets\".format(tweet_count))\n max_id=new_tweets[-1].id\n # in case of any error\n except tweepy.TweepError as e:\n print(\"Some error!!:\"+str(e))\n break\nend = time.time()\nprint(\"A total of {0} tweets are downloaded and saved to {1}\".format(tweet_count,fName))\nprint(\"Total time taken is \",end-st,\"seconds.\")\n\n\n# In[5]:\n\n\nimport json\nimport csv\nf = open('som2.csv','a',encoding='utf-8')\ncsvWriter = csv.writer(f)\nheaders=['full_text','retweet_count','user_followers_count','favorite_count','place','coordinates','geo','created_at','id_str']\ncsvWriter.writerow(headers)\nfor inputFile in ['sharomo.txt']:#all the text-file names you want to convert to Csv in the sae folder as this code\n tweets = []\n for line in open(inputFile, 'r'):\n tweets.append(json.loads(line))\n\n print('HI',len(tweets))\n \n count_lines=0\n\n for tweet in tweets:\n try:\n 
csvWriter.writerow([tweet['full_text'],tweet['retweet_count'],tweet['user']['followers_count'],tweet['favorite_count'],tweet['place'],tweet['coordinates'],tweet['geo'],tweet['created_at'],str(tweet['id_str'])])\n            count_lines+=1\n        except Exception as e:\n            print(e)\n    print(count_lines)\n\n\n# In[6]:\n\n\nimport pandas as pd\ndf = pd.read_csv('som2.csv', encoding = 'unicode_escape')\ndf.info()\n\n\n# In[7]:\n\n\ndf\n\n\n# In[8]:\n\n\nprint(len(df.index))#14195\nserlis=df.duplicated().tolist()\nprint(serlis.count(True))#112\nserlis=df.duplicated(['full_text']).tolist()\nprint(serlis.count(True))#8585\n\n\n# In[9]:\n\n\ndf=df.drop_duplicates(['full_text'])\ndf.head()\n\n\n# In[10]:\n\n\ndf=df.drop([\"place\",\"coordinates\",\"geo\",\"id_str\"],axis=1)\ndf\n\n\n# In[71]:\n\n\nimport csv, json\ncsvfile = open('som2.csv', 'r')\njsonfile = open('file.json', 'w')\n\n# the first row of som2.csv already holds the column names, so let DictReader take them from the file\nreader = csv.DictReader(csvfile)\nfor row in reader:\n    json.dump(row, jsonfile)\n    jsonfile.write('\\n')\n\ncsvfile.close()\njsonfile.close()\n\n\n# In[3]:\n\n\n\nimport string\nimport re\nimport csv\n\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\nclass TweetCleaner:\n    def __init__(self, remove_stop_words=False, remove_retweets=False, stopwords_file='NLTK_DEFAULT'):\n        \"\"\"\n        clean unnecessary twitter data\n        remove_stop_words = True if stopwords are to be removed (default = False)\n        remove_retweets = True if retweets are to be removed (default = False)\n        stopwords_file = file containing stopwords(one on each line) (default: nltk english stopwords)\n        \"\"\"\n        if remove_stop_words:\n            if stopwords_file == 'NLTK_DEFAULT':\n                self.stop_words = set(stopwords.words('english'))\n            else:\n                stop_words = set()\n                with open(stopwords_file,'r') as f:\n                    for line in f:\n                        line = line.replace('\\n','')\n                        stop_words.add(line.lower())\n                self.stop_words = stop_words\n        else:\n            self.stop_words = set()\n        \n        self.remove_retweets = remove_retweets\n        \n        self.punc_table = str.maketrans(\"\", \"\", string.punctuation) # to remove punctuation from each word in tokenize\n\n\n    # In[4]:\n\n    def compound_word_split(self, compound_word):\n        \"\"\"\n        Split a given compound word(string) and return list of words in given compound_word\n        Ex: compound_word='pyTWEETCleaner' --> ['py', 'TWEET', 'Cleaner']\n        \"\"\"\n        matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', compound_word)\n        return [m.group(0) for m in matches]\n\n\n    # In[5]:\n\n    def remove_non_ascii_chars(self, text):\n        \"\"\"\n        return text after removing non-ascii characters i.e. characters with ascii value >= 128\n        \"\"\"\n        return ''.join([w if ord(w) < 128 else ' ' for w in text])\n\n\n    # In[6]:\n\n    def remove_hyperlinks(self,text):\n        \"\"\"\n        return text after removing hyperlinks\n        \"\"\"\n        return ' '.join([w for w in text.split(' ') if not 'http' in w])\n\n\n    # In[7]:\n\n    def get_cleaned_text(self, text):\n        \"\"\"\n        return cleaned text(string) for provided tweet text(string)\n        \"\"\"\n        cleaned_text = text.replace('\\\"','').replace('\\'','').replace('-',' ')\n        cleaned_text = self.remove_non_ascii_chars(cleaned_text)\n        \n        # retweet\n        if re.match(r'RT @[_A-Za-z0-9]+:',cleaned_text): # retweet\n            if self.remove_retweets: return ''\n            retweet_info = cleaned_text[:cleaned_text.index(':')+2] # 'RT @name: ' will be again added in the text after cleaning\n            cleaned_text = cleaned_text[cleaned_text.index(':')+2:]\n        else:\n            retweet_info = ''\n        cleaned_text = self.remove_hyperlinks(cleaned_text)\n        cleaned_text = cleaned_text.replace('#','HASHTAGSYMBOL').replace('@','ATSYMBOL') # to avoid being removed while removing punctuations\n        \n        tokens = [w.translate(self.punc_table) for w in word_tokenize(cleaned_text)] # remove punctuations and tokenize\n        tokens = [w for w in tokens if not w.lower() in self.stop_words and len(w)>1] # remove stopwords and single length words\n        cleaned_text = ' '.join(tokens)\n        \n        cleaned_text = cleaned_text.replace('HASHTAGSYMBOL','#').replace('ATSYMBOL','@')\n        cleaned_text = retweet_info + cleaned_text\n        \n        return cleaned_text\n\n\n    # In[8]:\n\n    def get_cleaned_tweet(self, tweet):\n        \"\"\"\n        return a json dictionary of cleaned data from provided original tweet json dictionary\n        \"\"\"\n        if not \"created_at\" in tweet: return None # remove info about deleted tweets\n        if not tweet['lang'] == 'en': return None # remove tweets in non english language\n        if not tweet['in_reply_to_status_id'] == None or not tweet['in_reply_to_user_id'] == None: return None # remove comments of any tweet\n        \n        # tweets were downloaded with tweet_mode='extended', so the text lives under 'full_text'\n        cleaned_text = self.get_cleaned_text(tweet['full_text'])\n        if cleaned_text == '': return None\n\n        cleaned_tweet = {}\n        \n        cleaned_tweet['created_at'] = tweet['created_at']\n        cleaned_tweet['full_text'] = cleaned_text\n        \n        cleaned_tweet['user'] = {}\n        cleaned_tweet['user']['favourite_count'] = tweet['user']['favourites_count'] # the twitter api names this user field 'favourites_count'\n        \n        \n        cleaned_tweet['retweet_count'] = tweet['retweet_count']\n\n        return cleaned_tweet\n\n\n    # In[13]:\n\n    def clean_tweets(self, input_file, output_file='cleaned_tweets.json'):\n        \"\"\"\n        input_file: name or path of input twitter json data where each line is a json tweet\n        output_file: file name or path where cleaned twitter json data is stored (default='cleaned_tweets.json')\n        \"\"\"\n        in_file = open(input_file, 'r')\n        out_file = open(output_file, 'w')\n        \n        while True:\n            line = in_file.readline()\n            if line=='': break\n            tweet = json.loads(line)\n            \n            cleaned_tweet = self.get_cleaned_tweet(tweet)\n            if cleaned_tweet == None: continue\n            \"\"\"\n            if 'retweeted_status' in tweet: # will be present if it is a retweet\n                cleaned_tweet['retweeted_status'] = self.get_cleaned_tweet(tweet['retweeted_status'])\n                if cleaned_tweet['retweeted_status'] == None: continue\n            \"\"\"\n            \n            out_file.write(json.dumps(cleaned_tweet)+'\\n')\n        \n        in_file.close()\n        out_file.close()\n\n\n# In[17]:\n\n\n\nif __name__ == '__main__':\n    \n    # placeholder tweet used only to demonstrate get_cleaned_text below\n    sample_text = 'RT @cleanindia: Lets keep our streets clean https://twitter.com #SwachhBharat'\n    \n    tc = TweetCleaner(remove_stop_words=False, remove_retweets=False)\n    tc.clean_tweets(input_file='sharomo.txt', output_file='cleaned_tweets.json') # clean tweets from entire file (one json tweet per line)\n    print('Output with remove_stop_words=False, remove_retweets=False:')\n    print(tc.get_cleaned_text(sample_text), '\\n')\n    \n    tc = TweetCleaner(remove_stop_words=False, remove_retweets=True)\n    print('Output with remove_stop_words=False, remove_retweets=True:')\n    print(tc.get_cleaned_text(sample_text), '\\n')\n    \n    tc = TweetCleaner(remove_stop_words=True, remove_retweets=False, stopwords_file='user_stopwords.txt')\n    print('Output with remove_stop_words=True, remove_retweets=False:')\n    print(tc.get_cleaned_text(sample_text))\n\n\n# In[80]:\n\n\nimport re\nfor i in range(len(df)):\n    txt = df.loc[i][\"full_text\"]\n    txt=re.sub(r'@[A-Z0-9a-z_:]+','',txt)#replace username-tags\n    txt=re.sub(r'^[RT]+','',txt)#replace RT-tags\n    txt = re.sub('https?://[A-Za-z0-9./]+','',txt)#replace URLs\n    txt=re.sub(\"[^a-zA-Z]\", \" \",txt)#replace hashtags\n    df.at[i,\"full_text\"]=txt\n\n\n# In[ ]:\n\n\n\n\n" } ]
1
wjddbwns/dbot
https://github.com/wjddbwns/dbot
f000119e88ce70d795574b20550d9278bd37dad8
6b71bc1add256ce783736ea40e681a947b272edc
a83e663af64253f696cb4d377c4299e686398d1b
refs/heads/master
2022-12-04T02:46:13.847133
2020-08-18T03:55:08
2020-08-18T03:55:08
288,341,483
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7353603839874268, "alphanum_fraction": 0.7646396160125732, "avg_line_length": 40.66666793823242, "blob_id": "1ee6ad9f3b2ce8198de0c06681191581c04fac79", "content_id": "a65435320e4db1023a88961a8e6ea69f7bc27c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "no_license", "max_line_length": 120, "num_lines": 21, "path": "/bot.py", "repo_name": "wjddbwns/dbot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nimport os\n\nclient = commands.Bot(command_prefix = '-')\n\[email protected]\nasync def on_ready():\n\n# [discord.Status.online = 온라인],[discord.Status.idle = 자리비움],[discord.Status.dnd = 다른용무],[discord.Status.offline = 오프라인]\nawait client.change_presence(status=discord.Status.online)\n\nawait client.change_presence(activity=discord.Game(name=\"관리 하는중\"))\n#await client.change_presence(activity=discord.Streaming(name=\"스트림 방송중\", url='링크'))\n#await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=\"노래 듣는중\"))\n#await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"영상 시청중\"))\n\nprint(\"파엠커뮤니티:\",client.user.name,\"697449476603904011:\",client.user.id,\"1:\",discord.__version__)\n\n\nclient.run(os.environ['Njk3NDQ5NDc2NjAzOTA0MDEx.Xo3ccQ.Pcebl-LdA3rVjxD5ijYdYeHIq-E'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n" } ]
1
MatthewGadsden/WarehouseManager
https://github.com/MatthewGadsden/WarehouseManager
670868a23bebb10284ce2608cf73cd66de0aa2b8
deec001f0d70710b6e405e74f9e324c758101d93
75097c05fc762b21a19be38a16a188686eadafe6
refs/heads/master
2023-06-08T20:14:01.057927
2021-06-19T08:43:46
2021-06-19T08:43:46
237,360,908
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5593287944793701, "alphanum_fraction": 0.5912904739379883, "avg_line_length": 42.92982482910156, "blob_id": "c02762f3b80c603cb7dadbf45405bb156fbfdaec", "content_id": "030a74b4c56dbc3e319138781b3ec0836092d788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2503, "license_type": "no_license", "max_line_length": 124, "num_lines": 57, "path": "/cLibrary/guis/popups/DashDimensions.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.guis.popups.ErrorWindow import ErrorWindow\n\n\nclass DashDimensions(StandardPopUp):\n\n def __init__(self, master, controller, *args, **kwargs):\n \"\"\"\n Initialise from super\n :param master: master window\n :param controller: program controller\n \"\"\"\n super(DashDimensions, self).__init__(master, controller, width=230, height=85, *args, **kwargs)\n\n def load_display(self):\n \"\"\"\n Load display\n :return: None\n \"\"\"\n rows_label = Label(self, text=\"Rows:\", relief=\"flat\", fg=\"gray40\", anchor=W)\n rows_label.place(width=100, height=20, x=(self.winfo_reqwidth()//2)-105, y=(self.winfo_reqheight()//2)-35)\n self.rows_var = StringVar()\n rows_entry = Entry(self, relief=\"groove\", textvariable=self.rows_var)\n rows_entry.place(width=100, height=20, x=(self.winfo_reqwidth() // 2) + 5, y=(self.winfo_reqheight() // 2) - 35)\n\n cols_label = Label(self, text=\"Columns:\", relief=\"flat\", fg=\"gray40\", anchor=W)\n cols_label.place(width=100, height=20, x=(self.winfo_reqwidth()//2)-105, y=(self.winfo_reqheight()//2)-10)\n self.cols_var = StringVar()\n cols_entry = Entry(self, relief=\"groove\", textvariable=self.cols_var)\n cols_entry.place(width=100, height=20, x=(self.winfo_reqwidth()//2)+5, y=(self.winfo_reqheight()//2)-10)\n\n with open(\"resources/data/config.txt\", \"r\") as file:\n line = file.readline().strip(\"\\n\").split(\",\")\n self.rows_var.set(line[0])\n self.cols_var.set(line[1])\n\n ok_button = Button(self, text=\"OK\", relief=\"groove\", pady=13, bg=\"SteelBlue1\",\n command=lambda :(self.ok(),))\n ok_button.place(x=self.winfo_reqwidth()-65, y=self.winfo_reqheight()-25, width=55, height=20)\n\n cancel_button = Button(self, text=\"Cancel\", relief=\"groove\", pady=30, bg=\"grey\", command=lambda :(self.on_close(),))\n cancel_button.place(x=self.winfo_reqwidth()-125, y=self.winfo_reqheight()-25, width=55, height=20)\n\n def ok(self):\n \"\"\"\n Save dash dimensions\n :return: None\n \"\"\"\n try:\n rows = int(self.rows_var.get())\n cols = int(self.cols_var.get())\n self.on_close()\n self.controller.wigs_w = cols\n self.controller.wigs_h = rows\n self.master.update_dash()\n except Exception as e:\n ErrorWindow(self,e,'U001',)" }, { "alpha_fraction": 0.6917808055877686, "alphanum_fraction": 0.6917808055877686, "avg_line_length": 25.545454025268555, "blob_id": "cab562adaec5242db4b4aee035c78ab6622c0256", "content_id": "46fbc775b4b662e48820b85a9d96ce2894e87aee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/cLibrary/structure/item/StockRecord.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from typing import Set, List, Union\nfrom cLibrary.structure.item.Item import Item\nfrom cLibrary.structure.warehouse.Area import Area\n\n\nclass StockRecord:\n\n def __init__(self, item: Item, 
location, qty: int):\n self.item = item\n self.location = location\n self.qty = qty\n" }, { "alpha_fraction": 0.5666795372962952, "alphanum_fraction": 0.5902590155601501, "avg_line_length": 41.40983581542969, "blob_id": "dfc676fdae0c2848172608509ba71122e9d473f1", "content_id": "87b3c8529870d6291d6f79abf6d55db3085369c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2587, "license_type": "no_license", "max_line_length": 119, "num_lines": 61, "path": "/cLibrary/guis/popups/ErrorWindow.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.widgets.Output import Output\n\n\nclass ErrorWindow(StandardPopUp):\n\n def __init__(self, master, error_msg, error_code='ZZZZ', cust_sugg=None, *args, **kwargs):\n super(ErrorWindow, self).__init__(master, width=400, height=170, title='ERROR : '+error_code.upper(),\n icon=\"resources/img/error.ico\", *args, **kwargs)\n\n img = PIL.Image.open(\"resources/img/error.ico\").resize((40, 40), PIL.Image.ANTIALIAS)\n self.n_img = PIL.ImageTk.PhotoImage(img)\n osa = Label(self, image=self.n_img, anchor=\"n\", bd=1)\n osa.place(x=20, y=20)\n\n self.suggest_action(error_code, cust_sugg)\n self.expanded = False\n\n code = Label(self, text=\"Error Code : \" + error_code, font=\"bold 15\")\n code.place(x=70, y=30)\n\n self.smb = Button(self, text=\"show more\", command=self.show_more)\n self.smb.place(x=30, y=130)\n\n self.output = Output(self, relief=\"groove\", wrap=WORD, )\n self.output.r_insert(error_msg)\n\n ok = Button(self, text=\"OK\", bg=\"white\", relief=\"groove\", command=self.on_close)\n ok.place(x=400 - 110, y=130, width=90, height=30)\n\n def show_more(self):\n expand_val = 70\n if not self.expanded:\n self.geometry('{}x{}'.format(self.winfo_width(), self.winfo_height()+expand_val))\n self.output.place(x=10, y=175, width=380, height=50)\n self.smb['text'] = \"show less\"\n self.update()\n else:\n self.geometry('{}x{}'.format(self.winfo_width(), self.winfo_height()-expand_val))\n self.output.place_forget()\n self.smb['text'] = \"show more\"\n self.update()\n self.expanded = not self.expanded\n\n def suggest_action(self, code, cust_sugg):\n def suggestion(msg):\n output = Output(self, relief=\"flat\", wrap=WORD, bg=self['bg'])\n output.place(x=10, y=70, width=380, height=50)\n output.r_insert(msg)\n\n code_id = code[0]\n\n if cust_sugg is not None:\n suggestion(cust_sugg)\n elif code_id == 'U':\n suggestion('User Input Error Detected: Make sure to check you have entered the correct values into fields')\n elif code_id == \"I\":\n suggestion('Import Error Detected: It seems something failed to import properly, check what you\\'re '\n 'trying to import is correct')\n else:\n suggestion('Unknown Error: Please restart the program. 
If the problem persists see Admin')\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5997638702392578, "avg_line_length": 30.407407760620117, "blob_id": "04f6299b9ad62fbf1e294ece8a0f979290881f58", "content_id": "0e82a2fc041c56ed9a75b7dd3a0f2633dc1ee481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/cLibrary/widgets/controlPanel/DSP.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.controlPanel.HexColour import HexColour\n\n\nclass DSP(Frame):\n\n def __init__(self, master, pickslot, width=100, height=20):\n super().__init__(master)\n self.avehitsday = 0\n id = 0\n best_hits = pickslot.warehouse.get_best_hits()\n\n if not pickslot.allocations:\n pass\n else:\n self.avehitsday = pickslot.get_item_avehitsday()\n id = pickslot.allocations[0].item.item_id\n\n OldRange = (best_hits - 0)\n NewRange = (255 - 0)\n NewValue = (((self.avehitsday - 0) * NewRange) / OldRange) // 1\n\n self.color = HexColour(255, 255 - int(NewValue), 0)\n\n label = Label(self, text=id, bg=str(self.color))\n label.place(x=0,y=0, width=width, height=height)\n self.configure(width=width, height=height)" }, { "alpha_fraction": 0.5310987234115601, "alphanum_fraction": 0.5471135973930359, "avg_line_length": 37.371429443359375, "blob_id": "ffe938bc526c1e17726c9a24d3fda2d7da756051", "content_id": "afcfbea6f7f39ab877171901d655b40916f5203e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2685, "license_type": "no_license", "max_line_length": 121, "num_lines": 70, "path": "/cLibrary/widgets/controlPanel/RAFP.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.widgets.controlPanel.DispWidgetFrame import *\nfrom cLibrary.widgets.ToolTip import CreateToolTip\nfrom cLibrary.structure.warehouse.Warehouse import Warehouse\nfrom cLibrary.structure.warehouse.Aisle import Aisle\nfrom typing import List, Set, Union\n\n\nclass RAFP(DispWidgetFrame):\n\n def __init__(self, master, warehouse, controller, height=50, width=200, bg=\"snow\", fg=\"lime green\", *args, **kwargs):\n if not isinstance(warehouse, Warehouse):\n raise TypeError(\"warehouse must be of Type Warehouse\")\n self.bg = bg\n self.fg = fg\n super().__init__(master, warehouse, controller, height=height, width=width, *args, **kwargs)\n\n def load_title(self):\n super(RAFP, self).load_title()\n self.title['text'] = \"Aisle Reserve Slot Fill Display\"\n self.title['bg'] = \"MediumOrchid4\"\n self.title['fg'] = \"snow\"\n\n def load_display(self):\n ta = 0\n max_slots = 0\n\n aisle_empty_filled = [] # type: List[Set[Aisle, int, int]]\n for aisle in self.warehouse:\n reserves = aisle.get_reserve_slots()\n if len(reserves) > 0:\n e = 0\n for slot in reserves:\n if not slot.stock_records:\n e += 1\n f = len(reserves) - e\n aisle_empty_filled.append((aisle, e, f))\n if e + f > max_slots:\n max_slots = e + f\n ta += 1\n\n aisle_empty_filled.sort(key=lambda x: x[0].spot_id)\n\n x_val = 0\n y_val = 20\n w = self.winfo_reqwidth() // ta\n h = self.winfo_reqheight() - 42\n for aisle, empty_n, filled_n in aisle_empty_filled:\n t = empty_n + filled_n\n\n t_ratio = ((t / max_slots) * (h - 1)) // 1\n f_ratio = ((filled_n / max_slots) * (h - 1)) // 1\n\n frame = Frame(self, bg=self.bg, relief=\"solid\")\n frame.place(x=x_val + 1, y=y_val + h - 
t_ratio - 1, width=w - 2, height=t_ratio + 2)\n\n border = Label(frame, bg=self.bg, relief=\"solid\")\n border.place(x=0, y=0, width=w - 2, height=t_ratio + 2)\n\n background = Label(frame, bg=self.bg, relief=\"flat\")\n background.place(x=1, y=1, width=w - 4, height=t_ratio)\n\n filled_graphic = Label(frame, bg=self.fg, relief=\"flat\")\n filled_graphic.place(x=1, y=(t_ratio - f_ratio) + 1, width=w - 4, height=f_ratio)\n\n text = Label(self, text=aisle.aisle, relief=\"groove\")\n text.place(x=x_val, y=h + 20 + 2, width=w, height=20)\n\n CreateToolTip(frame, \"{} / {}\".format(filled_n, t), c_off=(-15 + w))\n\n x_val += w" }, { "alpha_fraction": 0.5991671085357666, "alphanum_fraction": 0.61287522315979, "avg_line_length": 43.68217086791992, "blob_id": "0dae45c32e661a05705203e835303f05a65ee8b1", "content_id": "f7cdc7e8e8a62ee3fb5baa39876b05b4f1df44a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5763, "license_type": "no_license", "max_line_length": 147, "num_lines": 129, "path": "/cLibrary/widgets/controlPanel/DashWig.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter.ttk import Combobox\nfrom cLibrary.widgets.ToolTip import CreateToolTip\nfrom cLibrary.widgets.controlPanel.BAD import BAD\nfrom cLibrary.widgets.controlPanel.AFP import AFP\nfrom cLibrary.widgets.controlPanel.BAIL import BAIL\nfrom cLibrary.widgets.controlPanel.BIL import BIL\nfrom cLibrary.widgets.controlPanel.STS import STS\nfrom cLibrary.widgets.controlPanel.DSP import DSP\nfrom cLibrary.widgets.controlPanel.WFP import WFP\nfrom cLibrary.widgets.controlPanel.WRFP import WRFP\nfrom cLibrary.widgets.controlPanel.TimeGradient import TimeGradient\nfrom cLibrary.widgets.controlPanel.RAFP import RAFP\n\n\nclass DashWig(Frame):\n widget_dict = {\"PickSlot Fill Percentage\": AFP, \"Warehouse Fill Percentage\": WFP, \"Best Items List\": BIL,\n \"Time Gradient\": TimeGradient, \"Best Aisle Display\": BAD, \"Best Aisle Items List\": BAIL,\n \"Reserve Fill Percentage\": RAFP, \"Warehouse Reserves Fill\": WRFP, \"Slots To Swap\": STS}\n widget_dict_2 = {\"AFP\": AFP, \"WFP\": WFP, \"BIL\": BIL, \"TimeGradient\": TimeGradient, \"BAD\": BAD, \"BAIL\": BAIL,\n \"RAFP\": RAFP, \"WRFP\": WRFP, \"STS\": STS,}\n\n widgets = [\"PickSlot Fill Percentage\", \"Warehouse Fill Percentage\",\n \"Best Aisle Display\", \"Best Items List\", \"Best Aisle Items List\",\n \"Reserve Fill Percentage\", \"Warehouse Reserves Fill\", \"Slots To Swap\"]\n\n def __init__(self, master, controller, width=100, height=100, *args, **kwargs):\n super().__init__(master, *args, **kwargs)\n self.controller = controller\n self.current_widget_type = None\n self.current_type = None\n self.close_button = None\n self.fullscreen = False\n self.disp_widgets = []\n self.def_val = self.set_menu_val(self.widgets[0])\n self.bw = 2\n self.configure(width=width, height=height, bg=\"light grey\", borderwidth=self.bw)\n self.og_w = width\n self.og_h = height\n self.width = width\n self.height = height\n self.og_x = None\n self.og_y = None\n self.clean()\n self.update()\n\n def choose_widget(self):\n while len(self.disp_widgets) > 0:\n temp = self.disp_widgets.pop()\n temp.destroy()\n self.comb = Combobox(self, textvariable=self.def_val, value=self.widgets, state=\"readonly\")\n self.comb.place(width=(self.width/2.2)//1, height=30, x=(self.width//2)-((self.width/4.4)//1)-self.bw, y=self.height//2 - 45)\n ok_but = Button(self, text=\"OK\", bg=\"lawn 
green\", command=lambda:self.disp_wid(self.widget_dict[self.comb.get()]))\n ok_but.place(width=(self.width/2.2)//1, height=30, x=(self.width//2)-((self.width/4.4)//1)-self.bw, y=self.height//2-15)\n cancel_but = Button(self, text=\"Cancel\", bg=\"grey90\",\n command=lambda: self.clean())\n cancel_but.place(width=(self.width / 2.2) // 1, height=30,\n x=(self.width // 2) - ((self.width / 4.4) // 1) - self.bw, y=self.height // 2+15)\n\n self.disp_widgets.append(self.comb)\n self.disp_widgets.append(ok_but)\n self.disp_widgets.append(cancel_but)\n\n def set_menu_val(self, x):\n val = StringVar(self)\n val.set(x) # default value\n return val\n\n def disp_wid(self, type=None):\n if type is not None:\n self.current_widget_type = type.__name__\n self.current_type = type\n while len(self.disp_widgets) > 0:\n temp = self.disp_widgets.pop()\n temp.destroy()\n widget = self.current_type(self, warehouse=self.controller.warehouse, controller=self.controller, width=self.width-4, height=self.height-4)\n widget.place(x=0, y=0)\n self.close_button = Button(self, text=\"X\", bg=\"coral1\", command=self.clean, relief=\"groove\")\n self.close_button.place(width=20, height=20, x=self.width-24, y=0)\n\n CreateToolTip(self.close_button, \"Close\", c_off=10)\n\n widget.title.bind('<Double-Button-1>', lambda e: self.full_screen())\n if not self.fullscreen:\n widget.title.unbind_all(\"<Escape>\")\n else:\n widget.title.bind_all(\"<Escape>\", self.full_screen)\n self.disp_widgets.append(widget)\n self.disp_widgets.append(self.close_button)\n\n def full_screen(self, event=None):\n self.fullscreen = not self.fullscreen\n if not self.fullscreen:\n self.width = self.og_w\n self.height = self.og_h\n self.configure(height=self.height, width=self.width)\n self.disp_wid()\n self.place(x=self.og_x, y=self.og_y)\n else:\n self.width = self.controller.container.winfo_width()\n self.height = self.controller.container.winfo_height()\n self.configure(height=self.height, width=self.width)\n self.lift()\n self.disp_wid()\n self.og_x = self.winfo_x()\n self.og_y = self.winfo_y()\n self.place(x=0, y=0)\n\n def clean(self):\n if self.fullscreen:\n self.full_screen()\n else:\n self.current_widget_type = None\n for widget in self.winfo_children():\n widget.destroy()\n self.default_display()\n\n def default_display(self):\n add_button = Button(self, text=\"+\", anchor=\"center\", bg=\"orange\", fg=\"white\", font=\"bold\",\n command=lambda: self.choose_widget())\n add_button.place(width=30, height=30, x=self.width // 2 - 15 - self.bw, y=self.height // 2 - 15 - self.bw)\n\n CreateToolTip(add_button, \"Add New Widget\", c_off=20)\n\n self.disp_widgets.append(add_button)\n self.update()\n\n def load_widget(self, type):\n self.disp_wid(self.widget_dict_2[type])" }, { "alpha_fraction": 0.6088888645172119, "alphanum_fraction": 0.6195555329322815, "avg_line_length": 42.30769348144531, "blob_id": "42d510981eb68109d463d9bee9dcb315ffb5d214", "content_id": "7e248786500fffd376c066cda237dee9e7298b27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 113, "num_lines": 26, "path": "/cLibrary/guis/popups/ViewAreas.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.guis.popups.SelectArea import SelectArea\nfrom cLibrary.methods.general import center_to_win\n\n\nclass ViewAreas(Toplevel):\n\n def __init__(self, master, areas, selecting=True, *args, **kwargs):\n \"\"\"\n 
Initialise view warehouse popup\n :param master: master window\n :param areas: Warehouse location to view into\n :param selecting: True/False (are areas needed to be selected)\n \"\"\"\n super(ViewAreas, self).__init__(master, *args, **kwargs)\n self.selecting = selecting\n self.grid_row = 0\n self.grid_col = 0\n for area in areas:\n label = Button(self, text=area.area_name, relief=\"groove\", bg=\"thistle1\", width=13,\n command=lambda e=area: SelectArea(self, e, self.selecting))\n label.grid(row=self.grid_row, column=self.grid_col, pady=(0, 2), padx=0)\n (self.grid_col, self.grid_row) = (0, self.grid_row + 1) if self.grid_col == 1 else (1, self.grid_row)\n center_to_win(self, self.master.master)\n self.resizable(False, False)\n self.grab_set()" }, { "alpha_fraction": 0.5495989918708801, "alphanum_fraction": 0.5753482580184937, "avg_line_length": 36.60317611694336, "blob_id": "43ffc164362ed57f32b5726554272a4108c9ca0e", "content_id": "47c3d89a3cfb229a4542ea3e4f11e218db21a67a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2369, "license_type": "no_license", "max_line_length": 137, "num_lines": 63, "path": "/cLibrary/guis/popups/GroundConSettings.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.guis.popups.ErrorWindow import ErrorWindow\n\n\nclass GroundConSettings(StandardPopUp):\n\n def __init__(self, master, controller, *args, **kwargs):\n \"\"\"\n Initialise from super\n :param master: master window\n :param controller: program controller\n \"\"\"\n super(GroundConSettings, self).__init__(master, controller, width=230, height=105, cen_win=master.master.master, *args, **kwargs)\n\n def load_display(self):\n \"\"\"\n Load popup display\n :return: None\n \"\"\"\n gap_text = Label(self, text=\"Percentage Gap :\", anchor=E)\n gap_text.place(x=5, y=5, width=125)\n gap_entry = Entry(self)\n gap_entry.insert(0, self.controller.gap)\n gap_entry.place(x=130, y=5, width=90)\n\n bay_text = Label(self, text=\"Bay Range :\", anchor=E)\n bay_text.place(x=5, y=30, width=125)\n bay_entry = Entry(self)\n bay_entry.insert(0, self.controller.bay_range)\n bay_entry.place(x=130, y=30, width=90)\n\n hp_text = Label(self, text=\"Height Percentage :\", anchor=E)\n hp_text.place(x=5, y=55, width=125)\n hp_entry = Entry(self)\n hp_entry.insert(0, self.controller.hp)\n hp_entry.place(x=130, y=55, width=90, height=20)\n\n ok_button = Button(self, text=\"Save\", bg=\"SteelBlue1\",\n command=lambda: self.save(gap_entry, bay_entry, hp_entry))\n ok_button.place(x=180, y=80, width=40, height=20)\n\n def save(self, gap, bay, hp):\n \"\"\"\n Save consolidation settings\n :param gap: Gap % between cartons\n :param bay: how many bats to look forward\n :param hp: height %\n :return: None\n \"\"\"\n try:\n temp_hp = self.controller.hp\n temp_gap = self.controller.gap\n self.controller.bay_range = int(bay.get())\n\n if (temp_hp != int(hp.get())) or (temp_gap != int(gap.get())):\n self.controller.hp = int(hp.get())\n self.controller.gap = int(gap.get())\n reserves = self.controller.warehouse.get_reserve_slots()\n for spot in reserves:\n spot.get_attrs(room=self.controller.gap)\n self.on_close()\n except ValueError as error:\n ErrorWindow(self,error,'U001')\n" }, { "alpha_fraction": 0.542259693145752, "alphanum_fraction": 0.5691442489624023, "avg_line_length": 42.1136360168457, "blob_id": "c099d30bf634fb722f5f8cfbe27e5e72eeee0312", "content_id": 
"ccea0fc21991a418bd1d51849a8a0157d59f7dc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5691, "license_type": "no_license", "max_line_length": 135, "num_lines": 132, "path": "/cLibrary/guis/windows/Relay.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.windows.WidgetWindow import *\nfrom tkinter.filedialog import *\nfrom cLibrary.methods.relay import relay\nfrom cLibrary.methods.general import open_excel, xlsx_ft\nfrom cLibrary.guis.popups.ErrorWindow import ErrorWindow\nfrom cLibrary.widgets.WinOutput import WinOutput\n\n\nclass Relay(WidgetWindow):\n\n def __init__(self, master, controller):\n super(Relay, self).__init__(master, controller)\n self.output = WinOutput(self, width=870, height=self.controller.wig_height - 80,\n x=self.controller.wig_width - 870 - 20, y=70,\n wrap=WORD, bg=\"snow\", font=\"none 10\", relief=\"groove\")\n\n self.excel = None\n\n def load_title(self, text=\"Relay Warehouse\", bg=\"burlywood1\", relief=\"groove\"):\n super(Relay, self).load_title(text=text, bg=bg, relief=relief)\n\n def relay(self, area_dict, cat_dict, area_comb, cat_comb, excess_comb, outfile):\n \"\"\"\n Relay selected Category into the specified Areas within the Warehouse\n :param area_dict: dictionary for CustomArea's\n :param cat_dict: dictionary for Categories\n :param area_comb: Area combobox\n :param cat_comb: Category combobox\n :param excess_comb: Excess Area combobox\n :param outfile: file location to output relay file to\n :return: None\n \"\"\"\n try:\n if outfile == '':\n raise ValueError(\"1\")\n elif area_comb.get() == excess_comb.get():\n raise ValueError(\"2\")\n else:\n outfile = xlsx_ft(outfile)\n self.output.r_insert(\"Creating Allocations...\")\n self.excel = relay(cat_dict[cat_comb.get()], area_dict[area_comb.get()],\n area_dict[excess_comb.get()], self.controller, outfile)\n self.output.r_insert(\"Allocations created and sent to {}\".format(outfile))\n except ValueError as e:\n auto_resp = \"User error, no additional info needed\"\n e = str(e)\n if e == \"1\":\n ErrorWindow(self, auto_resp, \"U003\",\n \"Please enter an output file name to send the data to.\")\n elif e == \"2\":\n ErrorWindow(self, auto_resp, 'U002',\n \"Area option and excess area option cannot have the same area selected.\")\n else:\n ErrorWindow(self, e, \"U004\",\n \"Unknown user error detected, please contact admin if issue cannot be resolved\")\n\n def open_excel(self):\n open_excel(self.excel, self.output)\n\n @staticmethod\n def entry_set(entry, text):\n \"\"\"\n Set entry box text\n :param entry: entry box\n :param text: text to set\n :return: None\n \"\"\"\n if text != '':\n entry.delete(0, 'end')\n entry.insert(END, text)\n\n def load_display(self):\n w = 250\n\n cat_title = Label(self, text=\"Category\", font=\"bold 15\")\n cat_title.place(x=30, y=70, width=w, height=40)\n\n cats = self.controller.categories\n cat_dict = {}\n for cat in cats:\n cat_dict[cat] = cats[cat]\n cat_combo_options = [key for key in cats]\n\n cat_combo = Combobox(self, values=cat_combo_options, font=\"bold 10\", state=\"readonly\")\n cat_combo.place(x=30, y=110, width=w, height=30)\n cat_combo.set(cat_combo_options[0])\n\n area_title = Label(self, text=\"Area\", font=\"bold 15\")\n area_title.place(x=30, y=150, width=w, height=40)\n\n areas = self.controller.areas\n area_dict = {}\n for area in areas.values():\n area_dict[area.area_name] = area\n area_combo_options = [key.area_name for key in 
areas.values()]\n\n area_combo = Combobox(self, values=area_combo_options, font=\"bold 10\", state=\"readonly\")\n area_combo.place(x=30, y=190, width=w, height=30)\n area_combo.set(area_combo_options[0])\n\n area_title2 = Label(self, text=\"Excess Area\", font=\"bold 15\")\n area_title2.place(x=30, y=230, width=w, height=40)\n\n area_combo_options2 = [key.area_name for key in areas.values()]\n\n area_combo2 = Combobox(self, values=area_combo_options2, font=\"bold 10\", state=\"readonly\")\n area_combo2.place(x=30, y=270, width=w, height=30)\n area_combo2.set(area_combo_options2[0])\n\n outfile_label = Label(self, text=\"Output File Name\", font=\"bold 15\")\n outfile_label.place(x=30, y=310, width=w-40, height=40)\n\n outfile_entry = Entry(self, )\n outfile_entry.place(x=30, y=350, width=w, height=30)\n\n outfile_button = Button(self, text=\"\\uD83D\\uDCC2\", font=\"bold 14\", relief=\"groove\",\n command=lambda: (\n self.entry_set(outfile_entry,\n asksaveasfilename(filetypes=(('XML', '*.xlsx'),), defaultextension=\".xlsx\")),\n outfile_entry.configure(fg=\"black\")))\n\n outfile_button.place(x=30+w-40, y=310, width=40, height=40)\n\n relay_button = Button(self, text=\"Relay\",\n bg=\"limegreen\",\n font=\"bold 12\",\n relief=\"groove\",\n command=lambda: self.relay(area_dict, cat_dict, area_combo, cat_combo, area_combo2, outfile_entry.get()))\n relay_button.place(x=30, y=390, width=w-60, height=30)\n\n open_button = Button(self, text=\"Open\", bg=\"lawn green\", relief=\"groove\", command=lambda: self.open_excel())\n open_button.place(x=30+w-60, y=390, width=60, height=30)\n" }, { "alpha_fraction": 0.5215547680854797, "alphanum_fraction": 0.5399293303489685, "avg_line_length": 25.679244995117188, "blob_id": "d809e116afde53774af7111333dc43169d88172e", "content_id": "64bfd0745d447067058ed290681feb49ae970e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1415, "license_type": "no_license", "max_line_length": 111, "num_lines": 53, "path": "/cLibrary/structure/datatypes/HexColour.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "\nclass HexColour:\n def __init__(self, red, green, blue):\n self.check_val(red)\n self.check_val(green)\n self.check_val(blue)\n\n self._red = red\n self._green = green\n self._blue = blue\n\n def get_red(self):\n return self._red\n\n def get_green(self):\n return self._green\n\n def get_blue(self):\n return self._blue\n\n def increment_red(self):\n self.check_val((self._red + 1))\n self._red += 1\n\n def decrement_red(self):\n self.check_val((self._red - 1))\n self._red -= 1\n\n def increment_green(self):\n self.check_val((self._green + 1))\n self._red += 1\n\n def decrement_green(self):\n self.check_val((self._green - 1))\n self._red -= 1\n\n def increment_blue(self):\n self.check_val((self._blue + 1))\n self._red += 1\n\n def decrement_blue(self):\n self.check_val((self._blue - 1))\n self._red -= 1\n\n @staticmethod\n def check_val(color):\n if not isinstance(color, int):\n raise TypeError(\"red, green and blue must be integers\")\n if color > 255 or color < 0:\n raise ValueError(\"red, green and blue must be greater than 0 and less than 255\")\n\n def __str__(self):\n return str(\"#\" + str(str(hex(self._red))[2:]).zfill(2) + str(str(hex(self._green))[2:]).zfill(2) + str(\n str(hex(self._blue))[2:]).zfill(2))\n" }, { "alpha_fraction": 0.5031108856201172, "alphanum_fraction": 0.5053732991218567, "avg_line_length": 28.223140716552734, "blob_id": 
"a73e46a15beffca408379c4375d445b7424f9a67", "content_id": "7d0018d2d06b84b1141a09a87c878e74d850a153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3536, "license_type": "no_license", "max_line_length": 117, "num_lines": 121, "path": "/cLibrary/structure/warehouse/Level.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Slot import Slot\nfrom typing import List, Set, Union, Optional\nimport math\nimport random\n\n\nclass Level(Area):\n\n def __init__(self, slf, warehouse):\n super().__init__()\n self.warehouse = warehouse\n self.aisle = slf.aisle\n self.bay = slf.bay\n self.level = slf.level\n self.spot_id = self.aisle + self.bay + self.level\n\n def add_line(self, slf):\n postition = slf.position\n if postition in self:\n raise Exception(\"There is no way this could possibly occur within the warehouse, contact admin urgently\")\n else:\n self[postition] = Slot(self.warehouse, slf)\n self.count += 1\n\n def get_filled_pick_slots_count(self):\n e = 0\n for slot in self:\n if slot.suits_pick_face:\n if slot.allocations:\n e += 1\n return e\n\n def get_empty_pick_slots_count(self):\n e = 0\n for slot in self:\n if slot.suits_pick_face:\n if not slot.allocations:\n e += 1\n return e\n\n def get_filled_pick_slots(self):\n filled = []\n for spot in self:\n if spot.suits_pick_face:\n if spot.allocations:\n filled.append(spot)\n return filled\n\n def get_pick_slots(self, filt=None):\n slots = []\n for spot in self:\n if spot.suits_pick_face:\n if filt is None:\n slots.append(spot)\n elif filt(spot):\n slots.append(spot)\n return slots\n\n def get_reserve_slots(self) -> List[Slot]:\n slots = []\n for spot in self:\n if not spot.suits_pick_face:\n slots.append(spot)\n return slots\n\n def get_best_hits(self) -> float:\n best_hits = None\n for spot in self:\n if spot.is_pick_face:\n if best_hits is None:\n if spot.allocations:\n best_hits = spot.get_item_avehitsday()\n else:\n best_hits = None\n else:\n if spot.allocations:\n best_hits = max(best_hits, spot.get_item_avehitsday())\n else:\n pass\n return best_hits\n\n def get_average_hits(self):\n e = 0\n n = self.get_filled_pick_slots_count()\n for slot in self:\n if slot.item is not None:\n e += slot.get_item_avehitsday()\n return e/n\n\n def aux_average_hits(self):\n e = 0\n for slot in self:\n if slot.suits_pick_face:\n if slot.allocations:\n e += slot.get_item_avehitsday()\n return e\n\n def get_rand_area(self):\n n = len(self.spots)\n rand = random.randint(1, n)\n for i in range(n):\n try:\n return self[(rand + i)%n]\n except KeyError:\n pass\n\n def find_item_location(self, item_id):\n for pickslot in self:\n if pickslot.item is not None and pickslot.item.item_id == item_id:\n return pickslot.spot_id\n\n def find_area(self, area_id):\n area = None\n for spot in self:\n if spot.spot_id == area_id:\n return spot\n return area\n\n def get_slot(self, position_code: str) -> Slot:\n return self[position_code]\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 3.3333332538604736, "blob_id": "e4e29091dfbbce8938e5fa4110ced90f52e5ce62", "content_id": "adbf6251214c968e5d6308f6e4e095dc87fbfbd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 25, "license_type": "no_license", "max_line_length": 4, "num_lines": 6, "path": "/resources/data/saves/general3.cp", "repo_name": 
"MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "BAIL\nBIL\nAFP\nBAD\nWFP\nNone" }, { "alpha_fraction": 0.6016713380813599, "alphanum_fraction": 0.6155988574028015, "avg_line_length": 27.760000228881836, "blob_id": "e9c376970c33275a550da11ad20aaa136aa99e69", "content_id": "abf9769266f1e32e502f87b3472b436395931e0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 718, "license_type": "no_license", "max_line_length": 93, "num_lines": 25, "path": "/cLibrary/widgets/controlPanel/DispWidgetFrame.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter.ttk import Combobox\n\n\nclass DispWidgetFrame(Frame):\n\n def __init__(self, master, warehouse, controller, height=50, width=200, *args, **kwargs):\n super().__init__(master, *args, **kwargs)\n self.warehouse = warehouse\n self.controller = controller\n self.configure(width=width, height=height)\n\n self.title = Label(self)\n self.load_title()\n\n self.load_display()\n\n def load_title(self):\n self.title['text'] = \"Generic Dash Widget\"\n self.title['bg'] = \"CadetBlue1\"\n self.title['relief'] = \"groove\"\n self.title.place(x=0, y=0, width=self.winfo_reqwidth(), height=20)\n\n def load_display(self):\n pass" }, { "alpha_fraction": 0.5910311341285706, "alphanum_fraction": 0.6122449040412903, "avg_line_length": 29.032258987426758, "blob_id": "d2cc756d0ba9f84538b964726f88651e3273a542", "content_id": "93313248dc00bb1a589f83293992513d73159cb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3724, "license_type": "no_license", "max_line_length": 125, "num_lines": 124, "path": "/cLibrary/methods/AreaMethods.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Aisle import Aisle\nfrom cLibrary.structure.warehouse.Bay import Bay\nfrom cLibrary.structure.warehouse.Level import Level\nfrom cLibrary.structure.warehouse.Slot import Slot\nfrom cLibrary.structure.item.Item import Item\n\nfrom cLibrary.methods.general import *\nimport pandas as pd\nimport datetime\nimport math\nimport csv\nimport copy\nfrom typing import Set, List, Tuple\n\n\ndef check_nested_area(area1, area2, warehouse):\n if not isinstance(area1, (Aisle, Bay, Level, Slot)) and not isinstance(area2, (Aisle, Bay, Level, Slot)):\n raise TypeError(\"area must be of Type Aisle, Bay, Level\")\n\n area1_len = len(area1.spot_id)\n area2_len = len(area2.spot_id)\n dif = area1_len - area2_len\n\n if dif > 0:\n path = area1.spot_id[:area2_len]\n current = area2.spot_id\n if current == path:\n raise ValueError(\"area, or part of area already exists in CustomArea\")\n elif dif < 0:\n path = area2.spot_id[:area1_len]\n current = area1.spot_id\n if current == path:\n raise ValueError(\"area, or part of area already exists in CustomArea\")\n else:\n if area1.spot_id == area2.spot_id:\n raise ValueError(\"area, or part of area already exists in CustomArea\")\n\n\ndef sorted_1_4(area, method):\n \"\"\"\n Make sure best items are on the 2nd and 3rd levels of picking\n \"\"\"\n area_spots = area.get_pick_slots() # type: List[Slot]\n cat_items = area.get_filled_pick_slots() # type: List[Slot]\n\n l_23 = area.get_pick_slots(filt=lambda spot: spot.level in [\"2\", \"3\"]) # type: List[Slot]\n\n n_23 = len(l_23)\n cat_items.sort(key=lambda slot: slot.get_item_avehitsday())\n\n best_items = 
cat_items[:n_23]\n worst_items = cat_items[n_23:]\n\n i = 0\n while i < len(best_items):\n if best_items[i].level in [\"2\", \"3\"]:\n best_items.pop(i)\n i -= 1\n i += 1\n\n i = 0\n while i < len(worst_items):\n if worst_items[i].level in [\"1\", \"4\"]:\n worst_items.pop(i)\n i -= 1\n i += 1\n\n l_23_empty = area.get_pick_slots(filt=lambda spot: spot.level in [\"2\", \"3\"] and not spot.allocations) # type: List[Slot]\n worst_areas = l_23_empty + worst_items # type: List[Slot]\n\n worst_areas.sort(key=lambda x: x.spot_id)\n best_items.sort(key=lambda x: x.spot_id)\n\n moves = []\n while len(best_items) > 0:\n moves.append((best_items.pop(0), worst_areas.pop(0)))\n\n return moves\n\n\ndef best_sort_1_4(area):\n return sorted_1_4(area, Area.get_best_avehitsday)\n\n\ndef dayxhits_sort_1_4(area):\n return sorted_1_4(area, Area.get_best_dayxhits)\n\n\ndef ground_con(area, bay_range=10, gap=5, hp=80) -> List[List[Slot]]:\n bay_range = int(bay_range)\n gap = int(gap)\n\n if not isinstance(area, Area):\n raise TypeError()\n\n reserves = area.get_reserve_slots() # type: List[ReserveSlot]\n reserves.sort(key=lambda x: x.spot_id)\n for i in reserves:\n i.get_attrs(gap, hp)\n\n gap /= 100\n gap += 1\n\n _ = 0 # removing the empty reserve slots\n while _ < len(reserves):\n if not reserves[_].stock_records:\n reserves.pop(_)\n _ -= 1\n _ += 1\n\n i = 0\n consolidations = []\n while i < len(reserves):\n j = i\n current_slot = reserves.pop(i)\n current_con = [current_slot, ]\n while j < len(reserves) and len(current_con) < 2 and j < i + bay_range + 1:\n if current_slot.used_width + reserves[j].used_width < current_slot.s_width:\n current_con.append(reserves.pop(j))\n consolidations.append(current_con)\n j += 1\n\n return consolidations\n" }, { "alpha_fraction": 0.49121665954589844, "alphanum_fraction": 0.5100845694541931, "avg_line_length": 29.156862258911133, "blob_id": "b5a4728e032a161532b4eba1cacf8e2de44db1c5", "content_id": "13972cbc0a41c231331f96ecf0bf606192473a7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 97, "num_lines": 51, "path": "/cLibrary/widgets/controlPanel/STS.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.widgets.controlPanel.DispWidgetFrame import *\nfrom cLibrary.methods.AreaMethods import dayxhits_sort_1_4\nfrom cLibrary.widgets.ToolTip import CreateToolTip\n\n\nclass STS(DispWidgetFrame):\n\n def load_title(self):\n super().load_title()\n self.title['text'] = \"Slots To Swap\"\n self.title['bg'] = \"orange\"\n\n def load_display(self):\n ta = 0\n max_slots = 0\n empty = []\n aisles = []\n for area in self.controller.areas.values():\n e = len(dayxhits_sort_1_4(area))\n if e > 0:\n empty.append(e)\n aisles.append(area)\n ta += 1\n if e > max_slots:\n max_slots = e\n\n x_val = 0\n y_val = 20\n\n if ta == 0:\n return\n\n w = self.winfo_reqwidth() // ta\n h = self.winfo_reqheight() - 42\n for _ in range(len(empty)):\n e = empty[_]\n\n t_ratio = ((e / max_slots) * (h - 1)) // 1\n\n border = Label(self, relief=\"solid\")\n border.place(x=x_val + 1, y=y_val + h - t_ratio - 1, width=w - 2, height=t_ratio + 2)\n\n background = Label(self, relief=\"flat\", bg=\"light blue\")\n background.place(x=x_val + 2, y=y_val + h - t_ratio, width=w - 4, height=t_ratio)\n\n CreateToolTip(background, \"{}\".format(e), c_off=(-15 + w))\n\n text = Label(self, text=aisles[_].area_name, relief=\"groove\")\n text.place(x=x_val, y=h + 20 + 2, 
width=w, height=20)\n\n x_val += w" }, { "alpha_fraction": 0.6008254885673523, "alphanum_fraction": 0.6291273832321167, "avg_line_length": 61.85185241699219, "blob_id": "5c1b8cd6729c094adb140a5b63828858d4ffc516", "content_id": "920ab46401727074a131309e68dd768fd1607d5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1696, "license_type": "no_license", "max_line_length": 120, "num_lines": 27, "path": "/cLibrary/widgets/controlPanel/SlotRow.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\nclass SlotRow(Frame):\n\n def __init__(self, master, index, slot, width, height, *args, **kwargs):\n super(SlotRow, self).__init__(master, *args, **kwargs)\n self.configure(width=width, height=height)\n self.parts = []\n self.numLabel = Label(self, text=str(index) + \". \", bg=\"white\", relief=\"groove\")\n self.numLabel.place(x=0, y=0, width=(4 / 37) * width, height=height)\n self.tempLabel = Label(self, text=slot.spot_id, bg=\"white\", relief=\"groove\")\n self.tempLabel.place(x=(4 / 37) * width, y=0, width=(5 / 37) * width, height=height)\n self.itemLabel = Label(self, text=slot.allocations[0].item.item_id, bg=\"white\", relief=\"groove\")\n self.itemLabel.place(x=(9 / 37) * width, y=0, width=(7 / 37) * width, height=height)\n self.hitsLabel = Label(self, text=slot.allocations[0].item.hits, bg=\"white\", relief=\"groove\")\n self.hitsLabel.place(x=(16 / 37) * width, y=0, width=(5 / 37) * width, height=height)\n self.dayshitsLabel = Label(self, text=slot.allocations[0].item.dayshit, bg=\"white\", relief=\"groove\")\n self.dayshitsLabel.place(x=(21 / 37) * width, y=0, width=(6 / 37) * width, height=height)\n self.avgLabel = Label(self, text=slot.allocations[0].item.avehitsday, bg=\"white\", relief=\"groove\")\n self.avgLabel.place(x=(27 / 37) * width, y=0, width=(10 / 37) * width, height=height)\n self.parts += [self.numLabel, self.tempLabel, self.itemLabel, self.hitsLabel, self.dayshitsLabel, self.avgLabel]\n\n def bind(self, sequence=None, func=None, add=None):\n super().bind(sequence, func, add)\n for i in self.parts:\n i.bind(sequence, func, add)" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 3.555555582046509, "blob_id": "6ebbd9c705edc5221e200c6c17e63e771bfde499", "content_id": "675326b9d9b0005bcc3ccf65fd7411403b753e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 40, "license_type": "no_license", "max_line_length": 4, "num_lines": 9, "path": "/resources/data/saves/basic_control_panel.cp", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "WFP\nWRFP\nBIL\nAFP\nRAFP\nBAIL\nBAD\nNone\nNone" }, { "alpha_fraction": 0.5195390582084656, "alphanum_fraction": 0.5415831804275513, "avg_line_length": 41.92473220825195, "blob_id": "2a4f640b15db0f63b77e697e63bf34467c1704a8", "content_id": "c93b08ae23b520a3862b86e43964150a6bb9ca20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3992, "license_type": "no_license", "max_line_length": 136, "num_lines": 93, "path": "/cLibrary/guis/popups/ChooseNewWarehouse.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.methods.general import *\nimport PIL.ImageTk, PIL.Image, copy, shutil\nfrom tkinter.filedialog import *\nfrom cLibrary.guis.popups.ErrorWindow import ErrorWindow\nfrom 
cLibrary.widgets.InfoHover import InfoHover\nfrom tkinter import *\n\n\nclass ChooseNewWarehouse(Toplevel):\n\n def __init__(self, master):\n \"\"\"\n Initialise New Warehouse Popup window\n :param master: master window\n \"\"\"\n super(ChooseNewWarehouse, self).__init__(master)\n self.transient(master)\n title = Label(self, text=\"Create New Warehouse\", font=\"bold 22\", fg=\"grey26\")\n title.place(x=55, y=5, width=340, height=60)\n\n def entry_set(entry, text):\n \"\"\"\n Set entry box text\n :param entry: entry box\n :param text: text to set\n :return: None\n \"\"\"\n if text!='':\n entry['state'] = 'normal'\n entry.delete(0, 'end')\n entry.insert(END, text)\n entry['state'] = 'readonly'\n\n def file_line(title, x, y, msg):\n \"\"\"\n Create file entry and buttons widget\n :param title: file title\n :param x: x pos\n :param y: y pos\n :param msg: msg on hover\n :return:\n \"\"\"\n item_label = Label(self, text=title + \": \", relief=\"flat\", fg=\"gray40\", anchor=W)\n item_label.place(x=x, y=y, height=20, width=60)\n item_file = StringVar()\n item_entry = Entry(self, textvariable=item_file, state=\"readonly\")\n item_entry.place(x=x + 60, y=y, height=20, width=300)\n item_button = Button(self, text=\"\\uD83D\\uDCC2\", relief=\"groove\",\n command=lambda: (entry_set(item_entry, askopenfilename()), item_entry.configure(fg=\"black\")))\n item_button.place(x=x + 360, y=y, height=20, width=20)\n info_hover = InfoHover(self, msg)\n info_hover.place(x=x + 360 + 20, y=y, height=20, width=20)\n return item_entry\n\n titles = [(\"item file\", \"file containing item data\\n(WISE > Reports > Export > Item)\"),\n (\"slot file\", \"file containing slot data\\n(WISE > Reports > Export > Slot)\"),\n (\"stock file\", \"file containing info on current stock\\n(WISE > Reports > Export > Stock)\"),\n (\"hits file\", \"file for hits history\\n(WISE > Reports > Movement > Hits and Picks)\")]\n x = 20\n y = 60\n self.entries = []\n for name in titles:\n self.entries.append(file_line(name[0], x, y, name[1]))\n y += 30\n\n def create_new_warehouse():\n \"\"\"\n Use files from entries to create new warehouse and push it into the system\n :return: None\n \"\"\"\n for entry in self.entries:\n if entry.get() == \"\" or entry.get() == \"No File Selected\":\n entry_set(entry, \"No File Selected\")\n entry.configure(fg=\"red\")\n return\n\n try:\n self.master.change_warehouse(self.entries[0].get(), self.entries[1].get(), self.entries[2].get(), self.entries[3].get())\n shutil.copy(self.entries[0].get(), \"resources/source/item.csv\")\n shutil.copy(self.entries[1].get(), \"resources/source/slot.csv\")\n shutil.copy(self.entries[2].get(), \"resources/source/itmslot.csv\")\n shutil.copy(self.entries[3].get(), \"resources/source/hits.csv\")\n self.destroy()\n except Exception as e:\n ErrorWindow(self, e, 'I001')\n\n create_button = Button(self, text=\"Create\", relief=\"groove\", pady=13, bg=\"SteelBlue1\",\n command=create_new_warehouse)\n create_button.place(x=450-105, y=220-40, width=55, height=20)\n\n self.configure(width=450, height=220)\n center_to_win(self, master)\n self.grab_set()\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 3.3333332538604736, "blob_id": "a0a1702fd31f2a5aa3b3051912c8e63b17eb3d19", "content_id": "0497e2403e6d215dd05f6f499b98b42fdd73d0b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 25, "license_type": "no_license", "max_line_length": 4, "num_lines": 6, "path": 
"/resources/data/saves/general.cp", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "AFP\nBIL\nBAD\nBAIL\nWFP\nNone" }, { "alpha_fraction": 0.5847328305244446, "alphanum_fraction": 0.6106870174407959, "avg_line_length": 35.44444274902344, "blob_id": "f1e58e8ac11de2d52ae6bd810870cf127982f5de", "content_id": "3484d52c2dd89f31ce2a4fd6aef599f053fbdb3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 105, "num_lines": 18, "path": "/cLibrary/widgets/DayNightButton.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\nclass DayNightButton(Button):\n\n def __init__(self, master, *args, **kwargs):\n super(DayNightButton, self).__init__(master, *args, **kwargs)\n self.configure(text=\"\\uD83C\\uDF19\", bg=\"gray26\", fg=\"light goldenrod\", command=self.switch_state)\n self.night = False\n\n def switch_state(self):\n if self.night:\n self.master.daynight(False)\n self.configure(text=\"\\uD83C\\uDF19\", bg=\"gray26\", fg=\"light goldenrod\")\n else:\n self.master.daynight(True)\n self.configure(text=\"\\u2600\\uFE0F\", bg=\"snow\", fg=\"goldenrod\", anchor=W)\n self.night = not self.night" }, { "alpha_fraction": 0.5503821969032288, "alphanum_fraction": 0.5767894387245178, "avg_line_length": 42.6363639831543, "blob_id": "ddac73d9d98cfa11e2e9072e0c514fc7d18edc0f", "content_id": "9b25a19cd4486879adf4556abab4a2594151cfd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1439, "license_type": "no_license", "max_line_length": 95, "num_lines": 33, "path": "/cLibrary/widgets/controlPanel/TimeGradient.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.controlPanel.HexColour import HexColour\n\n\nclass TimeGradient(Frame):\n def __init__(self, master, warehouse, **kwargs):\n super().__init__(master)\n self.width = width = kwargs['width'] if kwargs.get('width') is not None else 200\n self.height = height = kwargs['height'] if kwargs.get('height') is not None else 100\n self.color = HexColour(255, 0, 0)\n self.label = Label(self, text=\"Testing Gradients\", bg=str(self.color), relief=\"groove\")\n self.label.place(x=0, y=0, width=width, height=height)\n self.reverse = False\n self.update_clock()\n self.configure(width=width, height=height)\n\n def update_clock(self):\n if self.reverse:\n if self.color.get_red() == 255 and self.color.get_green() < 255:\n self.color._green += 1\n elif self.color.get_red() > 0 and self.color.get_green() == 255:\n self.color._red -= 1\n else:\n self.reverse = not self.reverse\n else:\n if self.color.get_red() < 255 and self.color.get_green() == 255:\n self.color._red += 1\n elif self.color.get_red() == 255 and self.color.get_green() > 0:\n self.color._green -= 1\n else:\n self.reverse = not self.reverse\n self.label.configure(bg=str(self.color))\n self.after(5, self.update_clock)" }, { "alpha_fraction": 0.6039047241210938, "alphanum_fraction": 0.614493727684021, "avg_line_length": 32.955055236816406, "blob_id": "212d588888d48d8815b674a9f0b433f546babb7f", "content_id": "85b4b0b65e8e66a308c8ebc100477e66a9b44d35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3022, "license_type": "no_license", "max_line_length": 113, "num_lines": 89, "path": "/cLibrary/methods/distro.py", "repo_name": 
"MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from typing import List, Tuple, Set\nfrom cLibrary.structure.item.Item import Item\nfrom cLibrary.structure.warehouse.Warehouse import Warehouse\nfrom cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Slot import Slot\nfrom cLibrary.methods.general import create_xlsx, xlsx_ft, data_to_dataframe, dataframes_to_xlsx\nfrom cLibrary.methods.slotReport import pick_slot_report\nimport math\n\n\ndef get_facings_width(item: Item, qty: int, max_depth: int = 1200, max_height: int = 1400):\n \"\"\"\n Get width of facings in a distro setup\n :param item: Item\n :param qty: qty of item\n :param max_depth: maximum depth a each facing can be\n :param max_height: maximum height a facing can be\n :return: width of facings needed for item in distro\n \"\"\"\n w = item.carton.width # type: int\n d = 0 # type: int\n h = 0 # type: int\n c = math.ceil(qty / item.carton.units) # type: int\n\n # simulates stacking of cartons to get the facings width\n while c > 0:\n c -= 1\n d += item.carton.length\n if d > max_depth:\n d = 0\n h += item.carton.height\n\n if h > max_height:\n h = 0\n w += item.carton.width\n\n return w\n\n\ndef distro_setup(distro_data: List[Tuple[Item, int]], area: Area, warehouse: Warehouse, output_file: str) -> str:\n \"\"\"\n Create excel sheets to setup a distro order\n :param distro_data: list of items and their qty's\n :param area: Area to allocate Pick Slots to (assuming that pick slots are empty)\n :param warehouse: warehouse data\n :param output_file: output file destination\n :return: File name\n \"\"\"\n\n # getting facing width data\n data = []\n item_list = []\n for i, (itm, qty) in enumerate(distro_data):\n facing_w = get_facings_width(itm, qty) * 1.1\n data.append(distro_data[i] + (facing_w,))\n item_list.append(itm)\n\n pick_slots = area.get_pick_slots() # type: List[Slot]\n pick_slots.sort(key=lambda x: x.position)\n pick_slots.sort(key=lambda x: x.level)\n pick_slots.sort(key=lambda x: int(x.bay))\n\n bay = []\n bays = []\n current_bay = None\n for slot in pick_slots:\n if slot.bay != current_bay:\n if len(bay) > 0:\n bays.append(bay)\n bay = []\n current_bay = slot.bay\n bay.insert(0, slot)\n\n current_width = 0\n bay = 0\n for i, item in enumerate(data):\n if (current_width + item[2]) > 2550 or len(bays[bay]) == 0:\n bay += 1\n current_width = 0\n current_width += item[2]\n location = bays[bay].pop()\n data[i] = [item[0].item_id, item[1], item[2],\n location.aisle, location.bay, location.level, location.position]\n\n output_file = xlsx_ft(output_file)\n dfs = [data_to_dataframe(data, ['ITEM ID', 'QTY', 'FACING WIDTH', 'Aisle', 'Bay', 'Level', 'Position']),\n pick_slot_report(item_list)]\n dataframes_to_xlsx(dfs=dfs, outfile=output_file)\n return output_file\n" }, { "alpha_fraction": 0.5630573034286499, "alphanum_fraction": 0.5643312335014343, "avg_line_length": 26.068965911865234, "blob_id": "c271e9453132b9bb06580483752c8b94e785af32", "content_id": "d0501d830c558c691fd770cb943b601f2f50713b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 52, "num_lines": 29, "path": "/cLibrary/structure/warehouse/Bay.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Level import Level\n\n\nclass Bay(Area):\n\n def __init__(self, slf, warehouse):\n 
super().__init__()\n self.warehouse = warehouse\n self.aisle = slf.aisle\n self.bay = slf.bay\n self.spot_id = self.aisle + self.bay\n\n def add_line(self, slf):\n \"\"\"\n import a bay into this Aisle\n :param slf: Bay information\n :return: None\n \"\"\"\n level = slf.level\n if level in self:\n self[level].add_line(slf)\n else:\n self[level] = Level(slf, self.warehouse)\n self.count += 1\n self[level].add_line(slf)\n\n def get_level(self, level_code: str) -> Level:\n return self[level_code]\n" }, { "alpha_fraction": 0.615450918674469, "alphanum_fraction": 0.6214818954467773, "avg_line_length": 29.543859481811523, "blob_id": "720780aa05ef8ea9c9eea9dfd08e2413cc0dc24e", "content_id": "04552caab539fe8b6b18e777e5e6b88bc2fb848c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3482, "license_type": "no_license", "max_line_length": 108, "num_lines": 114, "path": "/cLibrary/methods/general.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nfrom typing import List, Tuple, Set, Optional\n\n\ndef open_excel(file_path, output=None):\n \"\"\"\n open most recently created excel doc\n :return: None (prints update to Output console)\n \"\"\"\n try:\n output.r_insert(\"Opening file...\") if output is not None else 'pass'\n output.update()\n assert file_path is not None, \"ObjectError: No consolidations run yet\"\n os.system('start excel.exe \"{}\"'.format(file_path))\n except AssertionError as e:\n output.r_insert(e) if output is not None else AssertionError(e)\n\n\ndef create_xlsx(data: List, cols: List, outfile: str) -> str:\n \"\"\"\n create a xlsx file\n :param data: sheet data\n :param cols: sheet columns\n :param outfile: output location\n :return: outfile string\n \"\"\"\n outfile = xlsx_ft(outfile)\n df1 = pd.DataFrame(data, columns=cols)\n writer = pd.ExcelWriter(outfile)\n df1.to_excel(writer, 'Sheet1')\n writer.save()\n return outfile\n\n\ndef data_to_dataframe(data: List, cols: List, sheet_name=None) -> Tuple[Optional[str], pd.DataFrame]:\n df = pd.DataFrame(data, columns=cols)\n if sheet_name is not None:\n sheet_name = sheet_name.replace('/', '.')\n return sheet_name, df\n\n\ndef dataframes_to_xlsx(dfs: List[Tuple[Optional[str], pd.DataFrame]], outfile: str) -> str:\n writer = pd.ExcelWriter(xlsx_ft(outfile))\n for i, df in enumerate(dfs):\n sheet_name = str(df[0]) if df[0] is not None else ('Sheet_' + str(i + 1))\n df[1].to_excel(writer, sheet_name)\n writer.save()\n return outfile\n\n\ndef intersperse(lst, item=\"\"):\n \"\"\"\n intersperse a list with a set character\n :param lst: list to intersperse\n :param item: character to intersperse with\n :return: resulting list\n \"\"\"\n result = [item] * (len(lst) * 2)\n result[0::2] = lst\n return result\n\n\ndef center_to_screen(window, adj=0) -> None:\n \"\"\"\n Set a Tk window to the centre of the screen\n :param window: Tk or Toplevel object\n :param adj: amount to adjust vertically\n :return: None\n \"\"\"\n width = window.winfo_reqwidth()\n height = window.winfo_reqheight()\n x = (window.winfo_screenwidth() // 2) - (width // 2)\n y = (window.winfo_screenheight() // 2) - (height // 2) - adj\n window.geometry('{}x{}+{}+{}'.format(width, height, x, y))\n\n\ndef center_to_win(window, master) -> None:\n \"\"\"\n Set window to the center of another window\n :param window: Tk or Toplevel object to set location\n :param master: Tk or Toplevel to center on\n :return: None\n \"\"\"\n window.update()\n x = 
master.winfo_x()\n y = master.winfo_y()\n w = window.winfo_reqwidth()\n h = window.winfo_reqheight()\n total_x = x + (master.winfo_width() // 2) - (w // 2)\n total_y = y + (master.winfo_height() // 2) - (h // 2)\n window.geometry(\"%dx%d+%d+%d\" % (int(w), int(h), int(total_x), int(total_y)))\n\n\ndef relocate_window(window, x, y) -> None:\n \"\"\"\n relocate windows\n :param window: Tk or Toplevel for relocation\n :param x: x pos to move to\n :param y: y pos to move to\n :return: None\n \"\"\"\n window.geometry(\"%dx%d+%d+%d\" % (int(window.winfo_width()), int(window.winfo_height()), int(x), int(y)))\n\n\ndef flatten_list_of_lists(list_of_lists):\n return [inner for outer in list_of_lists for inner in outer]\n\n\ndef xlsx_ft(filepath: str) -> str:\n if len(filepath) < 5 or filepath[-5:] != \".xlsx\":\n return filepath + '.xlsx'\n else:\n return filepath\n" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 3.444444417953491, "blob_id": "d07e883abb5cb16e68505e273589407f0270b20a", "content_id": "68897ba9d7248f2662eac38b95726ca145c7d763", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 39, "license_type": "no_license", "max_line_length": 4, "num_lines": 9, "path": "/resources/data/widgets.cp", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "WFP\nWRFP\nBAD\nAFP\nRAFP\nSTS\nBIL\nBAIL\nNone" }, { "alpha_fraction": 0.5872162580490112, "alphanum_fraction": 0.6015531420707703, "avg_line_length": 39.82926940917969, "blob_id": "0b865d5717f585c167a7d41cf66d3557de58b0c2", "content_id": "4964dbf12012e8ca8210ef7eec69eb1f02c77c6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1674, "license_type": "no_license", "max_line_length": 121, "num_lines": 41, "path": "/cLibrary/widgets/controlPanel/WRFP.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.widgets.controlPanel.DispWidgetFrame import *\n\n\nclass WRFP(DispWidgetFrame):\n \"\"\"\n This is a graphic display of how full the warehouse is\n The green bar shown is how many full slots there are\n The grey bar behind is how many total slots there are\n \"\"\"\n def __init__(self, master, warehouse, controller, height=50, width=200, bg=\"snow\", fg=\"lime green\", *args, **kwargs):\n self.bg = bg\n self.fg = fg\n super().__init__(master, warehouse, controller, height=height, width=width, *args, **kwargs)\n\n def load_title(self):\n super(WRFP, self).load_title()\n self.title['text'] = \"Reserve Warehouse Fill Display\"\n self.title['bg'] = \"MediumOrchid4\"\n self.title['fg'] = \"Snow\"\n\n def load_display(self):\n h = self.winfo_reqheight() - 60\n reserves = self.warehouse.get_reserve_slots()\n empty = 0\n for slot in reserves:\n if not slot.stock_records:\n empty += 1\n filled = len(reserves) - empty\n total = empty + filled\n if total == 0:\n total = 1\n\n title_str = \"Filled Slots / Total Slots\\n\" + str(filled) + \" / \" + str(total)\n label = Label(self, text=title_str, bg=\"white\", fg=\"black\", relief=\"groove\")\n label.place(y=h + 20, width=self.winfo_reqwidth(), height=40)\n\n filled_ratio = (filled / total * self.winfo_reqwidth()) // 1\n background = Label(self, bg=self.bg, relief=\"groove\")\n background.place(width=self.winfo_reqwidth(), height=h, y=20)\n filled_graphic = Label(self, bg=self.fg)\n filled_graphic.place(x=2, y=2 + 20, width=filled_ratio, height=h - 5)\n" }, { "alpha_fraction": 
0.5947677493095398, "alphanum_fraction": 0.6022424101829529, "avg_line_length": 37.224491119384766, "blob_id": "0e23d029cd92d9dc87446b2fba07dca834fc7356", "content_id": "ad803e21325fcf1a9d0a8cceeaf73cf273184ea9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, "license_type": "no_license", "max_line_length": 83, "num_lines": 49, "path": "/cLibrary/widgets/ScrolledFrame.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import tkinter as tk\n\n\nclass ScrolledFrame(tk.Frame):\n\n def __init__(self, master, *args, **kwargs):\n super(ScrolledFrame, self).__init__(master, *args, **kwargs)\n vscrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)\n vscrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\n self.canvas = canvas = tk.Canvas(self, bd=0, yscrollcommand=vscrollbar.set)\n canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.TRUE)\n vscrollbar.config(command=canvas.yview)\n\n canvas.xview_moveto(0)\n canvas.yview_moveto(0)\n\n self.interior = interior = tk.Frame(canvas)\n interior_id = canvas.create_window(0, 0, window=interior, anchor=tk.NW)\n\n def _config_interior(event):\n size = (interior.winfo_reqwidth(), interior.winfo_reqheight())\n canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if interior.winfo_reqwidth() != canvas.winfo_width():\n canvas.config(width=interior.winfo_reqwidth())\n\n interior.bind('<Configure>', _config_interior)\n\n def _config_canvas(event):\n if interior.winfo_reqwidth() != canvas.winfo_width():\n canvas.itemconfigure(interior_id, width=canvas.winfo_width())\n canvas.bind('<Configure>', _config_canvas)\n\n self.canvas.bind_all('<MouseWheel>', self.__mouse_scroll)\n self.bind_all('<MouseWheel>', self.__mouse_scroll)\n self.bind(\"<Configure>\", self.__on_frame_configure)\n\n def __mouse_scroll(self, event):\n if event.delta:\n self.canvas.yview_scroll(int(-1 * (event.delta / 100)), \"units\")\n else:\n if event.num == 5:\n move = 1\n else:\n move = -1\n self.canvas.yview_scroll(move, \"units\")\n\n def __on_frame_configure(self, event=None):\n self.canvas.configure(scrollregion=self.canvas.bbox(tk.ALL))\n" }, { "alpha_fraction": 0.6456456184387207, "alphanum_fraction": 0.6456456184387207, "avg_line_length": 32.29999923706055, "blob_id": "e65371fdc6605e703603ff20264622717bddfd91", "content_id": "84b42e9493306d733c98d086dae57ad8f871cce9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 105, "num_lines": 20, "path": "/cLibrary/methods/slotReport.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom typing import List, Tuple, Set\nfrom cLibrary.structure.item.Item import Item\nfrom cLibrary.methods.general import data_to_dataframe\n\n\ndef pick_slot_report(items: List[Item]) -> [str, pd.DataFrame]:\n data = []\n\n data = [[item.item_id,\n record.location.aisle,\n record.location.bay,\n record.location.level,\n record.location.position,\n record.qty, \"\"] for item in items for record in item.allocations]\n return data_to_dataframe(data, ['ITEM ID', 'Aisle', 'Bay', 'Level', 'Position', 'QTY', 'ACTUAL QTY'])\n\n\ndef reserve_slot_report(items: List[Item]) -> pd.DataFrame:\n pass\n" }, { "alpha_fraction": 0.4137931168079376, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 13.5, "blob_id": "b488a28ba794f9613c6adc2f4d8c7c9730b960d8", "content_id": "e53f423c4422e3c302c28011117397483b353ad6", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 29, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/requirements.txt", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "pandas==0.24.1\nPillow==8.2.0\n" }, { "alpha_fraction": 0.5278106331825256, "alphanum_fraction": 0.5569230914115906, "avg_line_length": 36.72321319580078, "blob_id": "e19d827c014f55cb4e5c4454425a6aad39c80cf5", "content_id": "6cee13063d9c64db02ec258699d46bdab02f9faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4225, "license_type": "no_license", "max_line_length": 166, "num_lines": 112, "path": "/cLibrary/guis/popups/ImportCategories.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.guis.popups.Notification import Notification\nfrom cLibrary.widgets.InfoHover import InfoHover\n\n\nclass ImportCategories(StandardPopUp):\n\n def load_display(self):\n l1 = Label(self, text=\"Category File :\")\n l1.place(x=40, y=30)\n\n def entry_set(entry, text):\n \"\"\"\n Set entry box text\n :param entry: entry box\n :param text: text to set\n :return: None\n \"\"\"\n\n if text!='':\n entry['state'] = 'normal'\n entry.delete(0, 'end')\n entry.insert(END, text)\n entry['state'] = 'readonly'\n\n item_file = StringVar()\n item_entry = Entry(self, textvariable=item_file, state='readonly')\n item_entry.place(x=40, y=50, width=390, height=20)\n\n def ok_press():\n if item_entry.get() == \"\" or item_entry.get() == \"No File Selected\":\n entry_set(item_entry, \"No File Selected\")\n item_entry.configure(fg=\"red\")\n return\n\n else:\n self.import_item_cat(item_entry.get())\n\n item_button = Button(self, text=\"\\uD83D\\uDCC2\", relief=\"groove\", command=lambda: (entry_set(item_entry, askopenfilename()), item_entry.configure(fg=\"black\")))\n item_button.place(x=430, y=50, width=20, height=20)\n\n info_hover = InfoHover(self, \"Select item category data\")\n info_hover.place(x=450, y=50, width=20, height=20)\n\n l2 = Label(self, text=\"File Format : \")\n l2.place(x=40, y=80)\n\n excel_eg = Frame(self,)\n excel_eg.place(x=115, y=80)\n\n excel_eg.grid_columnconfigure(0, minsize=50)\n excel_eg.grid_columnconfigure(1, minsize=50)\n\n col_1_title = Label(excel_eg, text=\"item id\", bg=\"white\", relief=\"groove\")\n col_1_title.grid(column=0, row=0, sticky=\"nsew\")\n\n col_2_title = Label(excel_eg, text=\"category\", bg=\"white\", relief=\"groove\")\n col_2_title.grid(column=1, row=0, sticky=\"nsew\")\n\n col_1_row_1 = Label(excel_eg, text=\"527540\", bg=\"white\", relief=\"groove\")\n col_1_row_1.grid(column=0, row=1, sticky=\"nsew\")\n\n col_1_row_2 = Label(excel_eg, text=\"...\", bg=\"white\", relief=\"groove\")\n col_1_row_2.grid(column=0, row=2, sticky=\"nsew\")\n\n col_2_row_1 = Label(excel_eg, text=\"ashdene\", bg=\"white\", relief=\"groove\")\n col_2_row_1.grid(column=1, row=1, sticky=\"nsew\")\n\n col_2_row_2 = Label(excel_eg, text=\"...\", bg=\"white\", relief=\"groove\")\n col_2_row_2.grid(column=1, row=2, sticky=\"nsew\")\n\n ok_button = Button(self, text=\"upload\", bg=\"lime\", fg=\"grey20\", relief=\"groove\", command=ok_press)\n ok_button.place(x=400, y=70, height=20, width=\"50\")\n\n def import_item_cat(self, file):\n if not isinstance(self.controller, Controller):\n raise TypeError(\"Controller must be a controller\")\n with open(file, \"r\", 
encoding='utf-8-sig') as cat_file:\n data = []\n for line in cat_file:\n data.append(line.strip(\"\\n\").split(\",\"))\n\n added_count = 0\n total_count = 0\n error_count = 0\n\n cats = {}\n error_items = []\n\n for x in data:\n item_string = x[0].lstrip()\n item = self.controller.warehouse.item_list[item_string]\n total_count += 1\n if item is not None:\n added_count += 1\n self.controller.categories.add_item(item, x[1].lower())\n cats[x[1].lower()] = (1 if cats.get(x[1].lower()) is None else cats.get(x[1].lower())+1)\n else:\n error_items.append(item_string)\n error_count += 1\n msg = \"Successfully assigned items to categories:\\n\"\n\n for key in cats:\n msg += \"\\t{} items successfully assigned to {}\\n\".format(cats[key], key)\n\n for error in error_items:\n msg += 'error with item: ' + error + '\\n'\n\n msg += '\\n{} out of {} items added\\n'.format(added_count, total_count)\n msg += 'Errors = {}'.format(error_count)\n self.wait_window(Notification(self, msg, width=500, height=180))\n self.on_close()\n" }, { "alpha_fraction": 0.7124999761581421, "alphanum_fraction": 0.7124999761581421, "avg_line_length": 39, "blob_id": "17498158431cfc9e9cf6548d6d14059b5cc44297", "content_id": "8010617e5793b5395c50d57fbd6d35c4f9fd2c0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 141, "num_lines": 6, "path": "/Main.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.MainWindow import MGSlotSystem\n\n\nif __name__ == '__main__':\n win = MGSlotSystem(\"resources/source/item.csv\", \"resources/source/slot.csv\", \"resources/source/itmslot.csv\", \"resources/source/hits.csv\")\n win.mainloop()\n" }, { "alpha_fraction": 0.6248000264167786, "alphanum_fraction": 0.6299999952316284, "avg_line_length": 45.314815521240234, "blob_id": "44301b53d6cc35083636fee84558c0f2c2b8b00c", "content_id": "43a0b748077fe2a693b856c247eeb72241857b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2500, "license_type": "no_license", "max_line_length": 109, "num_lines": 54, "path": "/cLibrary/widgets/AppIcon.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.ToolTip import CreateToolTip\nfrom PIL import ImageTk, Image\n\n\nclass AppIcon(Frame):\n\n def __init__(self, master, window, **kwargs):\n \"\"\"\n Icon for opening a task bar app in MGSS\n :param master: master window\n :param img: icon image\n :param window: window that icon opens\n :param kwargs: style arguments\n \"\"\"\n super(AppIcon, self).__init__(master)\n\n # initialize keyword argument values\n bg = kwargs['bg'] if kwargs.get('bg') is not None else 'dark grey'\n name = kwargs['name'] if kwargs.get('name') is not None else 'testing'\n width = kwargs['width'] if kwargs.get('width') is not None else 25\n height = kwargs['height'] if kwargs.get('height') is not None else 25\n self.img = kwargs['img'] if kwargs.get('img') is not None else None\n\n # check value types\n if not isinstance(width, int): raise TypeError(\"width must be of type Integer\")\n if not isinstance(height, int): raise TypeError(\"height must be of type Integer\")\n if not isinstance(name, str): raise TypeError(\"name must be of type String\")\n if not isinstance(bg, str): raise TypeError(\"bg must be of type String\")\n\n if self.img is not None:\n self.img = 
Image.open(self.img).resize((width-5, width-5), Image.ANTIALIAS)\n self.img = ImageTk.PhotoImage(self.img)\n\n osa = Button(self, image=self.img, anchor=\"n\", bg=bg, bd=1, command=lambda: self.open_window(window))\n self.configure(width=width, height=height)\n osa.configure(width=width-5, height=height-5)\n osa.place(x=0, y=0)\n CreateToolTip(osa, name, c_off=width-15)\n\n def open_window(self, window):\n self.master.root_menu.entryconfig(\"File\", state=\"disabled\")\n self.master.root_menu.entryconfig(\"Edit\", state=\"disabled\")\n self.master.root_menu.entryconfig(\"View\", state=\"disabled\")\n self.master.root_menu.entryconfig(\"Stock\", state=\"disabled\")\n self.master.wait_window(window(self.master.controller.container,\n self.master.controller))\n try:\n self.master.root_menu.entryconfig(\"Edit\", state=\"normal\")\n self.master.root_menu.entryconfig(\"File\", state=\"normal\")\n self.master.root_menu.entryconfig(\"View\", state=\"normal\")\n self.master.root_menu.entryconfig(\"Stock\", state=\"normal\")\n except TclError:\n pass" }, { "alpha_fraction": 0.6008836627006531, "alphanum_fraction": 0.6273932456970215, "avg_line_length": 31.380952835083008, "blob_id": "9a785e28ccbe3e2f8ea8289ea3a157d056680701", "content_id": "939f6631317d337d65133d9ba93d65ba53cac70c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 119, "num_lines": 21, "path": "/cLibrary/guis/popups/Notification.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.widgets.Output import Output\n\n\nclass Notification(StandardPopUp):\n\n def __init__(self, master, msg, *args, **kwargs):\n self.msg = msg\n super(Notification, self).__init__(master, *args, **kwargs)\n\n def load_display(self):\n w = self.winfo_reqwidth()\n h = self.winfo_reqheight()\n\n output = Output(self,)\n output.place(x=10, y=10, width=w-20, height=h-50)\n\n ok_button = Button(self, text=\"okay\", bg=\"lime\", fg=\"grey35\", relief=\"groove\", command=lambda: self.on_close())\n ok_button.place(x=w-70, y=h-30, width=60, height=20)\n\n output.r_insert(self.msg)" }, { "alpha_fraction": 0.6174402236938477, "alphanum_fraction": 0.6202531456947327, "avg_line_length": 32.873016357421875, "blob_id": "0a9deaf13374fb6e0294e0b3b820ffe31ce92914", "content_id": "7bcf3b5ab7e31174478b8af6a5e7e8d6a029df52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2133, "license_type": "no_license", "max_line_length": 139, "num_lines": 63, "path": "/cLibrary/guis/popups/StandardPopup.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter.ttk import Combobox\nfrom cLibrary.methods.general import *\nimport PIL.ImageTk, PIL.Image, copy, shutil\nfrom tkinter.filedialog import *\nfrom cLibrary.guis.Controller import Controller\nfrom typing import Union, Optional, Set, List\nfrom tkinter import *\n\n\nclass StandardPopUp(Toplevel):\n\n def __init__(self, master, controller: Controller = None, width=230, height=220, title=None, cen_win=None, icon=None, *args, **kwargs):\n \"\"\"\n Build standard Popup\n :param master: Master window\n :param controller: Data controller (used to access data)\n :param width: Width of popup\n :param height: Height of popup\n :param title: Title of popup\n :param cen_win: window to center on\n :param icon: Popup window icon\n :param args: Extra 
arguments\n :param kwargs: Keyword arguments\n \"\"\"\n super(StandardPopUp, self).__init__(master, *args, **kwargs)\n self.load_config(width, height, cen_win, title, icon)\n self.config_loaded = False\n self.controller = controller # type: Controller\n self.load_display()\n\n def load_config(self, width, height, master, title, icon):\n \"\"\"\n Loading configuration for popup\n :param width: width of popup\n :param height: height of popup\n :param master: master of popup\n :param title: title for popup\n :param icon: icon for popup\n :return: None\n \"\"\"\n self.configure(width=width, height=height)\n self.resizable(False, False)\n center_to_win(self, self.master) if not master else center_to_win(self, master)\n self.iconbitmap(icon) if not master else 'pass'\n self.title(title) if not master else 'pass'\n self.grab_set()\n self.protocol(\"WM_DELETE_WINDOW\", self.on_close)\n self.config_loaded = True\n\n def load_display(self):\n \"\"\"\n Load display of popup\n :return: None\n \"\"\"\n pass\n\n def on_close(self):\n \"\"\"\n Closing popup protocols\n :return: None\n \"\"\"\n self.master.grab_set()\n self.destroy()" }, { "alpha_fraction": 0.6859756112098694, "alphanum_fraction": 0.6859756112098694, "avg_line_length": 26.33333396911621, "blob_id": "91b50c32166357dd04cea07be8d2591f9146c632", "content_id": "3595044ef863927d9dfbc2e3e328d4ecc1d31f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 65, "num_lines": 12, "path": "/cLibrary/widgets/WarehouseFrame.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport tkinter.ttk as ttk\nfrom cLibrary.guis.Controller import Controller\n\n\nclass WarehouseFrame(Frame):\n\n def __init__(self, master, *args, **kw):\n super(WarehouseFrame, self).__init__(master, *args, **kw)\n\n def get_controller(self) -> Controller:\n return self.master.get_controller()\n" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.5143288373947144, "avg_line_length": 23.55555534362793, "blob_id": "8311c0533829fa6bb0cb35be0c792015b1348728", "content_id": "b0054f09df9379ae062859024188359ad4f69ae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 70, "num_lines": 27, "path": "/cLibrary/widgets/AreaCheckbox.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\nclass AreaCheckbutton(Checkbutton):\n\n def __init__(self, master, area, *args, **kwargs):\n \"\"\"\n Initiliase check button\n :param master: master window\n :param area: area link\n \"\"\"\n super(AreaCheckbutton, self).__init__(master, *args, **kwargs)\n self['onvalue'] = 1\n self['offvalue'] = 0\n self.var = IntVar(self)\n self['variable'] = self.var\n self.area = area\n\n def get(self):\n \"\"\"\n get area link\n :return: area that is linked\n \"\"\"\n if self.var.get() == 1:\n return self.area\n else:\n return None\n" }, { "alpha_fraction": 0.583032488822937, "alphanum_fraction": 0.5866426229476929, "avg_line_length": 25.428571701049805, "blob_id": "54bef7de44f3d80721dc3b2c94116c9571db6880", "content_id": "a4bc4700607622b90f10dd3927e876ea35c2231a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 554, "license_type": "no_license", "max_line_length": 61, "num_lines": 21, "path": 
"/cLibrary/widgets/Output.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\nclass Output(Text):\n\n def __init__(self, master, *args, **kwargs):\n super(Output, self).__init__(master, *args, **kwargs)\n self.configure(state=\"disabled\")\n\n def r_insert(self, text):\n self.configure(state=\"normal\")\n self.delete(0.0, END)\n self.insert(END, text)\n self.configure(state=\"disabled\")\n self.update()\n\n def i_insert(self, text):\n self.configure(state=\"normal\")\n self.insert(END, text)\n self.configure(state=\"disabled\")\n self.update()" }, { "alpha_fraction": 0.5603688359260559, "alphanum_fraction": 0.587856650352478, "avg_line_length": 44.626983642578125, "blob_id": "9d093a968a444eba43c02fb399995afb89887038", "content_id": "db362bc2af3c0303c599eee958ca1386fd6ed2d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5748, "license_type": "no_license", "max_line_length": 111, "num_lines": 126, "path": "/cLibrary/widgets/controlPanel/BAIL.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.widgets.controlPanel.DispWidgetFrame import *\nfrom cLibrary.widgets.controlPanel.SlotRow import SlotRow\n\n\nclass BAIL(DispWidgetFrame):\n\n def __init__(self, master, warehouse, controller, height=50, width=200, bg=\"snow\", fg=\"lime green\", *args,\n **kwargs):\n self.bg = bg\n self.fg = fg\n super().__init__(master, warehouse, controller, height=height, width=width, *args, **kwargs)\n\n def load_title(self):\n super(BAIL, self).load_title()\n self.title['text'] = \"Best Aisle Items List\"\n self.title['bg'] = \"light grey\"\n self.title.place(y=0, x=0, width=self.winfo_reqwidth() - 40 - 40 - 100, height=40)\n\n def load_display(self):\n self.item_num = 20\n\n i_ware = iter(self.warehouse)\n self.ais_num = (next(i_ware).spot_id)\n self.items = []\n\n self.hier_canvas = Canvas(self, width=self.winfo_reqwidth() - 5, height=self.winfo_reqheight() - 45)\n self.hier_frame = Frame(self.hier_canvas)\n\n self.scrollbar = Scrollbar(self.hier_canvas, orient=\"vertical\", command=self.hier_canvas.yview)\n self.hier_canvas.configure(yscrollcommand=self.scrollbar.set)\n\n self.hier_canvas.place(x=0, y=42)\n self.scrollbar.place(x=self.winfo_reqwidth() - 20, y=0, width=20, height=self.winfo_reqheight() - 42)\n\n self.canvas_frame = self.hier_canvas.create_window((0, 0), window=self.hier_frame, anchor=\"nw\")\n\n self.hier_canvas.bind(\"<MouseWheel>\", self.mouse_scroll)\n self.hier_frame.bind(\"<MouseWheel>\", self.mouse_scroll)\n self.bind(\"<Configure>\", self.on_frame_configure)\n\n update_button = Button(self, text=\"\\uD83D\\uDD04\", bg=\"light green\", font=\"bold 18\",\n command=lambda: self.create(), relief=\"groove\")\n update_button.place(x=self.winfo_reqwidth() - 40, y=0, width=20, height=20)\n\n self.num_input = Entry(self, font=\"bold 10\", relief=\"groove\", bd=2, bg=\"white\")\n self.num_input.place(x=self.winfo_reqwidth() - 40 - 40, y=0, width=40, height=20)\n self.num_input.insert(0, self.item_num)\n\n sel_num_label = Label(self, text=\"Num of Slots: \", bg=\"grey\", relief=\"groove\", fg=\"white\")\n sel_num_label.place(x=self.winfo_reqwidth() - 40 - 40 - 100, y=0, width=100, height=20)\n\n sel_ais_label = Label(self, text=\"Selected Aisle: \", bg=\"grey\", relief=\"groove\", fg=\"white\")\n sel_ais_label.place(x=self.winfo_reqwidth() - 40 - 40 - 100, y=20, width=100, height=20)\n\n blank = Label(self, bg=\"grey\", 
relief=\"groove\", fg=\"white\")\n blank.place(x=self.winfo_reqwidth() - 40, y=20, width=40, height=20)\n\n self.ais_input = Entry(self, font=\"bold 10\", relief=\"groove\", bd=2, bg=\"white\")\n self.ais_input.place(x=self.winfo_reqwidth() - 40 - 40, y=20, width=40, height=20)\n self.ais_input.insert(0, self.ais_num)\n\n self.create()\n\n def mouse_scroll(self, event):\n if event.delta:\n self.hier_canvas.yview_scroll(int(-1 * (event.delta / 100)), \"units\")\n else:\n if event.num == 5:\n move = 1\n else:\n move = -1\n self.hier_canvas.yview_scroll(move, \"units\")\n\n def on_frame_configure(self, event=None):\n self.hier_canvas.configure(scrollregion=self.hier_canvas.bbox(ALL))\n\n def create(self):\n self.update()\n while len(self.items) > 0:\n item = self.items.pop()\n item.destroy()\n\n self.item_num = int(self.num_input.get())\n self.ais_num = (self.ais_input.get())\n\n slots = self.warehouse[self.ais_num].get_best_avehitsday(self.item_num)\n row = None\n row_val = 1\n y_val = 20\n for slot in slots:\n row = SlotRow(self.hier_frame, row_val, slot, width=self.hier_canvas.winfo_width() - 20, height=20)\n row.place(x=0, y=y_val)\n row.bind(\"<MouseWheel>\", self.mouse_scroll)\n self.items.append(row)\n y_val += 20\n row_val += 1\n\n self.hier_frame.configure(width=self.hier_canvas.winfo_width(), height=y_val)\n self.hier_frame.update()\n if row is not None:\n nw = row.numLabel.winfo_width()\n sw = row.tempLabel.winfo_width()\n iw = row.itemLabel.winfo_width()\n hw = row.hitsLabel.winfo_width()\n dhw = row.dayshitsLabel.winfo_width()\n aw = row.avgLabel.winfo_width()\n self.hier_canvas.update()\n self.hier_canvas.configure(scrollregion=self.hier_canvas.bbox(ALL))\n num_label = Label(self, text=\"#\", bg=\"azure3\", relief=\"groove\")\n num_label.place(y=42, x=0, width=nw, height=20)\n slot_label = Label(self, text=\"Slot ID\", bg=\"azure3\", relief=\"groove\")\n slot_label.place(y=42, x=nw, width=sw, height=20)\n item_label = Label(self, text=\"Item ID\", bg=\"azure3\", relief=\"groove\")\n item_label.place(y=42, x=nw + sw, width=iw, height=20)\n hits_label = Label(self, text=\"Hits\", bg=\"azure3\", relief=\"groove\")\n hits_label.place(y=42, x=nw + sw + iw, width=hw, height=20)\n day_hits_label = Label(self, text=\"Days Hit\", bg=\"azure3\", relief=\"groove\")\n day_hits_label.place(y=42, x=nw + sw + iw + hw, width=dhw, height=20)\n avg_label = Label(self, text=\"Avg Hits / Day\", bg=\"azure3\", relief=\"groove\")\n avg_label.place(y=42, x=nw + sw + iw + hw + dhw, width=aw, height=20)\n self.items.append(num_label)\n self.items.append(slot_label)\n self.items.append(item_label)\n self.items.append(hits_label)\n self.items.append(day_hits_label)\n self.items.append(avg_label)" }, { "alpha_fraction": 0.5458333492279053, "alphanum_fraction": 0.5569444298744202, "avg_line_length": 27.571428298950195, "blob_id": "5d9d200a6d4609714cc502440519c47bec470712", "content_id": "35f9a7f67469ccd2aef55e24a45498a669eb2101", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3600, "license_type": "no_license", "max_line_length": 102, "num_lines": 126, "path": "/cLibrary/structure/item/Item.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.datatypes.LineFormat import ILF\nfrom typing import List, Set, Union\n\n\nclass Item:\n\n def __init__(self, ilf, hl, warehouse):\n # ['Urbane Quince+Blueberry 125ml Diffuser', 'Seasonal', 'DIFFUSER', nan, '125ml', nan,\n # 'URBA4', 'Urbane', 'SHAD', 
'Shadows']\n self.item_id = ilf.item_id\n self.total_qty = ilf.qty\n self.hits = 0\n self.avehitsday = 0\n self.dayshit = 0\n self.pmax = ilf.pmax\n self.lettrigger = ilf.lettrigger\n\n self.carton = Carton(ilf.length_pc, ilf.width_pc, ilf.height_pc, ilf.weight_pc, ilf.units_pc)\n self.inner = Inner(ilf.length_pi, ilf.width_pi, ilf.height_pi, ilf.weight_pi, ilf.units_pi)\n self.unit = Unit(ilf.length_pu, ilf.width_pu, ilf.height_pu, ilf.weight_pu, 1)\n self.barcode1 = ilf.barcode1\n self.barcode2 = ilf.barcode2\n\n self.allocations = []\n self.stock_records = []\n\n if hl is not None and hl is not False:\n self.hits = hl.hits\n self.avehitsday = hl.avehitsday\n self.dayshit = hl.dayshit\n\n def get_unit_vol(self):\n unit = self.unit\n return (unit.width * unit.length * unit.height) / (1000000000)\n\n def get_inner_vol(self):\n inner = self.inner\n vol = (inner.width * inner.length * inner.height) / (1000000000)\n\n if vol == float(0):\n return self.get_unit_vol()\n\n return vol\n\n\nclass Packet:\n def __init__(self, length, width, height, weight, units):\n self.length = length\n self.width = width\n self.height = height\n self.weight = weight\n self.units = units\n\n\nclass Unit(Packet):\n def __init__(self, *args, **kwargs):\n super(Unit, self).__init__(*args, **kwargs)\n\n\nclass Inner(Packet):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass Carton(Packet):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass ItemList:\n\n def __init__(self, filename, hits_hash, warehouse):\n self.count = 0\n self.warehouse = warehouse\n self.hits_hash = hits_hash\n self.items = {}\n self.item_list = []\n self.read_item_data(filename)\n\n def read_item_data(self, filename):\n f = open(filename)\n fline = True\n name_indexing = {}\n for line in f:\n if fline:\n line1 = line.strip('\\n').replace('\"', '').split(\",\")\n for i, name in enumerate(line1):\n name_indexing[name] = i\n fline = False\n else:\n self.add_line(line, name_indexing)\n f.close()\n\n def add_line(self, line, indexing):\n ilf = ILF(line, indexing)\n item_id = ilf.item_id\n try:\n item_hlf = self.hits_hash[item_id]\n except:\n item_hlf = None\n item = Item(ilf, item_hlf, self.warehouse)\n self[item_id] = item\n self.count += 1\n\n def __contains__(self, item_id):\n return item_id in self.items\n\n def __setitem__(self, item_id, item):\n if self.items.get(item_id) is not None: # IMPORTANT! 
- without this items will be doubled up!\n self.item_list.remove(self.items[item_id])\n self.items[item_id] = item\n self.item_list.append(item)\n\n def __iter__(self):\n return self.items.__iter__()\n\n def __getitem__(self, item_id):\n return self.items[item_id]\n\n def __len__(self):\n return self.count\n\n def get_item(self, item_id):\n return self.items[item_id]\n" }, { "alpha_fraction": 0.5584016442298889, "alphanum_fraction": 0.5747950673103333, "avg_line_length": 25.37837791442871, "blob_id": "6424a437a025bef6a16eab287ea82972f8482e7c", "content_id": "f94d3767b450daf462742af0364066e4227c0cfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "no_license", "max_line_length": 57, "num_lines": 37, "path": "/cLibrary/guis/popups/PickSlotSettings.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.controlPanel.DSP import DSP\nfrom cLibrary.methods.general import center_to_win\n\n\nclass PickSlotSettings(Toplevel):\n\n def __init__(self, master, slot):\n \"\"\"\n Initialise popup\n :param master: master window\n :param slot: pick slot to display\n \"\"\"\n super(PickSlotSettings, self).__init__(master)\n self.area = slot\n self.load_settings()\n self.grab_set()\n dsp = DSP(self, self.area, width=180, height=130)\n dsp.place(x=10, y=10)\n\n def load_settings(self):\n \"\"\"\n Load popup config\n :return:\n \"\"\"\n self.resizable(False, False)\n self.protocol(\"WM_DELETE_WINDOW\", self.on_close)\n # self.configure(width=200, height=200)\n center_to_win(self, self.master)\n\n def on_close(self):\n \"\"\"\n Close popup protocol\n :return:\n \"\"\"\n self.master.grab_set()\n self.destroy()\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.644444465637207, "avg_line_length": 35.818180084228516, "blob_id": "44b976b15c32ad8ef5b970a570ef0d329f09cd3d", "content_id": "3615cab3aa58cd87c84033fe692eb78c4f716446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/cLibrary/widgets/WinOutput.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.Output import Output\n\n\nclass WinOutput(Output):\n\n def __init__(self, master, width, height, x, y, *args, **kwargs):\n super(WinOutput, self).__init__(master, *args, **kwargs)\n self.title = Label(self.master, text=\"Output\")\n self.title.place(x=x, y=y, width=width, height=30)\n self.place(x=x, y=y+30, width=width, height=height-30)\n" }, { "alpha_fraction": 0.5384907722473145, "alphanum_fraction": 0.5395036935806274, "avg_line_length": 28.91666603088379, "blob_id": "211183e4a035378e119494046eb47336bb5e3152", "content_id": "c33c739f5aea536d852b3b51761c1374dc55acaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7898, "license_type": "no_license", "max_line_length": 99, "num_lines": 264, "path": "/cLibrary/structure/warehouse/Area.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import random\nfrom typing import List, Tuple, Set\n\n\nclass Area:\n\n def __init__(self):\n self.spot_id = None\n self.spots = {}\n self.count = 0\n\n def __eq__(self, other):\n \"\"\"\n Area equality testing\n :param other: Area to compare to\n :return: Boolean\n \"\"\"\n if not isinstance(other, 
Area):\n            raise TypeError(\"Cannot compare {} type with Area type\".format(type(other)))\n        return self.spot_id == other.spot_id\n\n    def __lt__(self, other):\n        \"\"\"\n        Area comparison testing\n        :param other: Area to compare to\n        :return: Boolean\n        \"\"\"\n        if not isinstance(other, Area):\n            raise TypeError(\"Cannot compare {} type with Area type\".format(type(other)))\n        return self.spot_id < other.spot_id\n\n    def __gt__(self, other):\n        \"\"\"\n        Area comparison testing\n        :param other: Area to compare to\n        :return: Boolean\n        \"\"\"\n        if not isinstance(other, Area):\n            raise TypeError(\"Cannot compare {} type with Area type\".format(type(other)))\n        return self.spot_id > other.spot_id\n\n    def __delitem__(self, key):\n        \"\"\"\n        Delete child area from current area\n        :param key: Key to area of child\n        :return: None\n        \"\"\"\n        del self.spots[key]\n\n    def check_duplicate(self, key):\n        \"\"\"\n        Check if spot already exists in children of this Area\n        :param key:\n        :return:\n        \"\"\"\n        if key in self.spots:\n            raise KeyError(\"Spot already exists in the warehouse, cannot have two identical Spots\")\n\n    def get_sorted_list(self):\n        spots = [*self.spots.values()]\n        spots.sort()\n        return spots\n\n    def __len__(self):\n        return sum(map(lambda x: len(x), self))\n\n    def __setitem__(self, key, value):\n        \"\"\"\n        Used to set spots\n        :param key: spot number\n        :param value: spot object\n        \"\"\"\n        self.check_duplicate(key)\n        self.spots[key] = value\n\n    def __contains__(self, key):\n        \"\"\"\n        :param key: aisle number\n        :return: return True if the warehouse contains this aisle, else return False\n        \"\"\"\n        if key in self.spots:\n            return True\n        else:\n            return False\n\n    def create_level(self, level_n):\n        area = self.get_pick_slots()\n        level = [slot for slot in area if slot.level == str(level_n)]\n        return level\n\n    def __getitem__(self, key):\n        \"\"\"\n        :param key: aisle number\n        :return: aisle object\n        \"\"\"\n        if key in self.spots:\n            return self.spots[key]\n        else:\n            return None\n\n    def __iter__(self):\n        return self.spots.values().__iter__()\n\n    def get_filled_pick_slots_count(self):\n        \"\"\"\n        Get the number of slots that have an item allocated to them\n        :return: returns the total number of filled areas within this area\n        # A \"filled area\" is a pick_slot with an item within it in this instance\n        \"\"\"\n        e = 0\n        for spot in self:\n            e += spot.get_filled_pick_slots_count()\n        return e\n\n    def get_empty_pick_slots_count(self):\n        \"\"\"\n        Get the number of slots that do not have an item allocated to them\n        :return: returns the total number of empty pick slots within this area\n        # An \"empty pick slot\" is a pick_slot with no item within it in this instance\n        \"\"\"\n        e = 0\n        for spot in self:\n            e += spot.get_empty_pick_slots_count()\n        return e\n\n    def get_best_avehitsday(self, num, used_slots=[]):\n        \"\"\"\n        Purpose - To get the best slots according to their average hits per day\n        :param num: number of slots you want\n        :param used_slots: slots already used, so do not find these ones\n        :return: list of the best slots according to the average hits per day of that slot\n        \"\"\"\n\n        # this is done to make sure not searching for more slots than are actually filled\n        filled = self.get_filled_pick_slots()\n        max_i = len(filled)\n        if num > max_i:\n            num = max_i\n\n        filled.sort(key=lambda x: x.get_item_avehitsday(), reverse=True)\n        best_list = filled[:num]\n        return best_list\n\n    def get_best_dayxhits(self, num, used_slots=[]):\n        \"\"\"\n        Purpose - To get the best slots according to their average hits per day\n\n        :param num: number of slots you want\n        
:param used_slots: slots already used, so do not find these ones (currently unused)\n        :return: list of the best slots according to the weighted average hits per day of that slot\n        \"\"\"\n\n        # this is done to make sure not searching for more slots than are actually filled\n        filled = self.get_filled_pick_slots()\n        max_i = len(filled)\n        if num > max_i:\n            num = max_i\n\n        filled.sort(key=lambda x: x.item.avehitsday * (x.item.dayshit*0.3), reverse=True)\n        best_list = filled[:num]\n        return best_list\n\n    def get_filled_pick_slots(self):\n        \"\"\"\n        Get a list of all the filled pick slots in this area\n        :return: List of filled slots found in this area\n        \"\"\"\n        filled = []\n        for spot in self:\n            temp = spot.get_filled_pick_slots()\n            if temp is not None:\n                filled += temp\n        return filled\n\n    def get_pick_slots(self, filt=None):\n        \"\"\"\n        Get list of all slots in area\n        :return: List of slots\n        \"\"\"\n        slots = []\n        for spot in self:\n            slots += spot.get_pick_slots(filt)\n        return slots\n\n    def get_best_hits(self):\n        \"\"\"\n        get best hits of position in this Area\n        :return: best hits value found, or None if no positions are filled\n        \"\"\"\n        best_hits = None\n        for area in self:\n            if not isinstance(area, Area):\n                raise TypeError(\"area should be of type Area\")\n            if best_hits is None:\n                best_hits = area.get_best_hits()\n            else:\n                temp_hits = area.get_best_hits()\n                if temp_hits is not None:\n                    best_hits = max(best_hits, temp_hits)\n        return best_hits\n\n    def get_average_hits(self):\n        \"\"\"\n        get average hits of all filled positions in this Area\n        :return: float of average hits\n        \"\"\"\n        try:\n            total = 0\n            for spot in self:\n                total += spot.aux_average_hits()\n            n = self.get_filled_pick_slots_count()\n            return total/n\n        except ZeroDivisionError:\n            return 0\n\n    def aux_average_hits(self):\n        \"\"\"\n        get total hits of all filled positions in this Area (helper for get_average_hits)\n        :return: float of total hits\n        \"\"\"\n        total = 0\n        for spot in self:\n            total += spot.aux_average_hits()\n        return total\n\n    def find_item_reserves(self, item_id):\n        \"\"\"\n        Find reserve slots of an Item\n        :param item_id: Item id string\n        :return: list of reserve slot ids holding the item\n        \"\"\"\n        slots = []\n        reserves = self.get_reserve_slots()\n        for res in reserves:\n            for item in res.items:\n                if item.item_id == item_id:\n                    slots.append(res.spot_id)\n        return slots\n\n    def get_reserve_slots(self):\n        \"\"\"\n        Get list of reserve slots\n        :return: List[Slot]\n        \"\"\"\n        slots = []\n        for spot in self:\n            slots += spot.get_reserve_slots()\n        return slots\n\n    def find_area(self, area_id):\n        \"\"\"\n        find an area\n        :param area_id: area id that needs to be matched\n        :return: matching Area, or None if no match is found\n        \"\"\"\n        if self.spot_id == area_id:\n            return self\n        for spot in self.spots.values():\n            area = spot.find_area(area_id)\n            if area is not None:\n                return area\n        return None\n" }, { "alpha_fraction": 0.5303621292114258, "alphanum_fraction": 0.5559888482093811, "avg_line_length": 35.65306091308594, "blob_id": "5901527f919276ed5e4fedb6789bd8b855e4bb72", "content_id": "b0b388439ada2a07043a9a5d8be1e7509cd21138", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1795, "license_type": "no_license", "max_line_length": 110, "num_lines": 49, "path": "/cLibrary/widgets/controlPanel/BAD.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.ToolTip import CreateToolTip\nfrom cLibrary.widgets.controlPanel.HexColour import HexColour\n\n\nclass BAD(Frame):\n\n    def __init__(self, master, warehouse, controller, height=50, width=200, *args, **kwargs):\n        super().__init__(master, *args, 
**kwargs)\n self.warehouse = warehouse\n self.width = width\n self.height = height\n self.h = height\n self.y_val = 0\n self.x_val = 0\n self.configure(width=width, height=height)\n\n self.title = Label(self, text=\"Best Aisles PickSlot Display\", bg=\"CadetBlue1\", relief=\"groove\")\n self.title.place(x=0, y=0, width=self.width, height=20)\n self.h -= 40\n self.y_val += 20\n\n hits = []\n best = 0\n for aisle in self.warehouse:\n hit = aisle.get_average_hits()\n if hit > 0:\n hits.append((aisle.aisle, hit))\n if hit > best:\n best = hit\n\n hits.sort(key=lambda x: x[0])\n self.w = (self.width/len(hits))//1\n for hit in hits:\n filled_ratio = (hit[1] / best * self.h) // 1\n OldRange = (best - 0)\n NewRange = (255 - 0)\n NewValue = (((hit[1] - 0) * NewRange) / OldRange) // 1\n\n self.color = HexColour(255, 255 - int(NewValue), 0)\n\n tempLabel = Label(self, bg=str(self.color), relief=\"groove\")\n tempLabel.place(x=self.x_val, y=self.y_val+self.h-filled_ratio, width=self.w, height=filled_ratio)\n\n CreateToolTip(tempLabel, \"avg hits / day: {}\".format(round(hit[1], 3)), c_off=(-15 + self.w))\n\n text = Label(self, text=hit[0], relief=\"groove\")\n text.place(x=self.x_val, y=self.h + 20, width=self.w, height=20)\n self.x_val += self.w" }, { "alpha_fraction": 0.6349353194236755, "alphanum_fraction": 0.6349353194236755, "avg_line_length": 34.47541046142578, "blob_id": "2a34d71a22c02a820bf265efbb858245d9a54349", "content_id": "2b3cdf16f7108000a8ccf33c5baff17b7ef446e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2164, "license_type": "no_license", "max_line_length": 111, "num_lines": 61, "path": "/cLibrary/structure/warehouse/CustomArea.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Aisle import Aisle\nfrom cLibrary.structure.warehouse.Bay import Bay\nfrom cLibrary.structure.warehouse.Level import Level\nfrom cLibrary.methods.AreaMethods import *\n\n\nclass CustomArea(Area):\n\n def __init__(self, areas, area_name, warehouse):\n \"\"\"\n Creates a \"Custom Area\" this is an area that doesn't have to contain solely one type of Area\n (eg Aisles only contain bay) CustomAreas can contain Aisles, Bays, Levels, other CustomAreas. 
It can be\n any mix and match of any of those types\n :param areas: the areas to combine to make a CustomArea\n \"\"\"\n\n if not isinstance(areas, list):\n raise TypeError(\"areas needs to be of Type list, which contains objects of Type Area\")\n super().__init__()\n\n self.area_name = area_name\n self.warehouse = warehouse\n\n for area in areas:\n self.area_error(area)\n self.add_spot(area)\n\n def area_error(self, area):\n self.light_area_error(area)\n for spot in self.spots:\n check_nested_area(self[spot], area, self.warehouse)\n\n def error_check(self, area):\n for j in area:\n for i in self:\n check_nested_area(j, i, self.warehouse)\n\n def add_spot(self, area):\n self.spots[area.spot_id] = area\n\n def light_area_error(self, area):\n if not isinstance(area, (Aisle, Bay, Level, CustomArea, Slot)):\n raise TypeError(\"area must be of Type Aisle, Bay, Level or another CustomArea\")\n\n def __add__(self, other):\n self.area_error(other)\n self.add_spot(other)\n\n def __sub__(self, other):\n self.light_area_error(other)\n try:\n del self.spots[other.spot_id]\n except ValueError:\n raise ValueError(\"Area being removed from the Custom Area does not exist within the Custom Area.\")\n\n def remove(self, area):\n if area.spot_id in self:\n del self[area.spot_id]\n return\n raise ValueError(\"Area being removed from the Custom Area does not exist within the Custom Area.\")\n" }, { "alpha_fraction": 0.5614973306655884, "alphanum_fraction": 0.5775400996208191, "avg_line_length": 33.04545593261719, "blob_id": "b106564c78f319a8efdffcff23105605c542538a", "content_id": "5d243e2c31b1cf583d22062ea4ddad96d65aebaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "no_license", "max_line_length": 89, "num_lines": 22, "path": "/cLibrary/widgets/Clock.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport time\n\n\nclass Clock(Frame):\n def __init__(self, master, width=50, height=20):\n self.show_colon = True\n super().__init__(master)\n self.label = Label(self, text=time.strftime(\"%H:%M\"), bg=\"snow\", relief=\"groove\")\n self.label.place(x=0, y=0, width=width, height=height)\n self.configure(width=width, height=height)\n self.up_clock()\n\n def up_clock(self):\n if self.show_colon:\n self.label.configure(text=time.strftime(\"%H:%M\"))\n self.after(600, self.up_clock)\n self.show_colon = False\n else:\n self.label.configure(text=time.strftime(\"%H %M\"))\n self.after(600, self.up_clock)\n self.show_colon = True" }, { "alpha_fraction": 0.5394015312194824, "alphanum_fraction": 0.5441264510154724, "avg_line_length": 38.260128021240234, "blob_id": "b4a68cb01259e3823072a9cf98293d9dbeacf058", "content_id": "1361f16d196b22e0b427399829631fb1ec121a91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18413, "license_type": "no_license", "max_line_length": 146, "num_lines": 469, "path": "/cLibrary/guis/MainWindow.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter.font import *\nfrom tkinter.ttk import Combobox, Progressbar, Notebook, Separator\nfrom cLibrary.methods.general import *\n\nfrom cLibrary.widgets.controlPanel.DashGrid import DashGrid\nfrom cLibrary.widgets.AppIcon import AppIcon\nfrom cLibrary.widgets.Clock import Clock\nfrom cLibrary.widgets.DayNightButton import DayNightButton\n\nfrom cLibrary.guis.popups.ErrorWindow import 
ErrorWindow\nfrom cLibrary.guis.popups.DashDimensions import DashDimensions\nfrom cLibrary.guis.popups.AreasPopUp import AreasPopUp\nfrom cLibrary.guis.popups.ChooseNewWarehouse import ChooseNewWarehouse\nfrom cLibrary.guis.popups.ImportCategories import ImportCategories\nfrom cLibrary.guis.popups.SelectArea import SelectArea\n\nfrom cLibrary.guis.windows.SlotAllocation import SlotAllocation\nfrom cLibrary.guis.windows.MGCons import MGCons\nfrom cLibrary.guis.windows.Relay import Relay\nfrom cLibrary.guis.windows.DistroWindow import Distro\n\nfrom tkinter.filedialog import askopenfilename, asksaveasfile, asksaveasfilename\nfrom PIL import ImageTk, Image\nfrom cLibrary.structure.warehouse.Warehouse import Warehouse\nfrom cLibrary.structure.warehouse.CustomArea import CustomArea\nfrom cLibrary.methods.AreaMethods import best_sort_1_4\nfrom cLibrary.guis.Controller import Controller\nimport shutil\nfrom typing import List, Set, Union, Optional, Tuple\n\n\nclass MGSlotSystem(Tk):\n def __init__(self, item_file, slot_file, stock_file, hits_file):\n \"\"\"\n initialize the MGSlotSystem Window\n :param item_file: item data of warehouse\n :param slot_file: slot data of warehouse\n :param stock_file: stock data of warehouse\n :param hits_file: hit data of warehouse\n \"\"\"\n super().__init__()\n # values\n self.slot_file = slot_file\n self.item_file = item_file\n self.stock_file = stock_file\n self.hits_file = hits_file\n\n # window properties\n error_check = True\n while error_check:\n try:\n self.warehouse = Warehouse(item_file, slot_file, stock_file, hits_file)\n error_check = not error_check\n except Exception as error:\n self.wait_window(ErrorWindow(self, error))\n\n self.recent_cps = []\n self.load_recent_cps()\n\n self.configure(width=1200, height=650, bg=\"snow\")\n self.controller = Controller(self, self.warehouse)\n self.resizable(False, False)\n self.title(\"MGSS\")\n self.iconbitmap(\"resources/img/main-icon.ico\")\n self.grab_set()\n\n self.root_menu = Menu(self)\n self.menus()\n self.config(menu=self.root_menu)\n self.protocol(\"WM_DELETE_WINDOW\", self.on_close)\n\n self.dash = DashGrid(self.controller.container, self.controller)\n self.load_cp()\n self.task_bar()\n\n def on_close(self, event=None):\n \"\"\"\n MGSS system close procedure\n :param event: catch event if needed\n :return: None\n \"\"\"\n self.save_cp()\n self.save_recent_cps()\n self.save_con_settings()\n self.controller.on_close()\n wigs_w = self.controller.wigs_w\n wigs_h = self.controller.wigs_h\n x = self.winfo_x()\n y = self.winfo_y()\n with open(\"resources/data/config.txt\", \"w\") as file:\n file.write(\"{},{}\\n{},{}\".format(wigs_w, wigs_h, x, y))\n self.destroy()\n\n def update_dash(self):\n \"\"\"\n reset dash board of control panel\n :return: None\n \"\"\"\n self.dash.destroy()\n self.save_cp()\n self.dash = DashGrid(self.controller.container, self.controller)\n self.load_cp()\n\n def menus(self):\n \"\"\"\n Create the menu bar for MGSS\n :return: None\n \"\"\"\n # FILE MENUS\n file_menu = Menu(self.root_menu, tearoff=0)\n self.root_menu.add_cascade(label=\"File\", menu=file_menu)\n\n if True:\n file_menu.add_command(label=\"New Warehouse...\", accelerator=\"Ctrl+N\", command=lambda: ChooseNewWarehouse(self))\n new_menu = Menu(file_menu, tearoff=0)\n file_menu.add_cascade(label=\"New...\", menu=new_menu)\n new_menu.add_command(label=\"Item File\", command=lambda: self.change_warehouse(item_file=askopenfilename()))\n new_menu.add_command(label=\"Slot File\", command=lambda: 
self.change_warehouse(slot_file=askopenfilename()))\n new_menu.add_command(label=\"ItmSlot File\",\n command=lambda: self.change_warehouse(stock_file=askopenfilename()))\n new_menu.add_command(label=\"Hits File\", command=lambda: self.change_warehouse(hits_file=askopenfilename()))\n\n file_menu.add_separator()\n\n # File Commands\n if True: # File Commands\n file_menu.add_command(label=\"Save...\", accelerator=\"Ctrl+S\", command=lambda:self.save_ware_format())\n # file_menu.add_command(label=\"Save As...\", accelerator=\"Ctrl+Shift+S\", command=lambda:self.save_as())\n file_menu.add_command(label=\"Open...\", accelerator=\"Ctrl+O\", command=lambda:self.load_ware_format())\n\n self.bind_all(\"<Control-s>\", self.save_ware_format)\n self.bind_all(\"<Control-S>\", self.save_ware_format)\n # self.bind_all(\"<Control-Shift-S>\", self.save_as)\n # self.bind_all(\"<Control-Shift-s>\", self.save_as)\n self.bind_all(\"<Control-O>\", self.load_ware_format)\n self.bind_all(\"<Control-o>\", self.load_ware_format)\n # Open Recent\n if True: # Open Recent\n open_recent_menu = Menu(file_menu,tearoff=0)\n file_menu.add_cascade(label=\"Open Recent...\", menu=open_recent_menu)\n\n # File Commands\n if True: # File Commands\n\n file_menu.add_separator()\n file_menu.add_command(label=\"Exit\", command=self.on_close)\n\n # EDIT MENUS\n if True:\n edit_menu = Menu(self.root_menu, tearoff=0)\n self.root_menu.add_cascade(label=\"Edit\", menu=edit_menu)\n edit_menu.add_command(label=\"Warehouse Areas\", command=lambda: AreasPopUp(self, self.controller))\n edit_menu.add_command(label=\"Categories\")\n pass\n\n # VIEW MENUS\n if True:\n view_menu = Menu(self.root_menu, tearoff=0)\n self.root_menu.add_cascade(label=\"View\", menu=view_menu)\n\n # control panel cascade\n if True:\n control_panel = Menu(view_menu, tearoff=0)\n view_menu.add_cascade(label=\"Control Panel\", menu=control_panel)\n\n control_panel.add_command(label=\"Save CP\", command=lambda: self.menu_save_cp())\n control_panel.add_command(label=\"Load CP\", command=lambda: self.menu_load_cp())\n\n self.recent_cps_menu = Menu(control_panel, tearoff=0)\n control_panel.add_cascade(label=\"Load Recent CP...\", menu=self.recent_cps_menu)\n\n for recent in self.recent_cps:\n self.recent_cps_menu.add_command(label=re.sub(r'.*/', '/', recent),\n command=lambda e=recent: self.menu_load_cp(e))\n control_panel.add_command(label=\"Dimensions\", command=lambda: DashDimensions(self, self.controller))\n view_menu.add_separator()\n view_menu.add_command(label=\"Warehouse\",\n command=lambda: SelectArea(self,\n self.warehouse,\n False))\n\n # STOCK MENUS\n if True:\n stock_menu = Menu(self.root_menu, tearoff=0)\n self.root_menu.add_cascade(label=\"Stock\", menu=stock_menu)\n\n # Stock.Import Menus\n if True: # File.Import Menus\n stock_import_menu = Menu(stock_menu,\n tearoff=0)\n stock_menu.add_cascade(label=\"Import\",\n menu=stock_import_menu)\n stock_import_menu.add_cascade(label=\"New Stock (Unavailable)\")\n stock_import_menu.add_cascade(label=\"Stock Categories\",\n command=lambda: ImportCategories(self,\n controller=self.controller,\n width=500,\n height=175,\n title=\"Import Categories\",)\n )\n\n # HELP MENUS\n help_menu = Menu(self.root_menu, tearoff=0)\n self.root_menu.add_cascade(label=\"Help\", menu=help_menu)\n\n def task_bar(self):\n \"\"\"\n Load task bar apps and widgets\n :return: None\n \"\"\"\n osa = AppIcon(self, SlotAllocation, img=\"resources/img/mgsa-icon.ico\", name=\"Slot Allocation\", bg=\"#FFB642\") # Slot allocation app button\n 
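# Each AppIcon below pairs a taskbar icon with one of the tool window classes\n        # (Distro, MGCons, Relay, SlotAllocation) and acts as its launcher; the icons\n        # sit on a 30px-spaced row, with a Clock widget pinned to the right edge.\n        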
osa.place(x=97, y=0)\n\n        osa2 = AppIcon(self, MGCons, img=\"resources/img/cons-icon.ico\", name=\"Consolidations\", bg=\"#FFFFFF\") # Consolidations app button\n        osa2.place(x=37, y=0)\n\n        osa3 = AppIcon(self, Relay, img=\"resources/img/relay-icon.ico\", name=\"Warehouse Relay\", bg=\"#b37fc1\",) # Relay warehouse app button\n        osa3.place(x=67, y=0)\n\n        osa4 = AppIcon(self, Distro, img=\"resources/img/distro-icon.ico\", name=\"Distro\", bg=\"#FFB642\")\n        osa4.place(x=7, y=0)\n\n        clock = Clock(self, height=25)  # Clock Widget\n        clock.place(x=self.winfo_width() - 50, y=0)\n\n    def change_warehouse(self, item_file=None, slot_file=None, stock_file=None, hits_file=None):\n        \"\"\"\n        Load new warehouse to the system\n        :param item_file: item data\n        :param slot_file: slot data\n        :param stock_file: stock data\n        :param hits_file: hits data\n        :return: None\n        \"\"\"\n        if item_file == '' or slot_file == '' or stock_file == '' or hits_file == '':\n            return None\n        if slot_file is None:\n            slot_file = self.slot_file\n        else:\n            self.slot_file = slot_file\n        if item_file is None:\n            item_file = self.item_file\n        else:\n            self.item_file = item_file\n        if stock_file is None:\n            stock_file = self.stock_file\n        else:\n            self.stock_file = stock_file\n        if hits_file is None:\n            hits_file = self.hits_file\n        else:\n            self.hits_file = hits_file\n\n        self.warehouse = Warehouse(item_file, slot_file, stock_file, hits_file)\n        self.controller.warehouse = self.warehouse\n        self.controller.save_areas()\n        self.controller.load_areas()\n        self.save_cp()\n        self.load_cp()\n\n    def daynight(self, night):\n        \"\"\"\n        Toggle dark mode\n        :param night: True to enable dark mode, False to restore the light theme\n        :return: None\n        \"\"\"\n        if night:\n            self.configure(bg=\"gray26\")\n            for child in self.winfo_children():\n                child.configure(bg=\"gray26\")\n\n            for child in self.controller.container.winfo_children():\n                child.configure(bg=\"gray26\")\n                for child2 in child.winfo_children():\n                    child2.configure(bg=\"gray38\")\n        else:\n            self.configure(bg=\"snow\")\n            for child in self.winfo_children():\n                child.configure(bg=\"snow\")\n\n            for child in self.controller.container.winfo_children():\n                child.configure(bg=\"snow\")\n                for child2 in child.winfo_children():\n                    child2.configure(bg=\"gray78\")\n\n    def save_ware_format(self, event=None):\n        \"\"\"\n        Save warehouse layout\n        :param event: event catch\n        :return: None\n        \"\"\"\n        file_dir = asksaveasfilename(initialdir=\"resources/data/saves\", filetypes=(('wf files', '*.wf'),), defaultextension=\".wf\")\n        if file_dir == '':\n            return None\n        with open(file_dir, \"w\") as file:\n            for area in self.controller.areas:\n                file.write(area.area_name)\n                for spot in area:\n                    file.write(\",\" + spot.spot_id)\n                file.write(\"\\n\")\n\n    def load_ware_format(self, event=None, file=None):\n        \"\"\"\n        Load warehouse format from file\n        :param event: catch event\n        :param file: file to load\n        :return: None\n        \"\"\"\n        if file is None:\n            file_dir = askopenfilename(initialdir=\"resources/data/saves\", filetypes=(('wf files', '*.wf'),))\n        else:\n            file_dir = file\n        if file_dir == '':\n            return None\n        with open(file_dir, \"r\") as file:\n            areas = []\n            for line in file:\n                line = line.strip(\"\\n\").split(\",\")\n                i = 1\n                while i < len(line):\n                    try:\n                        save_line = line[i]\n                        spot = self.controller.warehouse.find_area(line[i])\n                        if spot is None:\n                            raise ValueError(\"Save File Error:\\n\\nSpot ID {} could not be found in the warehouse; contact \"\n                                             \"support if further assistance is needed\".format(save_line))\n                        line[i] = spot\n                    except Exception as e:\n                        line.pop(i)\n                        ErrorWindow(self, e)\n                        i -= 1\n                    i += 
1\n try:\n areas.append(CustomArea(line[1:], line[0], self.warehouse))\n except Exception as e:\n ErrorWindow(self, \"Error creating area {}\\n\\nThis area has been removed\".format(line[0]))\n self.controller.areas = areas\n\n def save_con_settings(self, event=None):\n \"\"\"\n Save Consolidation settings\n :param event: event catch\n :return: None\n \"\"\"\n with open(\"resources/data/con-set.txt\", \"w\") as file:\n file.write(str(self.controller.bay_range) + \",\" + str(self.controller.gap) + \",\" + str(self.controller.hp))\n file.close()\n\n def menu_load_cp(self, file=None):\n \"\"\"\n loading control panel save file\n :param file: file to load to the control panel\n :return: None\n \"\"\"\n if file is None:\n file_dir = askopenfilename(initialdir=\"resources/data/saves\", filetypes = (('cp files', '*.cp'),))\n else:\n file_dir = file\n file_dir = re.sub(r'.*resources', 'resources', file_dir)\n if file_dir == '':\n return None\n self.add_recent_cp(file_dir)\n file = open(file_dir, \"r\")\n ws = []\n for line in file:\n ws.append(line.strip(\"\\n\"))\n\n i = 0\n for widget in self.dash:\n widget.clean()\n if i < len(ws) and ws[i] != \"None\":\n widget.load_widget(ws[i])\n i += 1\n file.close()\n\n def menu_save_cp(self):\n \"\"\"\n save current control panel layout\n :return: None\n \"\"\"\n file_dir = asksaveasfilename(initialdir=\"resources/data/saves\", filetypes=(('cp files', '*.cp'),), defaultextension=\".cp\")\n if file_dir == '':\n return None\n file = open(file_dir, \"w\")\n f_line = True\n for widget in self.dash:\n if f_line:\n of_type = str(widget.current_widget_type)\n f_line = False\n else:\n of_type = \"\\n\" + str(widget.current_widget_type)\n file.write(of_type)\n file.close()\n\n def load_recent_cps(self):\n \"\"\"\n Load in recent control panel files\n :return: None\n \"\"\"\n with open(\"resources/data/recent_cps.txt\", \"r\") as file:\n for line in file:\n self.recent_cps.append(line.strip(\"\\n\"))\n\n def save_recent_cps(self):\n \"\"\"\n Save recent control panel files for next load of system\n :return: None\n \"\"\"\n with open(\"resources/data/recent_cps.txt\", \"w\") as file:\n f_line = True\n for line in self.recent_cps:\n if f_line:\n file.write(line)\n f_line = False\n else:\n file.write(\"\\n\" + line)\n\n def save_cp(self):\n \"\"\"\n Save current control panel layout to default control panel file location\n :return: None\n \"\"\"\n with open(\"resources/data/widgets.cp\", \"w\") as file:\n f_line = True\n for widget in self.dash:\n if f_line:\n of_type = str(widget.current_widget_type)\n f_line = False\n else:\n of_type = \"\\n\" + str(widget.current_widget_type)\n file.write(of_type)\n\n def load_cp(self, file_dir=None):\n \"\"\"\n Load control panel layout from file, if file_dir == None load default file\n :param file_dir: file location to load from\n :return: None\n \"\"\"\n with open(\"resources/data/widgets.cp\", \"r\") if file_dir is None else open(file_dir, \"r\") as file:\n ws = []\n for line in file:\n ws.append(line.strip(\"\\n\"))\n\n i = 0\n for widget in self.dash:\n widget.clean()\n if i < len(ws) and ws[i] != \"None\":\n widget.load_widget(ws[i])\n i += 1\n\n def add_recent_cp(self, file_dir):\n \"\"\"\n Add control panel save file to recent cp files\n :param file_dir: file to add\n :return: None\n \"\"\"\n while len(self.recent_cps) > 9:\n self.recent_cps.pop(len(self.recent_cps)-1)\n self.recent_cps_menu.delete(len(self.recent_cps)-1)\n i = 0\n while i < len(self.recent_cps):\n if self.recent_cps[i] == file_dir:\n 
self.recent_cps.pop(i)\n self.recent_cps_menu.delete(i)\n break\n i += 1\n self.recent_cps.insert(0, file_dir)\n self.recent_cps_menu.insert(0, label=re.sub(r'.*/', '/', file_dir), command=lambda e=file_dir: self.menu_load_cp(e), itemType=COMMAND)\n" }, { "alpha_fraction": 0.5557480454444885, "alphanum_fraction": 0.5780225396156311, "avg_line_length": 47.10714340209961, "blob_id": "e2e7b670bfc34c1f05cdc6e6afb1f977be1f619c", "content_id": "cf0f5e2413c105fe3bdb63e7edb5912c2e8bce33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8081, "license_type": "no_license", "max_line_length": 134, "num_lines": 168, "path": "/cLibrary/guis/windows/SlotAllocation.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import datetime\nimport pandas as pd\nfrom cLibrary.guis.windows.WidgetWindow import *\nfrom cLibrary.methods.AreaMethods import dayxhits_sort_1_4, best_sort_1_4\nfrom cLibrary.widgets.WinOutput import WinOutput\n\n\nclass SlotAllocation(WidgetWindow):\n\n def __init__(self, master, controller):\n \"\"\"\n initialise from super\n :param master: master window / frame\n :param controller: program controller\n \"\"\"\n super(SlotAllocation, self).__init__(master, controller)\n self.areas = controller.areas\n self.area_dict = {}\n self.file_path = None\n\n for area in self.areas.values():\n self.area_dict[area.area_name] = area\n\n self.combo_options = [key for key in self.area_dict]\n\n self.start_val = StringVar(self)\n self.start_val.set(list(self.areas.values())[0].area_name)\n\n self.start_val2 = StringVar(self)\n self.start_val2.set(list(self.areas.values())[0].area_name)\n\n # create sort areas widgets\n bg = Label(self)\n sort_area = Frame(self)\n sort_area.place(x=60, y=70, width=210, height=100)\n sort_area.grid_rowconfigure((0, 1, 2), weight=2)\n sort_area.grid_columnconfigure(1, weight=1)\n sort_area.grid_columnconfigure(0, weight=1)\n if True:\n title = Label(sort_area, text=\"Best Ave Hits Sort\", bg=\"cornflower blue\", font=\"bold 10\")\n title.grid(row=0, column=0, columnspan=2, sticky=NSEW)\n combo_title = Label(sort_area, text=\"Area\", bg=\"light grey\", font=\"bold 10\")\n combo_title.grid(row=1, column=0, sticky=NSEW)\n sort_button = Button(sort_area, text=\"Sort\", relief=\"groove\", bg=\"maroon1\", font=\"bold 10\",\n command=lambda: self.sort_area(self.area_dict[self.start_val.get()], best_sort_1_4))\n sort_button.grid(row=1, column=1, sticky=NSEW)\n self.combo = Combobox(sort_area, values=self.combo_options, textvariable=self.start_val,\n font=\"bold 10\", state=\"readonly\")\n self.combo.grid(row=2, column=0, sticky=NSEW)\n open_button = Button(sort_area, text=\"Open\", relief=\"groove\", bg=\"chartreuse2\", font=\"bold 10\",\n command=lambda: self.open_file())\n open_button.grid(row=2, column=1, sticky=NSEW)\n pass\n sort_area.update()\n bg.place(x=sort_area.winfo_x()-2, y=sort_area.winfo_y()-2, width=sort_area.winfo_width()+4, height=sort_area.winfo_height()+4)\n\n # create sort2 areas widgets\n bg3 = Label(self)\n sort_area2 = Frame(self)\n sort_area2.place(x=60, y=180, width=210, height=100)\n sort_area2.grid_rowconfigure((0, 1, 2), weight=2)\n sort_area2.grid_columnconfigure(1, weight=1)\n sort_area2.grid_columnconfigure(0, weight=1)\n if True:\n title = Label(sort_area2, text=\"Day x Hits Sort\", bg=\"cornflower blue\", font=\"bold 10\")\n title.grid(row=0, column=0, columnspan=2, sticky=NSEW)\n combo_title = Label(sort_area2, text=\"Area\", bg=\"light grey\", font=\"bold 
10\")\n combo_title.grid(row=1, column=0, sticky=NSEW)\n sort_button = Button(sort_area2, text=\"Sort\", relief=\"groove\", bg=\"maroon1\", font=\"bold 10\",\n command=lambda: self.sort_area(self.area_dict[self.start_val2.get()], dayxhits_sort_1_4))\n sort_button.grid(row=1, column=1, sticky=NSEW)\n self.combo2 = Combobox(sort_area2, values=self.combo_options, textvariable=self.start_val2,\n font=\"bold 10\", state=\"readonly\")\n self.combo2.grid(row=2, column=0, sticky=NSEW)\n open_button = Button(sort_area2, text=\"Open\", relief=\"groove\", bg=\"chartreuse2\", font=\"bold 10\",\n command=lambda: self.open_file())\n open_button.grid(row=2, column=1, sticky=NSEW)\n pass\n sort_area2.update()\n bg3.place(x=sort_area2.winfo_x() - 2, y=sort_area2.winfo_y() - 2, width=sort_area2.winfo_width() + 4,\n height=sort_area2.winfo_height() + 4)\n\n # create warehouse report widgets\n bg2 = Label(self)\n warehouse_reports = Frame(self)\n warehouse_reports.place(x=sort_area.winfo_x(), y=sort_area.winfo_y()+sort_area.winfo_height()+120,\n width=sort_area.winfo_width(), height=90)\n warehouse_reports.grid_columnconfigure(0, weight=1)\n warehouse_reports.grid_rowconfigure(0, weight=1)\n warehouse_reports.grid_rowconfigure(1, weight=2)\n if True:\n title = Label(warehouse_reports, text=\"Get Warehouse Report\", bg=\"cornflower blue\", font=\"bold 10\")\n title.grid(row=0, column=0, sticky=NSEW)\n\n sort_button = Button(warehouse_reports, text=\"Print Report\", relief=\"groove\", bg=\"maroon1\",\n command=lambda: self.warehouse_report(dayxhits_sort_1_4), font=\"bold 10\")\n sort_button.grid(row=1, column=0, sticky=NSEW)\n pass\n warehouse_reports.update()\n bg2.place(x=warehouse_reports.winfo_x()-2,\n y=warehouse_reports.winfo_y()-2,\n width=warehouse_reports.winfo_width() + 4,\n height=warehouse_reports.winfo_height() + 4)\n\n self.output = WinOutput(self, width=870, height=self.controller.wig_height - 80,\n x=self.controller.wig_width - 870 - 20, y=70,\n wrap=WORD, bg=\"snow\", font=\"none 10\", relief=\"groove\")\n\n def sort_area(self, area, sort):\n \"\"\"\n Run slot allocation, and create excel doc\n :param area: warehouse area to run on\n :param sort: sort type to use\n :return: None\n \"\"\"\n now = datetime.datetime.now()\n date = \"{0}-{1}-{2}\".format(now.day, now.month, now.year)\n self.output.r_insert(\"Sorting \" + area.area_name + \" now...\\n\")\n self.output.update()\n area_sort = sort(area)\n self.file_path = \"resources/output/\"+area.area_name + \"_{}_sorted.xlsx\".format(date)\n for i, switch in enumerate(area_sort):\n area_sort[i] = [switch[0].spot_id,\n (str(switch[0].item.item_id) if switch[0].item is not None else str(switch[0].item)),\n \"\", switch[1].spot_id,\n (str(switch[1].item.item_id) if switch[1].item is not None else str(switch[1].item)), \"\"]\n\n df = pd.DataFrame(area_sort, columns=[\"From Slot\", \"Item Code\", \"Moved?\", \"To Slot\", \"Item Code\", \"Moved?\"])\n\n writer = pd.ExcelWriter(self.file_path)\n df.to_excel(writer, 'Sheet1')\n writer.save()\n self.output.i_insert(area.area_name.title() + \" has been sorted.\")\n self.output.update()\n\n def warehouse_report(self, sort):\n \"\"\"\n run warehouse report for slot allocation\n :param sort: sort to use\n :return: None (output prints to the Output console)\n \"\"\"\n area_nums = []\n self.output.r_insert(\"\")\n for area in self.areas.values():\n self.output.i_insert(\" \" + str(area.area_name.title()) + \" has \" + str(len(sort(area))) + \" swaps to make\\n\")\n self.output.update()\n\n def open_file(self):\n 
\"\"\"\n open most recently created excel doc\n :return: None (prints update to Output console)\n \"\"\"\n try:\n self.output.r_insert(\"Opening file...\")\n self.output.update()\n if self.file_path is None:\n assert False, \"ObjectError: No consolidations run yet\"\n else:\n os.system('start excel.exe \"{}\"'.format(self.file_path))\n except AssertionError as e:\n self.output.r_insert(e)\n\n def load_title(self, text=\"Slot Allocation Program\", bg=\"burlywood1\", relief=\"groove\"):\n \"\"\"\n load window title\n :return: None\n \"\"\"\n super(SlotAllocation, self).load_title(text=text, bg=bg, relief=relief)" }, { "alpha_fraction": 0.6235167384147644, "alphanum_fraction": 0.646530032157898, "avg_line_length": 41.1363639831543, "blob_id": "c091a130c88c2818fad4cbb0f56e420be1d90011", "content_id": "c415a098064501fb129053ee076e9ba317749838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2781, "license_type": "no_license", "max_line_length": 128, "num_lines": 66, "path": "/cLibrary/methods/relay.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Aisle import Aisle\nfrom cLibrary.structure.warehouse.Bay import Bay\nfrom cLibrary.structure.warehouse.Level import Level\nfrom cLibrary.structure.warehouse.Slot import Slot\nfrom cLibrary.structure.item.Item import *\nfrom typing import List, Tuple, Set\nfrom cLibrary.structure.datatypes.Category import Category\nfrom cLibrary.structure.warehouse.CustomArea import CustomArea\nimport pandas as pd\n\n\ndef relay(category, area, excess_area, controller, outfile):\n category = category # type: Category\n area = area # type: CustomArea\n excess_area = excess_area # type: CustomArea\n\n cat_items = category.get_items() # type: List[Item]\n\n # Building the lists of pick slots to fill\n area_spots_23_b = area.get_pick_slots(\n filt=lambda x: x.aisle in [\"BB\", ] and x.level in [\"2\", \"3\"] and x.position in [\"1\", \"2\", \"3\", \"4\"]) # type: List[Slot]\n\n area_spots_23_c = area.get_pick_slots(\n filt=lambda x: x.aisle in [\"CC\", ] and x.level in [\"2\", \"3\"] and x.position in [\"1\", \"2\", \"3\", \"4\"]) # type: List[Slot]\n\n area_spots_14 = area.get_pick_slots(\n filt=lambda x: x.level in [\"4\", ] and x.position in [\"1\", \"2\", \"3\", \"4\"]) # type: List[Slot]\n\n excess_spots = excess_area.get_pick_slots() # type: List[Slot]\n\n cat_items.sort(key=lambda x: x.get_inner_vol(), reverse=True) # sorting items by volume (size)\n area_spots_23_b.sort(key=lambda x: x.spot_id, reverse=True) # sort B aisle in reverse\n area_spots_23_c.sort(key=lambda x: x.spot_id) # sort C aisle normally\n\n area_spots_23 = area_spots_23_b + area_spots_23_c # merge aisles B and C\n\n area_spots_14.sort(key=lambda x: x.spot_id, reverse=True)\n\n for item in cat_items:\n print(item.get_inner_vol())\n\n for spot in area_spots_23:\n print(spot.spot_id)\n\n for spot in area_spots_14:\n print(spot.spot_id)\n\n pairings = [] # type: List[Tuple[Item, Slot]]\n while len(cat_items) > 0 and len(area_spots_23) > 0:\n pairings.append((cat_items.pop(0), area_spots_23.pop(0)))\n\n while len(cat_items) > 0 and len(area_spots_14) > 0:\n pairings.append((cat_items.pop(0), area_spots_14.pop(0)))\n sheet1 = []\n\n for item, slot in pairings:\n sheet1.append([\"No Location\" if not item.allocations else item.allocations[0].location.spot_id, item.item_id,\n 0 if not item.allocations else 
item.allocations[0].qty, \"\", slot.spot_id, \"\", ])\n\n df1 = pd.DataFrame(sheet1, columns=['Current Location', 'Item ID',\n 'Qty', 'Qty Check', 'New Location', 'Moved Check', ])\n writer = pd.ExcelWriter(outfile)\n df1.to_excel(writer, 'Sheet1')\n writer.save()\n return outfile\n" }, { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 3.5, "blob_id": "d1df58cb719281a92bd95bfd1b6d53500a3aa717", "content_id": "465894f2504ef95f3d295be5f06ded032c393a3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 26, "license_type": "no_license", "max_line_length": 4, "num_lines": 6, "path": "/resources/data/saves/test_cp.cp", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "BAD\nBIL\nBAIL\nAFP\nNone\nNone" }, { "alpha_fraction": 0.6066350936889648, "alphanum_fraction": 0.6421800851821899, "avg_line_length": 41.29999923706055, "blob_id": "91b0836aa067ed31728b698348f08331866760ca", "content_id": "b6356fe9762206d3f48ca595cd3eaa5387d6dcaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 121, "num_lines": 10, "path": "/cLibrary/widgets/InfoHover.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.ToolTip import CreateToolTip\n\n\nclass InfoHover(Frame):\n def __init__(self, root, text, *args, **kwargs):\n super(InfoHover, self).__init__(root, *args, **kwargs)\n label = Label(self, text=\" \\u2139\\uFE0F\", bg=\"dodgerblue4\", font=\"none 8\", pady=0, borderwidth=0, fg=\"white\")\n label.place(x=0, y=0, width=10, height=10)\n CreateToolTip(label, text)" }, { "alpha_fraction": 0.567186713218689, "alphanum_fraction": 0.5832914113998413, "avg_line_length": 30.0625, "blob_id": "9659d594dc10616eecc520b5ff9576530ee8cb18", "content_id": "8d18c1c0df17aebaf6b46f9a6e5a7161c288d9f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1987, "license_type": "no_license", "max_line_length": 109, "num_lines": 64, "path": "/cLibrary/guis/windows/WidgetWindow.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter.ttk import *\nfrom tkinter import *\nfrom tkinter.filedialog import *\n\n\nclass WidgetWindow(Frame):\n\n def __init__(self, master, controller):\n \"\"\"\n Overarching class for a standard widget window template\n :param master: master window / frame\n :param controller: program controller\n \"\"\"\n super(WidgetWindow, self).__init__(master)\n self.controller = controller\n self.warehouse = controller.warehouse\n self.configure(width=controller.wig_width, height=controller.wig_height, bg=\"dark grey\")\n self.title = Label(self)\n self.load_title()\n self.load_display()\n self.load_close_button()\n\n self.place(x=0, y=0)\n self.grab_set()\n\n def load_title(self, text=\"Generic Dash Widget\", bg=\"burlywood1\", relief=\"groove\"):\n \"\"\"\n loading widget title\n :return: None\n \"\"\"\n self.title['text'] = text\n self.title['bg'] = bg\n self.title['relief'] = relief\n self.title.place(x=0, y=0, width=self.controller.wig_width-30, height=30)\n\n def get_controller(self):\n return self.controller\n\n def load_display(self):\n \"\"\"\n load widget display\n :return: None\n \"\"\"\n wip_text = Label(self, text=\"Work In Progress\\n(Not Ready Yet)\", font=\"bold 20\", relief=\"groove\")\n 
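# Default body: a centred \"Work In Progress\" placeholder. Concrete tools\n        # either override load_display() (e.g. MGCons, Distro) or build their own\n        # widgets over the top of it (e.g. SlotAllocation).\n        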
wip_text.place(x=self.winfo_reqwidth()/2-200, y=self.winfo_reqheight()/2 - 75, width=400, height=150)\n pass\n\n def load_close_button(self):\n \"\"\"\n load close button for widget\n :return: None\n \"\"\"\n self.close_but = Button(self, command=self.on_close)\n self.close_but['text'] = \"X\"\n self.close_but['bg'] = \"coral1\"\n self.close_but['relief'] = \"groove\"\n self.close_but.place(x=self.controller.wig_width - 30, y=0, width=30, height=30)\n\n def on_close(self):\n \"\"\"\n close widget protocol\n :return:\n \"\"\"\n self.destroy()" }, { "alpha_fraction": 0.5729995369911194, "alphanum_fraction": 0.5894950032234192, "avg_line_length": 41.14024353027344, "blob_id": "5028f5b671a1cfaf39bdf042c8329a180136c328", "content_id": "24ea7769f71614c6b4fd9196adac179246d56877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6911, "license_type": "no_license", "max_line_length": 137, "num_lines": 164, "path": "/cLibrary/guis/windows/MGCons.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.windows.WidgetWindow import *\nfrom cLibrary.guis.popups.GroundConSettings import GroundConSettings\nfrom cLibrary.widgets.ToolTip import CreateToolTip\nfrom cLibrary.widgets.WinOutput import WinOutput\nfrom cLibrary.methods.AreaMethods import ground_con\nfrom cLibrary.methods.general import intersperse\nimport datetime\nimport pandas as pd\n\n\nclass MGCons(WidgetWindow):\n\n def __init__(self, master, controller):\n \"\"\"\n initialize from super class\n :param master: master window\n :param controller: program controller\n \"\"\"\n super(MGCons, self).__init__(master, controller)\n\n def load_title(self, text=\"Consolidation Program\", bg=\"burlywood1\", relief=\"groove\"):\n super(MGCons, self).load_title(text=text, bg=bg, relief=relief)\n\n def load_display(self):\n \"\"\"\n load window display\n :return: None\n \"\"\"\n self.area_dict = {}\n self.file_path = None\n\n for aisle in self.controller.warehouse:\n if len(aisle.get_reserve_slots()) > 0:\n self.area_dict[str(aisle.aisle)] = aisle\n\n settings = Button(self, text=\"\\u2699\\uFE0F\", bg=\"light grey\", fg=\"grey25\", font=\"bold 16\",\n command=lambda: GroundConSettings(self, self.controller))\n settings.place(x=10, y=70, height=40, width=40)\n\n CreateToolTip(settings, \"change consolidation\\n settings\", c_off=30)\n\n self.output = WinOutput(self, width=870, height=self.controller.wig_height - 80,\n x=self.controller.wig_width - 870 - 20, y=70,\n wrap=WORD, bg=\"snow\", font=\"none 10\", relief=\"groove\")\n values = [*self.area_dict.keys()]\n values.sort()\n self.combo_options = [key for key in values]\n\n self.start_val = StringVar(self)\n self.start_val.set(self.combo_options[0])\n\n bg = Label(self)\n\n sort_area = Frame(self)\n sort_area.place(x=60, y=70, width=210, height=100)\n sort_area.grid_rowconfigure((0, 1, 2), weight=2)\n sort_area.grid_columnconfigure(1, weight=0)\n sort_area.grid_columnconfigure(0, weight=1)\n\n gc_label = Label(sort_area, text=\"Ground Con\", bg=\"cornflower blue\", font=\"bold 10\")\n gc_label.grid(row=0, column=0, columnspan=2, sticky=NSEW)\n\n combo_title = Label(sort_area, text=\"Aisle\", bg=\"light grey\", font=\"bold 10\")\n combo_title.grid(row=1, column=0, sticky=NSEW)\n\n sort_button = Button(sort_area, text=\"Consolidate\", relief=\"groove\", bg=\"maroon1\", font=\"bold 10\",\n command=lambda: self.ground_con(self.area_dict[self.start_val.get()]))\n sort_button.grid(row=1, column=1, 
sticky=NSEW)\n\n self.combo = Combobox(sort_area, values=self.combo_options, textvariable=self.start_val,\n font=\"bold 10\", state=\"readonly\")\n self.combo.grid(row=2, column=0, sticky=NSEW)\n\n open_button = Button(sort_area, text=\"Open\", relief=\"groove\", bg=\"chartreuse2\", font=\"bold 10\",\n command=lambda: self.open_file())\n open_button.grid(row=2, column=1, sticky=NSEW)\n sort_area.update()\n bg.place(x=sort_area.winfo_x() - 2, y=sort_area.winfo_y() - 2, width=sort_area.winfo_width() + 4,\n height=sort_area.winfo_height() + 4)\n\n bg2 = Label(self)\n warehouse_reports = Frame(self)\n warehouse_reports.place(x=sort_area.winfo_x(), y=sort_area.winfo_y() + sort_area.winfo_height() + 10,\n width=sort_area.winfo_width(), height=90)\n warehouse_reports.grid_columnconfigure(0, weight=1)\n warehouse_reports.grid_rowconfigure(0, weight=1)\n warehouse_reports.grid_rowconfigure(1, weight=2)\n if True:\n title = Label(warehouse_reports, text=\"Get Warehouse Report\", bg=\"cornflower blue\", font=\"bold 10\")\n title.grid(row=0, column=0, sticky=NSEW)\n\n sort_button = Button(warehouse_reports, text=\"Print Report\", relief=\"groove\", bg=\"maroon1\",\n command=lambda: self.ground_report(), font=\"bold 10\")\n sort_button.grid(row=1, column=0, sticky=NSEW)\n pass\n warehouse_reports.update()\n bg2.place(x=warehouse_reports.winfo_x() - 2, y=warehouse_reports.winfo_y() - 2,\n width=warehouse_reports.winfo_width() + 4, height=warehouse_reports.winfo_height() + 4)\n pass\n\n def open_file(self):\n \"\"\"\n open most recently created excel doc\n :return: None (prints update to Output console)\n \"\"\"\n try:\n self.output.r_insert(\"Opening file...\")\n self.output.update()\n if self.file_path is None:\n assert False, \"ObjectError: No consolidations run yet\"\n else:\n os.system('start excel.exe \"{}\"'.format(self.file_path))\n except AssertionError as e:\n self.output.r_insert(e)\n\n def ground_con(self, area):\n \"\"\"\n run ground con and create excel file\n :param area: area to consolidate\n :return: None (prints update in output console)\n \"\"\"\n self.output.r_insert(\"Working out Consolidations...\\n\")\n self.output.update()\n cons = ground_con(area, self.controller.bay_range, self.controller.gap, self.controller.hp)\n\n report = []\n for i in cons:\n temp = []\n for idx, a in enumerate(i):\n i[idx] = a.spot_id\n temp.append(a.aisle)\n temp.append(a.bay)\n temp.append(a.level)\n temp.append(a.position)\n temp.append(\"\")\n report.append(temp)\n\n for idx, a in enumerate(cons):\n cons[idx] = intersperse(cons[idx], \"\")\n\n df = pd.DataFrame(report, columns=[\"Aisle\", \"Bay\", \"Level\", \"Position\", \"Check1\", \"Aisle\", \"Bay\", \"Level\", \"Position\", \"Check2\"])\n\n now = datetime.datetime.now()\n date = \"{0}-{1}-{2}\".format(now.day, now.month, now.year)\n output = \"Ground_A{1}-CON-{0}.xlsx\".format(date, area.aisle)\n self.file_path = \"resources/output/\" + output\n writer = pd.ExcelWriter(self.file_path)\n\n df.to_excel(writer, 'Sheet2')\n writer.save()\n self.output.i_insert(\"Consolidations done.\")\n\n def ground_report(self):\n \"\"\"\n runs consolidations report on entire warehouse\n :return: None (Outputs report to output console)\n \"\"\"\n self.output.r_insert(\"\")\n vals = [*self.area_dict.values()]\n vals.sort(key=lambda x: x.aisle)\n for i in vals:\n temp_con = ground_con(i, self.controller.bay_range, self.controller.gap, self.controller.hp)\n self.output.i_insert(\"Aisle {} has {} consolidations\\n\".format(i.aisle, len(temp_con)))\n 
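# update() flushes pending Tk events so the line above is drawn before the\n            # next synchronous ground_con pass starts.\n            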
self.output.update()\n" }, { "alpha_fraction": 0.5601900815963745, "alphanum_fraction": 0.5612460374832153, "avg_line_length": 21.819276809692383, "blob_id": "9724c4a78e4555099b5c05c40a82efcb6d40af54", "content_id": "5f6ae7ca5b3413891e1ec3a5f0fc16f7fce5b990", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1894, "license_type": "no_license", "max_line_length": 64, "num_lines": 83, "path": "/cLibrary/structure/datatypes/Category.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.item.Item import Item\n\n\nclass CatList:\n\n def __init__(self):\n self.categories = {}\n\n def add_category(self, category):\n if category in self:\n pass\n else:\n self[category] = Category(category)\n\n def add_item(self, item, category):\n if not isinstance(item, Item):\n raise TypeError(\"Cannot add non-item to a Category\")\n if category in self:\n self[category].add_item(item)\n else:\n self[category] = Category(category)\n self[category].add_item(item)\n\n def values(self):\n return self.categories.values()\n\n def keys(self):\n return self.categories.keys()\n\n def __contains__(self, item):\n return item in self.categories.keys()\n\n def __setitem__(self, key, value):\n self.categories[key] = value\n\n def __iter__(self):\n return self.categories.__iter__()\n\n def __getitem__(self, key):\n return self.categories[key]\n\n def __len__(self):\n return len(self.categories)\n\n\nclass Category:\n\n def __init__(self, name):\n self.name = name\n self.count = 0\n self.items = {}\n\n def add_item(self, item):\n self[item.item_id] = item\n\n def get_items(self):\n return list(self.items.values())\n\n def __contains__(self, item_id):\n return item_id in self.items.keys()\n\n def pop(self, key):\n self.items.pop(key)\n\n def values(self):\n return self.items.values()\n\n def keys(self):\n return self.items.keys()\n\n def __setitem__(self, item_id, item):\n if self.items.get(item_id) is None:\n self.count += 1\n self.items[item_id] = item\n\n def __iter__(self):\n return self.items.__iter__()\n\n def __getitem__(self, item_id):\n return self.items[item_id]\n\n def __len__(self):\n return self.count\n" }, { "alpha_fraction": 0.5172860622406006, "alphanum_fraction": 0.5231921672821045, "avg_line_length": 36.934425354003906, "blob_id": "e0aecd56ae14d7ae7af737642bf71b62a19adf90", "content_id": "28a478a11955889f2149f143d3c034e0ab3aa750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6942, "license_type": "no_license", "max_line_length": 153, "num_lines": 183, "path": "/cLibrary/guis/Controller.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "# from cLibrary.guis.Windows import\nfrom tkinter import *\nfrom tkinter.font import *\nfrom cLibrary.methods.general import *\nfrom cLibrary.structure.datatypes.Category import CatList, Category\nfrom cLibrary.structure.warehouse.CustomArea import CustomArea\nimport tkinter.ttk as ttk\n\n\nclass Controller:\n\n def __init__(self, master, warehouse):\n \"\"\"\n initialize the controller\n :param master: master window\n :param warehouse: warehouse framework the system is running on\n \"\"\"\n self.warehouse = warehouse\n self.areas = {} # type: dict\n self.categories = CatList()\n self.master = master\n\n self.title_font = Font(family='Helvetica', size=18, weight=\"bold\", slant=\"italic\")\n self.title_font2 = Font(family='Calibri', size=11)\n self.title_fontmainTMenu = 
Font(family='Helvetica', size=15)\n        self.title_fontDevices = Font(family='Didot', size=13)\n        self.title_fontT = Font(family='Cambria', size=16)\n        self.wig_width = master.winfo_reqwidth()-10\n        self.wig_height = master.winfo_reqheight()-50\n        self.bay_range = 0\n        self.gap = 0\n        self.hp = 0\n\n        self.load_con_settings()\n        self.load_categories()\n        self.load_areas()\n\n        self._create_styles()\n\n        with open(\"resources/data/config.txt\", \"r\") as file:\n            line = file.readline().strip(\"\\n\").split(\",\")\n            self.wigs_w = int(line[0])\n            self.wigs_h = int(line[1])\n\n            co_ords = file.readline().strip(\"\\n\").split(\",\")\n            x = co_ords[0]\n            y = co_ords[1]\n            wid = self.master.winfo_reqwidth()\n            hei = self.master.winfo_reqheight()\n\n            screen_w = self.master.winfo_screenwidth()\n            screen_h = self.master.winfo_screenheight()\n            if 0 > int(x) or int(x)+10 > screen_w or 0 > int(y) or int(y) > screen_h:\n                center_to_screen(self.master, adj=20)\n            else:\n                self.master.geometry(\"%dx%d+%d+%d\" % (wid, hei, int(x), int(y)))\n        self.container = Frame(master, bg=\"coral1\")\n        self.container.place(x=5, y=25, height=self.wig_height, width=self.wig_width)\n        self.frames = {}\n\n    def on_close(self):\n        self.save_categories()\n        self.save_areas()\n\n    def load_con_settings(self):\n        \"\"\"\n        load con settings from settings file\n        :return: None\n        \"\"\"\n        with open(\"resources/data/con-set.txt\") as file: # opening con settings file\n            x = file.readline().strip(\"\\n\").split(\",\")\n            self.bay_range = int(x[0])\n            self.gap = int(x[1])\n            self.hp = int(x[2])\n\n        reserves = self.warehouse.get_reserve_slots()\n        for spot in reserves:\n            spot.get_attrs(room=self.gap, hp=self.hp)\n\n    def save_categories(self):\n        \"\"\"\n        Save categories to data save (application data)\n        :return: None\n        \"\"\"\n        save_data = []\n        for category in self.categories.values():\n            temp_cat_data = [category.name]\n            for item in category.values():\n                temp_cat_data.append(item.item_id)\n            save_data.append(temp_cat_data)\n\n        with open('resources/data/categories.csv', 'w') as file:\n            for row in save_data:\n                first_value = True\n                for value in row:\n                    if first_value:\n                        first_value = not first_value\n                        file.write(value)\n                    else:\n                        file.write(',' + str(value))\n                file.write('\\n')\n\n    def load_categories(self):\n        \"\"\"\n        Load categories from data save (application data)\n        :return: None\n        \"\"\"\n        with open('resources/data/categories.csv', 'r', encoding='utf-8-sig') as file:\n            data = []\n            for line in file:\n                data.append(line.strip('\\n').split(','))\n\n        for row in data:\n            first_value = True\n            current_cat = None\n            for value in row:\n                if first_value:\n                    first_value = not first_value\n                    current_cat = value\n                    self.categories.add_category(value)\n                else:\n                    if self.warehouse.item_list[value] is not None:\n                        self.categories.add_item(self.warehouse.item_list[value], current_cat)\n\n    def load_areas(self):\n        \"\"\"\n        Load warehouse areas into system\n        :return: None\n        \"\"\"\n        from cLibrary.structure.warehouse.CustomArea import CustomArea\n        with open(\"resources/data/areas.wf\", \"r\") as file:\n            areas = {}\n            for line in file:\n                line = line.strip(\"\\n\").split(\",\")\n                i = 1\n                while i < len(line):\n                    save_line = line[i]\n                    spot = self.warehouse.find_area(line[i])\n                    if spot is None:\n                        raise ValueError(\"Save File Error:\\n\\nSpot ID {} could not be found in the warehouse; contact \"\n                                         \"support if further assistance is needed\".format(save_line))\n                    line[i] = spot\n                    i += 1\n                areas[line[0]] = CustomArea(line[1:], line[0], self.warehouse)\n        self.areas = areas\n\n    def save_areas(self):\n        \"\"\"\n        Save current 
warehouse areas in warehouse system (application data)\n        :return: None\n        \"\"\"\n        with open(\"resources/data/areas.wf\", \"w\") as file:\n            for area in self.areas:\n                a = self.areas[area]\n                file.write(a.area_name)\n                for spot in a:\n                    file.write(\",\" + spot.spot_id)\n                file.write(\"\\n\")\n\n    @staticmethod\n    def _create_styles():\n        style = ttk.Style()\n        style.element_create(\"Custom.Treeheading.border\", \"from\", \"default\")\n        style.layout(\"Custom.Treeview.Heading\", [\n            (\"Custom.Treeheading.cell\", {'sticky': 'nswe'}),\n            (\"Custom.Treeheading.border\", {'sticky': 'nswe', 'children': [\n                (\"Custom.Treeheading.padding\", {'sticky': 'nswe', 'children': [\n                    (\"Custom.Treeheading.image\", {'side': 'right', 'sticky': ''}),\n                    (\"Custom.Treeheading.text\", {'sticky': 'we'})\n                ]})\n            ]}),\n        ])\n        style.configure(\"Custom.Treeview\",\n                        background=\"white\", foreground=\"black\", relief=\"sunken\")\n        style.map(\"Custom.Treeview.Heading\",\n                  relief=[('active', 'groove'), ('pressed', 'sunken')])\n        style.map(\"Custom.Treeview.Row\",\n                  relief=[('active', 'groove')])\n\n    def get_area(self, area_name: str) -> CustomArea:\n        return self.areas[area_name]\n\n    def get_category(self, cat_name: str) -> Category:\n        return self.categories[cat_name]\n" }, { "alpha_fraction": 0.5905843377113342, "alphanum_fraction": 0.60359787940979, "avg_line_length": 43.28248596191406, "blob_id": "b7377c36d261706dac05857854c01fb8cb53a675", "content_id": "397533963ff5c263b474c296779b63e8f9146ec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7838, "license_type": "no_license", "max_line_length": 146, "num_lines": 177, "path": "/cLibrary/guis/windows/DistroWindow.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.windows.WidgetWindow import *\nfrom cLibrary.widgets.ScrolledFrame import ScrolledFrame\nfrom typing import List, Tuple, Set, Optional\nfrom cLibrary.structure.item.Item import Item\nfrom cLibrary.widgets.FileImport import FileImport\nfrom cLibrary.widgets.WarehouseFrame import WarehouseFrame\nfrom cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.guis.popups.SelectArea import SelectArea\nfrom cLibrary.methods.distro import distro_setup\nfrom cLibrary.methods.general import open_excel\nfrom cLibrary.widgets.WinOutput import WinOutput\nimport tkinter.ttk as ttk\n\n\nclass Distro(WidgetWindow):\n\n    def __init__(self, master, controller):\n        self.upload_box = None # type: Optional[UploadDistro]\n        self.area_select = None # type: Optional[AreaSelect]\n        self.run_frame = None\n        self.excel_entry = None\n        self.excel_file = None # type: Optional[str]\n        super(Distro, self).__init__(master, controller)\n        self.output = WinOutput(self, width=800, height=self.controller.wig_height - 80,\n                                x=self.controller.wig_width - 800 - 20, y=70,\n                                wrap=WORD, bg=\"snow\", font=\"none 10\", relief=\"groove\")\n\n    def load_title(self, text=\"Distro Program\", bg=\"burlywood1\", relief=\"groove\"):\n        super(Distro, self).load_title(text=text, bg=bg, relief=relief)\n\n    def load_display(self):\n        self.upload_box = UploadDistro(self) # type: UploadDistro\n        self.area_select = AreaSelect(self) # type: AreaSelect\n        self.run_frame = WarehouseFrame(self)\n        self.excel_entry = Entry(self.run_frame, relief=\"groove\")\n\n        self.upload_box.place(x=50, y=100, width=300, height=200)\n        self.area_select.place(x=50, y=320, width=300, height=60)\n\n        output_label = Label(self.run_frame, text=\"Output File Name: \", relief=\"groove\", bg=\"light 
grey\")\n run_button = Button(self.run_frame, text=\"Run Distro Setup\", relief=\"groove\", bg=\"pink\", command=self.run_distro)\n open_file_button = Button(self.run_frame, text=\"Open File\", relief=\"groove\", bg=\"lime\", command=self.open_file)\n\n self.run_frame.place(x=50, y=400, width=300, height=60)\n self.excel_entry.grid(row=0, column=1, columnspan=2, sticky=NSEW)\n self.run_frame.grid_columnconfigure(2, weight=1)\n self.run_frame.rowconfigure(0, weight=1)\n\n outfile_button = Button(self.run_frame, text=\"\\uD83D\\uDCC2\", font=\"bold 14\", relief=\"groove\",\n command=lambda: (\n self.excel_entry.delete(0, 'end'),\n self.excel_entry.insert(0, asksaveasfilename(filetypes=(('XML', '*.xlsx'),),\n defaultextension=\".xlsx\")),\n self.excel_entry.configure(fg=\"black\")))\n outfile_button.grid(row=0, column=3, sticky=NSEW)\n\n output_label.grid(row=0, column=0, sticky=NSEW)\n run_button.grid(row=1, column=0, columnspan=3, sticky=NSEW)\n open_file_button.grid(row=1, column=3, sticky=NSEW)\n\n def run_distro(self):\n try:\n if self.excel_entry.get() == \"\" or self.excel_entry.get() == \"Must Select a File!\":\n raise ValueError(\"\")\n self.excel_file = distro_setup(self.upload_box.get(), self.area_select.get(), self.get_controller().warehouse, self.excel_entry.get())\n except ValueError as e:\n self.excel_entry.delete(0, \"end\")\n self.excel_entry.insert(0, \"Must Select a File!\")\n self.excel_entry.configure(fg=\"red\")\n\n def open_file(self):\n open_excel(self.excel_file, self.output)\n\n\nclass UploadDistro(WarehouseFrame):\n\n def __init__(self, master, *args, **kw):\n super(UploadDistro, self).__init__(master, *args, **kw)\n self.data = None\n self.distro_data = DistroData(self) # type: DistroData\n self.title = Label(self, text=\"Upload Distro Data\", relief='groove', bg='deep sky blue', )\n self.imp_box = FileImport(self, title='Distro File')\n self.info = Label(self, text=\"Format: [Item Code, Quantity]\", relief='groove')\n self.upload_button = Button(self, text=\"Upload\", relief='groove', bg='lime',\n command=lambda: self.imp_distro_file())\n\n self.title.grid(row=0, column=0, columnspan=8, sticky=NSEW)\n self.imp_box.grid(row=1, column=0, columnspan=8, sticky=NSEW)\n self.info.grid(row=2, column=0, columnspan=7, sticky=NSEW)\n self.upload_button.grid(row=2, column=7, columnspan=1, sticky=NSEW)\n self.distro_data.grid(row=3, column=0, columnspan=8, sticky=NSEW)\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(3, weight=1)\n\n def imp_distro_file(self):\n data = [] # type: List[Tuple[Item, int]]\n warehouse = self.get_controller().warehouse\n with open(self.imp_box.get(), 'r', encoding='utf-8-sig') as f:\n for line in f:\n x = line.strip('\\n').replace(' ', '').split(',')\n itm = warehouse.item_list[x[0]] # type: Item\n i = int(x[1]) # type: int\n data.append((itm, i))\n self.data = data\n self.distro_data.change_data(data)\n\n def get(self) -> List[Tuple[Item, int]]:\n return self.data\n\n\nclass DistroData(WarehouseFrame):\n \"\"\"\n Display uploaded distro data in a scrolled frame\n \"\"\"\n def __init__(self, master, *args, **kw):\n super(DistroData, self).__init__(master)\n self.data = None\n self.tview = ttk.Treeview(self) # type: Treeview\n self.tview.pack(fill=BOTH, expand=True)\n self.tview['show'] = 'headings'\n\n def change_data(self, data: List[Tuple[Item, int]]):\n \"\"\"\n update the distro data\n :param data: distro data to update to\n :return: None\n \"\"\"\n self.tview.destroy()\n self.tview = ttk.Treeview(self, 
style=\"Custom.Treeview\")\n self.tview.pack(fill=BOTH, expand=True)\n self.tview['columns'] = ('itm', 'qty')\n self.tview.heading('itm', text='Item Code')\n self.tview.heading('qty', text=\"Quantity\")\n self.tview['show'] = 'headings'\n self.tview.column('itm', width=90)\n self.tview.column('qty', stretch=True)\n for item in data:\n self.tview.insert('', 'end', values=(item[0].item_id, str(item[1])))\n\n\nclass AreaSelect(WarehouseFrame):\n\n def __init__(self, master, *args, **kw):\n super(AreaSelect, self).__init__(master, *args, **kw)\n controller = self.get_controller()\n self.grid_columnconfigure(0, weight=1)\n self.title = Label(self, text=\"Select / Create Area\", relief=\"groove\")\n\n self.areas = controller.areas\n self.area_dict = {}\n for area in self.areas.values():\n self.area_dict[area.area_name] = area\n area_combo_options = [key.area_name for key in self.areas.values()]\n\n self.area_combo = Combobox(self, values=area_combo_options, font=\"bold 10\", state=\"readonly\")\n self.area_combo.set(area_combo_options[0])\n\n self.create_area_button = Button(self, text=\"New Area\", relief=\"groove\", bg=\"orange\",\n command=self.new_area, state=\"disabled\")\n\n self.title.pack(fill=BOTH, expand=True)\n self.area_combo.pack(side=LEFT, fill=BOTH, expand=True)\n self.create_area_button.pack(side=LEFT, fill=BOTH, expand=True)\n\n def get(self) -> Area:\n \"\"\"\n Get area selected in combo box\n :return: Area relating to selection\n \"\"\"\n return self.area_dict[self.area_combo.get()]\n\n def new_area(self):\n \"\"\"\n Create new area for distro's\n :return: None\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.5268051028251648, "alphanum_fraction": 0.5289742946624756, "avg_line_length": 30.95049476623535, "blob_id": "fbd2c1250d8e70a430bee9d209125f2f1521a323", "content_id": "a3047b04f5060f11ec3a714856fe5c690e6df328", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3227, "license_type": "no_license", "max_line_length": 115, "num_lines": 101, "path": "/cLibrary/structure/warehouse/Warehouse.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Aisle import Aisle\nfrom cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.item.Item import ItemList\nfrom cLibrary.structure.datatypes.LineFormat import SltLF, StkLF, HLF\n\n\nclass Warehouse(Area):\n\n def __init__(self, item_file, slot_file, stock_file, hits_file):\n super().__init__()\n self.spot_id = \"Ladelle\"\n hits_hash = self.hash_hits(hits_file)\n self.read_slot_file(slot_file)\n self.item_list = ItemList(item_file, hits_hash, self)\n self.assign_ps_items(stock_file)\n\n def add_line(self, line, indexing):\n \"\"\"\n This will import data into the aisles and create a new aisle if there isn't an aisle matching the line data\n :param line: line being imported into the warehouse\n :param indexing: indexing for item attributes\n \"\"\"\n slf = SltLF(line, indexing)\n aisle = slf.aisle\n if aisle in self:\n self[aisle].add_line(slf)\n else:\n self[aisle] = Aisle(slf, self)\n self.count += 1\n self[aisle].add_line(slf)\n\n def read_slot_file(self, filename):\n \"\"\"\n importing slots from file\n :param filename: file to import\n :return: None\n \"\"\"\n f = open(filename)\n name_indexing = {}\n fline = True\n for line in f:\n if fline:\n line1 = line.strip('\\n').replace('\"', '').split(\",\")\n for i, name in enumerate(line1):\n name_indexing[name] = i\n fline = False\n else:\n 
self.add_line(line, name_indexing)\n f.close()\n\n @staticmethod\n def hash_hits(hits_file):\n \"\"\"\n import file for item hits\n :param hits_file: file to import\n :return: dictionary of item_id: item hits data\n \"\"\"\n h = {}\n name_indexing = {}\n fline = True\n f = open(hits_file)\n for line in f:\n if fline:\n line1 = line.strip('\\n').replace('\"', '').split(\",\")\n for i, name in enumerate(line1):\n name_indexing[name] = i\n fline = False\n else:\n x = HLF(line, name_indexing)\n h[x.item_id] = x\n f.close()\n return h\n\n def assign_ps_items(self, filename):\n \"\"\"\n assigning items to pick slots\n :param filename:\n :return: None\n \"\"\"\n f = open(filename)\n\n fline = True\n name_indexing = {}\n for line in f:\n if fline:\n line1 = line.strip('\\n').replace('\"', '').split(\",\")\n for i, name in enumerate(line1):\n name_indexing[name] = i\n fline = False\n else:\n self.assign_item(line, name_indexing)\n f.close()\n\n def assign_item(self, line, indexing):\n stock_obj = StkLF(line, indexing)\n item_id = stock_obj.item_id\n slot = self[stock_obj.aisle][stock_obj.bay][stock_obj.level][stock_obj.position]\n slot.assign_item(item_id, stock_obj)\n\n def get_aisle(self, aisle_code: str) -> Aisle:\n return self[aisle_code]\n" }, { "alpha_fraction": 0.547499418258667, "alphanum_fraction": 0.5539602637290955, "avg_line_length": 37.703704833984375, "blob_id": "f784f80ff917d2b59dd167a29590b953efcb755a", "content_id": "4bae925e2596584b29844123cf7e8b6f0cc2f253", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4179, "license_type": "no_license", "max_line_length": 122, "num_lines": 108, "path": "/cLibrary/guis/popups/SelectArea.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Slot import Slot\nfrom cLibrary.structure.warehouse.Level import Level\nfrom cLibrary.structure.warehouse.Bay import Bay\nfrom cLibrary.structure.warehouse.Aisle import Aisle\nfrom cLibrary.widgets.AreaCheckbox import AreaCheckbutton\nfrom cLibrary.methods.general import center_to_win\nfrom cLibrary.guis.popups.PickSlotSettings import PickSlotSettings\n\n\nclass SelectArea(Toplevel):\n\n def __init__(self, master, area, selecting=True):\n \"\"\"\n Initialise view warehouse popup\n :param master: master window\n :param area: Warehouse location to view into\n :param selecting: Boolean (are areas being selected)\n \"\"\"\n if not isinstance(area, (Area, Slot)):\n raise TypeError(\"area must be a type of Area\")\n super(SelectArea, self).__init__(master)\n self.selecting = selecting\n if isinstance(area, Slot):\n PickSlotSettings(master, area)\n self.destroy()\n else:\n self.area = area.get_sorted_list()\n if isinstance(area, Level):\n self.area.sort(key=lambda x: int(x.position))\n else:\n self.area.sort(key=lambda x: x.spot_id)\n self.grid_row = 0\n self.grid_col = 0\n self.selected_areas = []\n\n self.load_area()\n self.load_settings()\n self.grab_set()\n\n def load_settings(self):\n \"\"\"\n Load popup config\n :return: None\n \"\"\"\n self.resizable(False, False)\n self.protocol(\"WM_DELETE_WINDOW\", self.on_close)\n center_to_win(self, self.master)\n\n def get_area(self, areas=None):\n \"\"\"\n Attempt to push areas into system\n :param areas: areas to push (if None check selected boxes)\n :return: None\n \"\"\"\n if areas is None:\n areas = []\n for box in self.selected_areas:\n areas.append(box.get()) if 
box.get() is not None else None\n self.get_area(areas)\n else:\n self.on_close()\n self.master.get_area(areas)\n\n def load_area(self):\n \"\"\"\n Load display of current area being viewed\n :return: None\n \"\"\"\n for spot in self.area:\n fill = 1\n emp = 1\n if not isinstance(spot, Slot):\n fill = spot.get_filled_pick_slots_count()\n emp = spot.get_empty_pick_slots_count()\n if fill != 0 or emp != 0:\n spot_str = spot.spot_id\n if isinstance(spot, Aisle):\n spot_str = spot.aisle\n elif isinstance(spot, Bay):\n spot_str = spot.bay\n elif isinstance(spot, Level):\n spot_str = spot.level\n elif isinstance(spot, Slot):\n spot_str = spot.position\n\n label = Button(self, text=str(type(spot).__name__)+\" \"+spot_str, relief=\"groove\", bg=\"thistle1\", width=13,\n command=lambda e=spot: SelectArea(self, e, self.selecting))\n label.grid(row=self.grid_row, column=self.grid_col, pady=(0,2), padx=0)\n if self.selecting:\n check_box = AreaCheckbutton(self, spot, onvalue=1, offvalue=0)\n check_box.grid(row=self.grid_row, column=self.grid_col+1, pady=(0,2), padx=0, sticky=N+S+E+W)\n self.selected_areas.append(check_box)\n (self.grid_col, self.grid_row) = (0, self.grid_row + 1) if self.grid_col == 2 else (2, self.grid_row)\n if self.selecting:\n select_area = Button(self, text=\"Select Area\", relief=\"groove\", bg=\"olivedrab1\",\n command=lambda : self.get_area())\n select_area.grid(row=(self.grid_row if self.grid_col == 0 else self.grid_row + 1),\n column=0, columnspan=4, sticky=N+S+E+W)\n\n def on_close(self):\n \"\"\"\n Close popup protocol\n :return: None\n \"\"\"\n self.master.grab_set()\n self.destroy()" }, { "alpha_fraction": 0.6960452198982239, "alphanum_fraction": 0.7175140976905823, "avg_line_length": 30.60714340209961, "blob_id": "fa34962662e68fa2c553b2661dcacbe8d2ccfb70", "content_id": "b3cef326e265ef6de22acef0ffb413d261da6a66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 888, "license_type": "no_license", "max_line_length": 151, "num_lines": 28, "path": "/README.md", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <img alt=\"WMS\" src=\"https://i.imgur.com/JydMRnL.png\" width=\"70\" />\n</p>\n<div align=\"center\">\n <img src=\"https://img.shields.io/badge/-Python-%233573a6\"/>\n <img src=\"https://img.shields.io/badge/-tkinter-orange\"/>\n <img src=\"https://img.shields.io/badge/-pandas-%23ff69b4\"/>\n</div>\n<h1 align=\"center\">\n Warehouse Management Tool\n</h1>\n\nA Warehouse Management System tool used to automate the creation of custom work orders for warehousing staff, as well as reports on the \ncurrent state of the warehouse and product lines in it. 
\n\nThis app was made to be used with a [WISE Warehouse System](http://www.wisesystems.com.au/index.htm) and uses files exported directly from this system.\n\n## 💻 Quick Setup\n\n1) Download and extract zip\n2) Install required packages:\n```shell\npip install -r requirements.txt\n```\n3) Run the `Main.py` Script\n```shell\npython Main.py\n```\n" }, { "alpha_fraction": 0.5229746103286743, "alphanum_fraction": 0.5278113484382629, "avg_line_length": 39.34146499633789, "blob_id": "0a63b2e8bca7428a45f8c110eb2a76f2dda29306", "content_id": "63a4303d50c8ac1b1cc5da90a7aba6a16a5894b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1654, "license_type": "no_license", "max_line_length": 120, "num_lines": 41, "path": "/cLibrary/widgets/FileOutput.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport tkinter.filedialog as fdlog\n\n\nclass FileOutput(tk.Frame):\n\n def __init__(self, master, **kw):\n super(FileOutput, self).__init__(master)\n title = kw.get('title', 'Output')\n filetypes = kw.get('filetypes', ((\"all files\",\"*.*\"), ))\n t_fg = kw.get('t_fg', 'black')\n t_font = kw.get('t_font', 'bold 10')\n b_fg = kw.get('b_bg', t_fg)\n b_font = kw.get('b_font', t_font)\n self.item_label = tk.Label(self, text=title + \": \", relief=\"groove\", fg=t_fg, font=t_font, anchor=tk.W, padx=15)\n self.item_label.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.item_file = tk.StringVar()\n self.item_entry = tk.Entry(self, textvariable=self.item_file)\n self.item_entry.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n self.item_button = tk.Button(self, text=\"\\uD83D\\uDCC2\", font=b_font, fg=b_fg, relief=\"groove\",\n command=lambda: (\n self.entry_set(self.item_entry, fdlog.askopenfilename(filetypes=filetypes)),\n self.item_entry.configure(fg=\"black\")\n )\n )\n self.item_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n\n def get(self):\n return self.item_entry.get()\n\n @staticmethod\n def entry_set(entry: tk.Entry, text: str):\n \"\"\"\n Set entry box text\n :param entry: entry box\n :param text: text to set\n :return: None\n \"\"\"\n if text != '':\n entry.delete(0, 'end')\n entry.insert(tk.END, text)\n" }, { "alpha_fraction": 0.582922101020813, "alphanum_fraction": 0.599442183971405, "avg_line_length": 39.18965530395508, "blob_id": "b81195de53c700103d2972b98076c7143a5f2256", "content_id": "31145865d15bb71e18b609eedaa4a62ca8cc3a57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4661, "license_type": "no_license", "max_line_length": 135, "num_lines": 116, "path": "/cLibrary/guis/popups/AreasPopUp.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.guis.popups.ErrorWindow import ErrorWindow\nfrom cLibrary.guis.popups.NewAreaPopUp import NewAreaPopUp\n\n\nclass AreasPopUp(StandardPopUp):\n\n def __init__(self, master, controller, *args, **kwargs):\n \"\"\"\n Initialise from super class\n :param master: master window\n :param controller: program controller\n \"\"\"\n controller.save_areas()\n super(AreasPopUp, self).__init__(master, controller, *args, **kwargs)\n\n def load_display(self):\n \"\"\"\n Loading popup display information\n :return: None\n \"\"\"\n self.area_dict = {}\n\n for area in self.controller.areas.values():\n self.area_dict[area.area_name] = area\n\n self.combo_options = [key for key in self.area_dict]\n 
self.start_val = StringVar(self)\n try:\n self.start_val.set(list(self.controller.areas.values())[0].area_name)\n except IndexError:\n self.start_val.set(\"\")\n\n areas_title = Label(self, text=\"Areas\", bg=\"light grey\", relief=\"groove\")\n areas_title.place(x=40, y=40, width=150, height=20)\n self.combo = Combobox(self, values=self.combo_options, textvariable=self.start_val,\n font=\"bold 10\", state='readonly')\n self.combo.place(x=40, y=60, width=150, height=30)\n edit_button = Button(self, text=\"Edit\", bg=\"light goldenrod\", relief=\"groove\",\n command=lambda: NewAreaPopUp(self, controller=self.controller, area=self.area_dict[self.start_val.get()]))\n edit_button.place(x=40, y=90, width=75, height=20)\n delete_button = Button(self, text=\"Delete\", bg=\"orange red\", relief=\"groove\", command=self.delete_area)\n delete_button.place(x=115, y=90, width=75, height=20)\n\n create_new_but = Button(self, text=\"Create New Area\", bg=\"chartreuse2\", relief=\"groove\",\n command=lambda: NewAreaPopUp(self, controller=self.controller))\n create_new_but.place(y=125, x=40, width=150, height=30)\n\n cancel_but = Button(self, text=\"Cancel\", bg=\"gray70\", relief=\"groove\",\n command=lambda: self.on_close())\n cancel_but.place(y=self.winfo_height() - 50, x=40, width=72, height=30)\n\n ok_button = Button(self, text=\"Ok\", bg=\"chartreuse2\", relief=\"groove\", command=lambda:(self.on_close(cancel=False)))\n ok_button.place(y=self.winfo_height()-50, x=40+73+4, width=73, height=30)\n\n def delete_area(self):\n \"\"\"\n Remove area from warehouse\n :return: None\n \"\"\"\n try:\n area = self.area_dict[self.start_val.get()]\n self.controller.areas.pop(area.area_name, None)\n self.area_dict.pop(self.start_val.get())\n self.combo_options = [key for key in self.area_dict]\n except KeyError as e:\n ErrorWindow(self, e, 'U002')\n\n try:\n self.start_val.set(list(self.controller.areas.values())[0].area_name)\n except IndexError as e:\n self.start_val.set(\"\")\n\n self.combo['values'] = self.combo_options\n self.combo['textvariable'] = self.start_val\n\n def on_close(self, cancel=True):\n super(AreasPopUp, self).on_close()\n if cancel:\n self.controller.load_areas()\n\n def add_new(self, area):\n \"\"\"\n Add new area to warehouse\n :param area: New Area\n :return: None\n \"\"\"\n a_names = []\n for a in self.controller.areas.values():\n a_names.append(a.area_name)\n a.error_check(area) # check for any overlap errors\n if area.area_name in a_names:\n raise ValueError('Area Name Error: Area name is duplicated, use different area name')\n\n self.controller.areas[area.area_name] = area\n self.area_dict[area.area_name] = area\n self.combo_options = [key for key in self.area_dict]\n self.start_val.set(list(self.controller.areas.values())[0].area_name)\n self.combo['values'] = self.combo_options\n self.combo['textvariable'] = self.start_val\n\n def update_combo(self):\n \"\"\"\n Update areas combobox\n :return: None\n \"\"\"\n self.area_dict = {}\n\n for area in self.controller.areas.values():\n self.area_dict[area.area_name] = area\n\n self.combo_options = [key for key in self.area_dict]\n self.start_val = StringVar(self)\n self.start_val.set(list(self.controller.areas.values())[0].area_name)\n self.combo['values'] = self.combo_options\n self.combo['textvariable'] = self.start_val" }, { "alpha_fraction": 0.5489048361778259, "alphanum_fraction": 0.5634509325027466, "avg_line_length": 36.381248474121094, "blob_id": "d062cc8c2bddbc4a2e365d382ef7974f0cb4c5b1", "content_id": 
"109bb722acc7db488c23104793b1402e1d17a12f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5981, "license_type": "no_license", "max_line_length": 124, "num_lines": 160, "path": "/cLibrary/guis/popups/NewAreaPopUp.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.guis.popups.StandardPopup import *\nfrom cLibrary.guis.popups.ErrorWindow import ErrorWindow\nfrom cLibrary.guis.popups.SelectArea import SelectArea\n\n\nclass NewAreaPopUp(StandardPopUp):\n\n def __init__(self, master, controller, area=None, *args, **kwargs):\n \"\"\"\n Initialise from super class\n :param master: master window\n :param controller: program controller\n :param area: Warehouse area (None if creating new area)\n \"\"\"\n self.save_string = self.save_area(area)\n self.og = False\n if area is None:\n from cLibrary.structure.warehouse.CustomArea import CustomArea\n area = CustomArea([], \"\", controller.warehouse)\n else:\n self.og = True\n self.area = area\n super(NewAreaPopUp, self).__init__(master, controller, *args, **kwargs)\n\n def load_display(self):\n \"\"\"\n Load area display\n :return: None\n \"\"\"\n self.area_dict = {}\n for area in self.area:\n self.area_dict[area.spot_id] = area\n\n self.combo_options = [key for key in self.area_dict]\n self.start_val = StringVar(self)\n try:\n self.start_val.set(self.combo_options[0])\n except IndexError:\n self.start_val.set(\"\")\n\n name_label = Label(self, text=\"Name: \", bg=\"light grey\", relief=\"groove\")\n name_label.place(x=40, y=40, width=60, height=20)\n\n self.name_var = StringVar()\n self.name_var.set(self.area.area_name)\n\n name_entry = Entry(self, textvariable=self.name_var)\n name_entry.place(x=100, y=40, width=90, height=20)\n\n locs_label = Label(self, text=\"Locations: \", bg=\"dark grey\", relief=\"groove\")\n locs_label.place(x=40, y=60, width=60, height=30)\n\n self.locs_combo = Combobox(self, textvariable=self.start_val, values=self.combo_options, state='readonly')\n self.locs_combo.place(x=100, y=60, width=90, height=30)\n\n add_button = Button(self, text=\"Add\", bg=\"chartreuse2\", command=lambda: SelectArea(self, self.controller.warehouse))\n add_button.place(x=40, y=90, width=75, height=20)\n\n delete_button = Button(self, text=\"Delete\", bg=\"orange red\", command=lambda: self.delete_location())\n delete_button.place(x=115, y=90, width=75, height=20)\n\n ok_button = Button(self, text=\"Ok\", bg=\"chartreuse2\", relief=\"groove\",\n command=lambda: (self.ok()))\n ok_button.place(y=self.winfo_height() - 50, x=40 + 73 + 4, width=73, height=30)\n\n cancel_but = Button(self, text=\"Cancel\", bg=\"gray70\", relief=\"groove\",\n command=lambda: self.on_close())\n cancel_but.place(y=self.winfo_height() - 50, x=40, width=72, height=30)\n\n def delete_location(self):\n \"\"\"\n Delete location from the current area\n :return: None\n \"\"\"\n try:\n area = self.area_dict[self.start_val.get()]\n self.area - area\n self.area_dict.pop(self.start_val.get())\n self.update_combos()\n except KeyError as e:\n ErrorWindow(self, e, 'U002',)\n\n def get_area(self, areas):\n \"\"\"\n Add locations to the warehouse area\n :param areas: locations to add to area\n :return: None\n \"\"\"\n for area in areas:\n try:\n self.area + area\n self.area_dict[area.spot_id] = area\n except ValueError as e:\n error = ErrorWindow(self, e, \"U003\", \"User Error: Spot Id {} area, or part of this area already exists \"\n \"within CustomArea 
location\".format(area.spot_id))\n self.wait_window(error)\n self.update_combos()\n\n def update_combos(self):\n \"\"\"\n update area combobox\n :return:\n \"\"\"\n self.combo_options = [key for key in self.area_dict]\n try:\n self.start_val.set(self.combo_options[0])\n except IndexError:\n self.start_val.set(\"\")\n self.locs_combo['values'] = self.combo_options\n self.locs_combo['textvariable'] = self.start_val\n\n def ok(self):\n \"\"\"\n Save edits to the Area\n :return: None\n \"\"\"\n try:\n if self.name_var.get() == \"\":\n raise ValueError(\"Area Name Error: Area must have a name\\nEnter an Area Name\")\n if not self.og:\n self.area.area_name = self.name_var.get()\n self.master.add_new(self.area)\n else:\n for area in self.controller.areas.values():\n if area.area_name != self.area.area_name:\n self.area.error_check(area)\n self.area.area_name = self.name_var.get()\n self.master.update_combo()\n self.on_close(cancel=False)\n except ValueError as e:\n ErrorWindow(self, e, 'U003', e)\n\n def on_close(self, cancel=True):\n super(NewAreaPopUp, self).on_close()\n if cancel:\n saved_area = self.load_area(self.save_string)\n for area in self.controller.areas.values():\n if area.area_name == saved_area.area_name:\n self.controller.areas[area.area_name] = saved_area\n break\n self.master.update_combo()\n\n @staticmethod\n def save_area(area) -> str:\n if area is None:\n return \"\"\n save_string = \"\"\n save_string += area.area_name\n for spot in area:\n save_string += (\",\" + spot.spot_id)\n return save_string\n\n def load_area(self, save_string: str):\n from cLibrary.structure.warehouse.CustomArea import CustomArea\n warehouse = self.controller.warehouse\n save = save_string.strip(\"\\n\").split(\",\")\n spots = []\n for i in save[1:]:\n spots.append(warehouse.find_area(i))\n return CustomArea(spots, save[0], warehouse)\n" }, { "alpha_fraction": 0.5317073464393616, "alphanum_fraction": 0.5329268574714661, "avg_line_length": 23.84848403930664, "blob_id": "dfd563ba9adfbb19fb34ae13e03689307da53e1a", "content_id": "3e9a6047603dcd330e2fed065c880af4c95b696f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "no_license", "max_line_length": 50, "num_lines": 33, "path": "/cLibrary/structure/warehouse/Aisle.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from cLibrary.structure.warehouse.Area import Area\nfrom cLibrary.structure.warehouse.Bay import Bay\n\n\nclass Aisle(Area):\n\n def __init__(self, slf, warehouse):\n \"\"\"\n Initialise an Aisle\n :param slf:\n :param warehouse:\n \"\"\"\n super().__init__()\n self.warehouse = warehouse\n self.aisle = slf.aisle\n self.spot_id = slf.aisle\n\n def add_line(self, slf):\n \"\"\"\n import a bay into this Aisle\n :param slf: Bay information\n :return: None\n \"\"\"\n bay = slf.bay\n if bay in self:\n self[bay].add_line(slf)\n else:\n self[bay] = Bay(slf, self.warehouse)\n self.count += 1\n self[bay].add_line(slf)\n\n def get_bay(self, bay_code: str) -> Bay:\n return self[bay_code]\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 3.3333332538604736, "blob_id": "fd50ef52760fcf4fc7a40a89e933fed8c476e9c1", "content_id": "ac9164f3a0b70501b39ab1480e3fb344ec3a1c7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 25, "license_type": "no_license", "max_line_length": 4, "num_lines": 6, "path": "/resources/data/saves/general2.cp", 
"repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "WFP\nBIL\nAFP\nBAIL\nBAD\nNone" }, { "alpha_fraction": 0.5573878884315491, "alphanum_fraction": 0.5653034448623657, "avg_line_length": 30.60416603088379, "blob_id": "085719cd84713706d240f1d971c4a39f69d5a817", "content_id": "9f2007b674e21898fbf44c944432b325c3d4d468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 102, "num_lines": 48, "path": "/cLibrary/widgets/controlPanel/DashGrid.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom cLibrary.widgets.controlPanel.DashWig import DashWig\n\n\nclass DashGrid(Frame):\n\n def __init__(self, master, controller):\n super().__init__(master)\n self.configure(width=controller.wig_width, height=controller.wig_height)\n self.place(x=0, y=0)\n self.controller = controller\n\n self.dash_wigs = []\n self.wig_cols = controller.wigs_w\n self.wig_rows = controller.wigs_h\n\n self.wig_height = (controller.wig_height - (5 * (controller.wigs_h - 1))) / self.wig_rows\n self.wig_width = (controller.wig_width - (5 * (controller.wigs_w - 1))) / self.wig_cols\n\n y_val = 0\n for i in range(self.wig_rows):\n x_val = 0\n for j in range(self.wig_cols):\n cur_wig = DashWig(self, self.controller, width=self.wig_width, height=self.wig_height)\n cur_wig.place(x=x_val, y=y_val)\n self.dash_wigs.append(cur_wig)\n x_val += self.wig_width + 5\n y_val += self.wig_height + 5\n\n def __iter__(self):\n return ListIterator(len(self.dash_wigs), self.dash_wigs)\n\nclass ListIterator:\n def __init__(self, length, list):\n self.list = list\n self.length = length\n self.count = 0\n\n def __next__(self):\n if self.count >= self.length:\n raise StopIteration\n else:\n item = self.list[self.count]\n self.count += 1\n return item\n\n def __iter__(self):\n return self" }, { "alpha_fraction": 0.5471004247665405, "alphanum_fraction": 0.5504950284957886, "avg_line_length": 38.27777862548828, "blob_id": "d252c89151829dde3aff0028a4df8e73469bc524", "content_id": "61c1b75fe5beaf1ae1bf2fafd01c23a9ba54a4b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3535, "license_type": "no_license", "max_line_length": 91, "num_lines": 90, "path": "/cLibrary/structure/datatypes/LineFormat.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "import re\n\n\nclass LF:\n def __init__(self):\n pass\n\n\nclass SltLF(LF):\n\n def __init__(self, line, indexing):\n super().__init__()\n psp2 = re.compile(\"[0-9]{5}[A-D]\")\n line = line.strip('\\n').replace('\"', '').split(\",\")\n\n self.loccode = line[indexing['loccode']].replace(\" \", \"\")\n self.aisle = line[indexing['aisle']].replace(\" \", \"\").zfill(2)\n self.bay = line[indexing['bay']].replace(\" \", \"\")\n self.level = line[indexing['level']].replace(\" \", \"\")\n self.position = line[indexing['position']].replace(\" \", \"\")\n self.id = self.aisle + self.bay + self.level + self.position\n\n self.active = False\n if line[indexing['activ']] != \"N\":\n self.active = True\n\n self.suits_pick_face = line[indexing['okpickslot']].replace(\" \", \"\") == \"T\"\n self.status = line[indexing['frzstock']]\n self.is_pick_slot = line[indexing['ispickslot']] == \"T\"\n self.suits_multi_pick = line[indexing['multiitmpk']] == \"T\"\n\n self.height = line[indexing['shgt']]\n self.width = line[indexing['width']]\n self.depth = 
line[indexing['sdepth']]\n\n\nclass ILF(LF):\n def __init__(self, line, indexing):\n super().__init__()\n line = line.strip('\\n').replace('\"', '').replace(\" \", \"\").split(\",\")\n self.item_id = line[indexing['itm']]\n self.qty = float(line[indexing['qty']])\n self.barcode1 = line[indexing['apn']]\n self.barcode2 = line[indexing['tun']]\n\n self.length_pc = int(line[indexing['l']])\n self.width_pc = int(line[indexing['w']])\n self.height_pc = int(line[indexing['h']])\n self.weight_pc = float(line[indexing['wgt']])\n self.units_pc = int(line[indexing['spk']])\n\n self.length_pi = int(line[indexing['l_inner']])\n self.width_pi = int(line[indexing['w_inner']])\n self.height_pi = int(line[indexing['h_inner']])\n self.weight_pi = float(line[indexing['wgt_inner']])\n self.units_pi = int(line[indexing['spk_inner']])\n\n self.length_pu = int(line[indexing['l2']])\n self.width_pu = int(line[indexing['w2']])\n self.height_pu = int(line[indexing['h2']])\n self.weight_pu = float(line[indexing['wg2']])\n\n self.pmax = int(line[indexing['pmax']])\n self.lettrigger = int(line[indexing['lettrigger']])\n\n\nclass StkLF(LF):\n def __init__(self, line, indexing):\n super().__init__()\n line = line.strip('\\n').replace('\"', '').replace(\" \", \"\").split(\",\")\n self.item_id = line[indexing['sitm']].replace(\" \", \"\")\n self.loccode = line[indexing['loccode']].replace(\" \", \"\")\n self.aisle = line[indexing['aisle']].replace(\" \", \"\").zfill(2)\n self.bay = line[indexing['bay']].replace(\" \", \"\")\n self.level = line[indexing['level']].replace(\" \", \"\")\n self.position = line[indexing['position']].replace(\" \", \"\")\n self.id = self.aisle + self.bay + self.level + self.position\n self.status = line[indexing['status']]\n self.type = line[indexing['slottype']] if line[indexing['slottype']] != \"\" else \"P\"\n self.qty = float(line[indexing['sqty']])\n\n\nclass HLF(LF):\n def __init__(self, line, indexing):\n super().__init__()\n line = line.strip('\\n').replace('\"', '').replace(\" \", \"\").split(\",\")\n self.item_id = line[indexing['item']]\n self.hits = float(line[indexing['hits']])\n self.avehitsday = float(line[indexing['aveunitday']])\n self.dayshit = float(line[indexing['dayshit']])\n" }, { "alpha_fraction": 0.5493863224983215, "alphanum_fraction": 0.5530878901481628, "avg_line_length": 31.487340927124023, "blob_id": "f2518856aadd00ecbf7a2c7f0890ec1b32456525", "content_id": "5cc56d4d71a18a972c12ef46d47ef18cfe92a5e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5133, "license_type": "no_license", "max_line_length": 88, "num_lines": 158, "path": "/cLibrary/structure/warehouse/Slot.py", "repo_name": "MatthewGadsden/WarehouseManager", "src_encoding": "UTF-8", "text": "from typing import Set, List, Union, Optional\nfrom cLibrary.structure.item.StockRecord import StockRecord\nimport math\n\n\nclass Slot:\n\n def __init__(self, warehouse, slf):\n self.warehouse = warehouse \n\n self.aisle = slf.aisle\n self.bay = slf.bay\n self.level = slf.level\n self.position = slf.position\n self.spot_id = slf.id\n\n self.suits_pick_face = slf.suits_pick_face\n self.is_pick_face = slf.is_pick_slot\n self.suits_multi_pick = slf.suits_multi_pick\n\n self.stock_records = [] # type: List[StockRecord]\n self.allocations = [] # type: List[StockRecord]\n\n self.s_height = int(slf.height)\n self.s_width = int(slf.width)\n self.s_depth = int(slf.depth)\n\n self.used_width = 0\n self.used_height = 0\n self.used_weight = 0\n\n def __eq__(self, other):\n 
return (self.aisle == other.aisle and\n self.bay == other.bay and\n self.level == other.level and\n self.position == other.position)\n\n def __gt__(self, other):\n if not isinstance(other, (Slot, )):\n raise TypeError()\n if self.aisle > other.aisle:\n return True\n elif self.bay > other.bay:\n return True\n elif self.level > other.level:\n return True\n elif self.position > other.position:\n return True\n return False\n\n def __lt__(self, other):\n return not (self > other)\n\n def _item_used_width(self, record: StockRecord, hp: int) -> int:\n \"\"\"\n Calculate width used on pallet by item, given quantity\n :param record: Stock Record (item, location, qty)\n :param hp: Height percentage to use of pallet\n :return: width used (int)\n \"\"\"\n item = record.item\n max_height = self.s_height * (hp / 100)\n\n carton_length = item.carton.length\n carton_width = item.carton.width\n carton_height = item.carton.height\n\n d = carton_length # set starting depth\n h = carton_height # set starting height\n w = carton_width # set starting width\n c = math.ceil(record.qty / item.carton.units) # number of cartons (rounded up)\n\n # Loop through cartons and simulate stack on a pallet\n for i in range(c):\n if w > self.s_width:\n return w\n if d + item.carton.length < self.s_depth:\n d += item.carton.length\n\n elif h + item.carton.height < max_height:\n d = item.carton.length\n h += item.carton.height\n else:\n d = item.carton.length\n h = item.carton.height\n w += item.carton.width\n return w\n\n def _item_used_weight(self, record: StockRecord) -> float:\n \"\"\"\n Calculate item used weight, given quantity of item\n :param record: Stock Record (item, location, qty)\n :return:\n \"\"\"\n w = record.item.carton.weight\n c = math.ceil(record.qty / record.item.carton.units)\n w = c * w\n return w\n\n def get_attrs(self, room=5, hp=80):\n room /= 100\n room += 1\n width = 0\n weight = 0\n for record in self.stock_records:\n width += self._item_used_width(record, hp=hp)*room\n weight += self._item_used_weight(record)\n self.used_width = width\n self.used_weight = weight\n\n def get_pick_slots(self, filt):\n \"\"\"\n Get list of pick slots\n :param filt: filter to pick which pick slots are wanted\n :return:\n \"\"\"\n if filt is None:\n return [self,]\n else:\n return [self,] if filt(self) else []\n\n def assign_item(self, item_id: str, stk):\n \"\"\"\n Add item pick slots assignment to this slot\n :param item_id: Item Code being Assigned to Slot\n :param stk: Stk Data Container\n :return: None\n \"\"\"\n stock_record = StockRecord(self.warehouse.item_list[item_id], self, stk.qty)\n if self.is_pick_face:\n self.allocations.append(stock_record)\n stock_record.item.allocations.append(stock_record)\n else:\n self.stock_records.append(stock_record)\n stock_record.item.stock_records.append(stock_record)\n\n def get_item_avehitsday(self) -> Optional[float]:\n \"\"\"\n Get the average hits per day for this slots item allocations\n (Returns the average of all items, if more than 1 allocation to slot)\n :return: average hits per day (float), None if no allocations\n \"\"\"\n if not self.allocations:\n return None\n else:\n c = 0\n t_ahd = 0\n for record in self.allocations:\n t_ahd += record.item.avehitsday\n c += 1\n return t_ahd / c\n\n def get_display_id(self) -> str:\n \"\"\"\n Get printable id string\n :return: Aisle-Bay-Level-Position (string)\n \"\"\"\n return str(self.aisle + '-' + self.bay + '-' + self.level + '-' + self.position)\n" } ]
66
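The WarehouseManager record above persists areas with a one-line-per-area convention: the save routine writes `area_name,spot_id,spot_id,...` to `resources/data/areas.wf`, and `NewAreaPopUp.load_area` splits the same line back into a `CustomArea`. A minimal standalone sketch of that round trip; the `Area` dataclass and the `save_areas`/`load_areas` names are illustrative stand-ins, not classes from the repo:

```python
# Round-trip sketch of the areas.wf convention used in the record above:
# one area per line, "area_name,spot_id,spot_id,...".
from dataclasses import dataclass, field
from typing import List

@dataclass
class Area:  # hypothetical stand-in for the repo's CustomArea
    area_name: str
    spot_ids: List[str] = field(default_factory=list)

def save_areas(areas: List[Area], path: str = "areas.wf") -> None:
    with open(path, "w") as f:
        for a in areas:
            f.write(",".join([a.area_name, *a.spot_ids]) + "\n")

def load_areas(path: str = "areas.wf") -> List[Area]:
    loaded = []
    with open(path) as f:
        for line in f:
            name, *spots = line.strip("\n").split(",")
            loaded.append(Area(name, spots))
    return loaded

save_areas([Area("Fast Movers", ["01A1A", "01A1B"])])
print(load_areas())  # [Area(area_name='Fast Movers', spot_ids=['01A1A', '01A1B'])]
```

Keeping both directions side by side makes the one-line-per-area format easy to verify against the repo's own writer and parser.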
bo233/FaceDiary
https://github.com/bo233/FaceDiary
0caefdf7ab4fb342e3df0820e3774eb4a442943e
9205d3192abc864f04068c1615a1780c044a0888
1d3d7fdde78f40383e53bb4c053e8694d7e78ab0
refs/heads/master
2020-03-30T12:31:16.004799
2018-10-12T14:49:34
2018-10-12T14:49:34
151,227,772
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4755488336086273, "alphanum_fraction": 0.499361127614975, "avg_line_length": 38.49082565307617, "blob_id": "9b80074438b1fa976e74f953281808f25ef45753", "content_id": "ee7f545623d45298a3b0738e2036dbb8728a9e47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8843, "license_type": "no_license", "max_line_length": 120, "num_lines": 218, "path": "/utils/video_process.py", "repo_name": "bo233/FaceDiary", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom utils.preprocessor import preprocess_input\nimport os\nimport sys\nimport cv2.face\nfrom GUI.widgets import *\nimport time\n\n\nclass VideoProcessor:\n RUN_FACE_GENERATE = 0\n RUN_EMOTION_RECOG = 1\n RUN_FACE_RECOG = 2\n\n runCommand = 1\n\n def __init__(self, gui):\n self.camera = cv2.VideoCapture(0)\n # 人脸检测器\n self.facePath = './model/lbpcascade_frontalface.xml'\n self.faceCascade = cv2.CascadeClassifier(self.facePath)\n # 表情检测器\n emotionModelPath = './model/emotion_model.hdf5'\n self.emotionClassifier = load_model(emotionModelPath)\n self.emotionLabels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',\n 4: 'sad', 5: 'surprise', 6: 'neutral'}\n # 记录一个识别表情过程中,各表情出现的次数\n self.emotionRecord = [0, 0, 0, 0, 0, 0, 0]\n # 记录已注册人脸的数量\n self.faceRegistered = 0\n self.whoRU = -1\n # 获取模型的张量\n self.emotionTargetSize = self.emotionClassifier.input_shape[1:3]\n # GUI\n self.gui = gui\n # 人脸识别模型\n self.x = []\n self.y = []\n self.names = []\n self.readImage('./data/at/')\n self.y = np.asarray(self.y, dtype=np.int32)\n self.model = cv2.face.LBPHFaceRecognizer_create()\n # self.model = cv2.face.EigenFaceRecognizer_create()\n self.model.train(np.asarray(self.x), np.asarray(self.y))\n # self.model.save('./model/face_model.xml')\n # self.model = load_model('./model/face_model.xml')\n\n def faceGenerate(self, name):\n count = 0\n\n while True:\n ret, frame = self.camera.read()\n try:\n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = self.faceCascade.detectMultiScale(grayFrame, 1.3, 5)\n\n for (x, y, w, h) in faces:\n f = cv2.resize(grayFrame[y:y + h, x:x + w], (200, 200))\n\n cv2.imwrite('./data/at/%s/%s.pgm' % (str(self.faceRegistered), str(count)), f)\n count += 1\n cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n cv2.putText(frame, 'Recording you...%d%%' % (count*2), (x, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)\n self.display(frame)\n except:\n continue\n if cv2.waitKey(85) & 0xff == ord('q'):\n break\n # 每张脸采样50张\n if count > 49:\n self.runCommand = self.RUN_EMOTION_RECOG\n break\n\n self.x.clear()\n del self.y\n self.y = []\n self.names.clear()\n self.readImage('./data/at/')\n self.y = np.asarray(self.y, dtype=np.int32)\n self.model.train(self.x, self.y)\n\n def emotionRecog(self, record=False):\n self.emotionRecord = np.zeros(7)\n time_start = time.time()\n time_cost = 0\n while True:\n ret, frame = self.camera.read()\n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 首先检测人脸,返回的是框住人脸的矩形框\n faces = self.faceCascade.detectMultiScale(\n grayFrame,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n # 画出每一个人脸,提取出人脸所在区域\n for (x, y, w, h) in faces:\n grayFace = grayFrame[y:y+h, x:x+w]\n\n try:\n grayFace = cv2.resize(grayFace, (self.emotionTargetSize))\n except:\n continue\n\n grayFace = preprocess_input(grayFace, True)\n grayFace = np.expand_dims(grayFace, 0)\n grayFace = np.expand_dims(grayFace, -1)\n emotionPrediction = 
self.emotionClassifier.predict(grayFace)\n emotionProbability = np.max(emotionPrediction)\n emotionLabelArg = np.argmax(emotionPrediction)\n emotionText = self.emotionLabels[emotionLabelArg]\n\n if emotionText == 'angry':\n color = emotionProbability * np.asarray((255, 0, 0))\n elif emotionText == 'sad':\n color = emotionProbability * np.asarray((0, 0, 255))\n elif emotionText == 'happy':\n color = emotionProbability * np.asarray((255, 255, 0))\n elif emotionText == 'surprise':\n color = emotionProbability * np.asarray((0, 255, 255))\n else:\n color = emotionProbability * np.asarray((0, 255, 0))\n\n # 标明心情 框出人脸\n cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)\n if record:\n self.emotionRecord[emotionLabelArg] += 1\n time_cost = time.time() - time_start\n cv2.putText(frame, emotionText+' %ds' % (5-time_cost+1), (x, y - 7), 3, 1.2, color, 2, cv2.LINE_AA)\n else:\n cv2.putText(frame, emotionText, (x, y - 7), 3, 1.2, color, 2, cv2.LINE_AA)\n if cv2.waitKey(85) & 0xff == ord('q') | self.runCommand != self.RUN_EMOTION_RECOG:\n break\n if time_cost > 5:\n break\n self.display(frame)\n # if record:\n\n\n def readImage(self, path, sz=None):\n self.faceRegistered = 0\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subjectPath = os.path.join(dirname, subdirname)\n for filename in os.listdir(subjectPath):\n try:\n if filename == '.directory':\n continue\n elif filename == 'name.txt':\n filePath = os.path.join(subjectPath, filename)\n fo = open(filePath, \"r+\")\n str = fo.read(10)\n self.names.append(str)\n fo.close()\n elif filename == 'diary.txt':\n continue\n else:\n filePath = os.path.join(subjectPath, filename)\n im = cv2.imread(filePath, cv2.IMREAD_GRAYSCALE)\n\n if sz is not None:\n im = cv2.resize(im, (200, 200))\n self.x.append(np.asarray(im, dtype=np.uint8))\n self.y.append(self.faceRegistered)\n\n except:\n print(sys.exc_info()[0])\n raise\n self.faceRegistered += 1\n\n def faceRecog(self):\n faceRecord = np.zeros(self.faceRegistered)\n time_start = time.time()\n while True:\n read, img = self.camera.read()\n faces = self.faceCascade.detectMultiScale(img, 1.3, 5)\n for (x, y, w, h) in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n roi = gray[x:x + w, y:y + h]\n try:\n roi = cv2.resize(roi, (200, 200), interpolation=cv2.INTER_LINEAR)\n params = self.model.predict(roi)\n faceRecord[params[0]] += 1\n cv2.putText(img, 'Recognizing you...%ds' % (15-time_cost), (x, y - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)\n except:\n continue\n self.display(img)\n time_cost = time.time() - time_start\n if cv2.waitKey(85) & 0xff == ord('q') | self.runCommand != self.RUN_FACE_RECOG:\n break\n if time_cost > 15:\n return -1\n if faceRecord.max() > 10:\n return faceRecord.argmax()\n\n def runControl(self, record=False, name=''): # record:是否记录人脸或表情出现的次数\n if self.runCommand == self.RUN_EMOTION_RECOG:\n self.emotionRecog(record)\n elif self.runCommand == self.RUN_FACE_GENERATE:\n self.faceGenerate(name)\n elif self.runCommand == self.RUN_FACE_RECOG:\n self.whoRU = self.faceRecog()\n\n def run(self, command, record=False, name=''):\n self.runCommand = command\n self.runControl(record, name)\n\n def display(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n self.gui.canvas.add(frame)\n self.gui.window.update_idletasks()\n self.gui.window.update()\n" }, { "alpha_fraction": 0.5334464907646179, "alphanum_fraction": 0.5539527535438538, "avg_line_length": 41.08928680419922, "blob_id": 
"a9d8bacdf5995b9505fc2afcb8f11a6dabc40a07", "content_id": "f895356564894c03cdc680739864e2306fb28ef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7749, "license_type": "no_license", "max_line_length": 113, "num_lines": 168, "path": "/GUI_module.py", "repo_name": "bo233/FaceDiary", "src_encoding": "UTF-8", "text": "import cv2\nfrom tkinter import *\nimport tkinter.messagebox\nfrom GUI.widgets import *\nfrom PIL import Image, ImageTk\n# from utils.emotion_recognize import *\nfrom utils.video_process import *\nfrom tkinter import ttk\n\n\nclass Window:\n def __init__(self):\n self.window = Tk()\n self.window.title('人脸日记')\n self.window.size()\n sw = self.window.winfo_screenwidth() # 获取屏幕宽\n sh = self.window.winfo_screenheight() # 获取屏幕高\n wx = 850\n wh = 600\n self.window.geometry(\"%dx%d+%d+%d\" %(wx, wh, (sw-wx)/2, (sh-wh)/2-100)) # 窗口至指定位置\n self.videoProc = VideoProcessor(self)\n # ---------------------------控件\n self.bLogin = Button(self.window, text='登陆', width=10, command=self.loginButtonFuntion)\n self.bLogin.place(x=745, y=200)\n self.bExit = Button(self.window, text='退出', width=10, command=self.exit)\n self.bExit.place(x=745, y=480)\n self.canvas = ICanvas(self.window, bg='#ffffff', height=480, width=720)\n self.canvas.place(x=0, y=120)\n self.bAbout = Button(self.window, text='关于', width=10,\n command=lambda: tkinter.messagebox.showinfo(title='关于',\n message='由bo233及其团队编写的人脸“日记”。\\n\\n由于时间和技术水平有限,功能可能不够完善,敬请谅解。\\n\\n'\n '感谢CSDN、GitHub、StackOverflow,感谢为开源项目奋斗的前辈们,'\n '是你们的奉献使得该程序的完成成为可能。'))\n self.bAbout.place(x=745, y=410)\n self.bLogon = Button(self.window, text='注册', width=10,\n command=lambda: self.logonButtonFunction())\n self.bLogon.place(x=745, y=270)\n self.bHelp = Button(self.window, text='帮助', width=10)\n self.bHelp.place(x=745, y=340)\n\n self.bLogout = Button(self.window, text='登出', width=10, command=self.logoutButtonFuntion)\n self.bRecordEmotion = Button(self.window, text='记录心情', width=10, command=self.emotionButtonFunction)\n self.bReadDiary = Button(self.window, text='查看日记', width=10, command=self.readDiaryButtonFunction)\n\n self.window.resizable(0, 0)\n # 刚进入界面时进行表情识别\n self.videoProc.runControl()\n self.window.mainloop()\n\n def exit(self):\n self.videoProc.runCommand = -1\n self.window.destroy()\n self.videoProc.camera.release()\n\n def logoutButtonFuntion(self):\n tkinter.messagebox.showinfo(title='登出', message='感谢使用,欢迎再次使用!')\n self.bLogin.place(x=745, y=200)\n self.bLogon.place(x=745, y=270)\n self.bLogout.place_forget()\n self.bReadDiary.place_forget()\n self.bRecordEmotion.place_forget()\n self.lWelcome.place_forget()\n\n # 表情按钮相关执行函数,包括表情识别,表情确定,记录表情\n def emotionButtonFunction(self):\n self.videoProc.run(self.videoProc.RUN_EMOTION_RECOG, record=True)\n yes = tkinter.messagebox.askyesno(title='记录心情', message='识别出你的表情为'+\n self.videoProc.emotionLabels[np.argmax(self.videoProc.emotionRecord)]+\n ',是否记录表情?')\n if yes:\n fo = open('./data/at/%s/diary.txt' % str(self.videoProc.whoRU), 'a')\n fo.write(str(time.strftime('\\n%Y-%m-%d %H:%M', time.localtime()))+' '\n +self.videoProc.emotionLabels[np.argmax(self.videoProc.emotionRecord)])\n fo.close()\n\n\n # 登陆按钮相关函数,包括人脸识别并且匹配到相应用户\n def loginButtonFuntion(self):\n self.bLogon.config(state='disabled')\n self.bLogin.config(state='disabled')\n self.videoProc.run(self.videoProc.RUN_FACE_RECOG)\n if self.videoProc.whoRU != -1:\n tkinter.messagebox.showinfo(title='登陆成功', message=self.videoProc.names[self.videoProc.whoRU]+',欢迎你!')\n self.bLogin.place_forget()\n 
self.bLogon.place_forget()\n self.bLogout.place(x=745, y=270)\n self.bReadDiary.place(x=745, y=200)\n self.bRecordEmotion.place(x=745, y=130)\n self.lWelcome = tk.Label(self.window, text=self.videoProc.names[self.videoProc.whoRU]+'\\n欢迎你')\n self.lWelcome.config(font='Blackletter -20')\n self.lWelcome.place(x=750, y=30)\n else:\n tkinter.messagebox.showinfo(title='登陆失败', message='抱歉,无法识别你!')\n self.bLogon.config(state='normal')\n self.bLogin.config(state='normal')\n\n # 注册按钮相关函数\n def logonButtonFunction(self):\n self.bLogon.config(state='disabled')\n self.bLogin.config(state='disabled')\n\n class TextMsgbox:\n def __init__(self):\n self.name = ''\n self.root = tk.Tk()\n sw = self.root.winfo_screenwidth() # 获取屏幕宽\n sh = self.root.winfo_screenheight() # 获取屏幕高\n self.root.title(\"注册\")\n self.root.geometry(\"%dx%d+%d+%d\" % (300, 80, (sw - 300) / 2, (sh - 80) / 2 - 100))\n self.l1 = tk.Label(self.root, text=\"请输入姓名:\")\n self.l1.pack()\n self.xls = tk.Entry(self.root)\n self.xls.pack()\n self.button = Button(self.root, text=\"确认\", width=7, command=self.getName)\n self.button.pack()\n # ##########可以的话加个取消\n self.root.mainloop()\n\n def getName(self):\n self.name = self.xls.get()\n self.root.quit()\n self.root.destroy()\n\n # 弹框提示用户输入姓名\n textBox = TextMsgbox()\n name = textBox.name\n path = './data/at/'+str(self.videoProc.faceRegistered)\n if name == '':\n tkinter.messagebox.showinfo(title='提示', message='请输入姓名!')\n else:\n os.mkdir(path)\n self.videoProc.run(self.videoProc.RUN_FACE_GENERATE, name=name)\n tkinter.messagebox.showinfo(title='提示', message='注册成功!')\n self.bLogon.config(state='normal')\n self.bLogin.config(state='normal')\n\n def readDiaryButtonFunction(self):\n class DiaryView:\n def __init__(self, num):\n path = './data/at/' + str(num) + '/diary.txt'\n of = open(path)\n fread = of.readlines()\n self.root = tkinter.Tk()\n self.root.title('查看日记')\n self.tree = ttk.Treeview(self.root, height=20, show=\"headings\") # 表格\n self.tree['columns'] = ('日期', '心情')\n # 表示列,不显示\n self.tree.column('日期', width=200, anchor='center')\n self.tree.column('心情', width=100, anchor='center')\n self.tree.heading('日期', text=\"日期\") # 显示表头\n self.tree.heading('心情', text=\"心情\")\n i = 0\n for raw in fread:\n self.tree.insert('', i, values=(raw[0:17], raw[18:]))\n i += 1\n self.tree.pack()\n of.close()\n self.root.protocol(\"WM_DELETE_WINDOW\", self.exit)\n self.root.mainloop()\n def exit(self):\n self.root.destroy()\n self.root.quit()\n\n DiaryView(self.videoProc.whoRU)\n\n\nif __name__ == '__main__':\n w = Window()\n" } ]
2
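FaceDiary's `VideoProcessor` above trains an OpenCV LBPH recognizer on 200×200 grayscale crops (50 `.pgm` samples per registered user, one integer label per person) and calls `model.predict(roi)` at login. A minimal sketch of that train/predict flow, assuming `opencv-contrib-python` is installed (`cv2.face` ships only in the contrib build); the random arrays below are stand-ins for the saved face crops:

```python
import cv2
import numpy as np

# Stand-in training data: grayscale 200x200 crops, one int32 label per person.
faces = [np.random.randint(0, 256, (200, 200), dtype=np.uint8) for _ in range(4)]
labels = np.asarray([0, 0, 1, 1], dtype=np.int32)

model = cv2.face.LBPHFaceRecognizer_create()
model.train(faces, labels)  # accepts a list of images plus an int32 label array

# At recognition time the ROI is resized to the training crop size first.
probe = cv2.resize(faces[2], (200, 200), interpolation=cv2.INTER_LINEAR)
label, distance = model.predict(probe)  # lower distance means a closer match
print(label, distance)
```

Retraining from scratch after every registration, as the record does, stays affordable at this scale because LBPH training is a single histogram pass over the sample images.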
br34kc0de/HackerRank
https://github.com/br34kc0de/HackerRank
e273dd392a9c1f537f6aab0287cc3c6c63b76ef6
720c333bc2be6c3184426589491dd2658d4942ec
e2a6ca138bd8f63d6af1f4234b1365424f8cacf8
refs/heads/master
2020-03-11T15:59:20.402181
2018-04-19T21:12:58
2018-04-19T21:12:58
130,102,160
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 20, "blob_id": "5c001fb6e4275e62d2bda2dc1917fea471f0134e", "content_id": "ab704cd3add67ae80978d4e2e8db167a2af2151d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 42, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/shell_script/ryaoi_Arithmetic_Operations.sh", "repo_name": "br34kc0de/HackerRank", "src_encoding": "UTF-8", "text": "read num\nprintf \"%.3f\" `echo $num| bc -l`\n" }, { "alpha_fraction": 0.6197183132171631, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 19.285715103149414, "blob_id": "a338611084bb9b96dcb28b131239e4a810d8212c", "content_id": "2e574932774a00450eb9d485bc3ea32159f9325c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 142, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/shell_script/ryaoi_the_World_of_Numbers.sh", "repo_name": "br34kc0de/HackerRank", "src_encoding": "UTF-8", "text": "read number1\nread number2\n\necho $((number1 + number2))\necho $[number1 - number2]\necho `expr $number1 \\* $number2`\necho $((number1 / number2))\n" }, { "alpha_fraction": 0.5141093730926514, "alphanum_fraction": 0.5202822089195251, "avg_line_length": 34.4375, "blob_id": "8bcbbbff442eb8c1dc5350946c8a109abdc61a72", "content_id": "80734b0790d29e0e549a101af2e58f1530bf3460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/ryaoi_minion_game.py", "repo_name": "br34kc0de/HackerRank", "src_encoding": "UTF-8", "text": "def minion_game(string):\n board = {'Stuart':0, 'Kevin': 0}\n Stuart_word, Kevin_word = {}, {}\n vowels = ['a', 'e', 'i', 'o', 'u']\n string = string.lower()\n for index, char in enumerate(string):\n sliced = string[index:]\n start = 0\n if char in vowels:\n for x,c in enumerate(sliced):\n if sliced[start:len(sliced) - x] in Kevin_word:\n Kevin_word[sliced[start:len(sliced) - x]] += 1\n else:\n Kevin_word[sliced[start:len(sliced) - x]] = 1\n else:\n for x,c in enumerate(sliced):\n if sliced[start:len(sliced) - x] in Stuart_word:\n Stuart_word[sliced[start:len(sliced) - x]] += 1\n else:\n Stuart_word[sliced[start:len(sliced) - x]] = 1\n\n stuart_result = sum(Stuart_word.values())\n kevin_result = sum(Kevin_word.values())\n\n if (stuart_result > kevin_result):\n print(\"Stuart\", stuart_result)\n elif (kevin_result > stuart_result):\n print(\"Kevin\", kevin_result)\n else:\n print(\"Draw\")\n\nminion_game(input().lower())\n" } ]
3
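The `ryaoi_minion_game.py` entry in the HackerRank record above scores Stuart and Kevin by materialising every substring into a dictionary, which is quadratic in memory. The same totals follow from a counting argument: a substring is fixed by its start and end index, so the character at index `i` starts exactly `len(s) - i` substrings, all credited to whoever owns `s[i]`. A sketch of that equivalent constant-memory version (the sample call is illustrative):

```python
def minion_game(s: str) -> str:
    s = s.lower()
    vowels = set("aeiou")
    # Each start index i contributes len(s) - i substrings to its owner.
    kevin = sum(len(s) - i for i, c in enumerate(s) if c in vowels)
    stuart = sum(len(s) - i for i, c in enumerate(s) if c not in vowels)
    if stuart > kevin:
        return "Stuart %d" % stuart
    if kevin > stuart:
        return "Kevin %d" % kevin
    return "Draw"

print(minion_game("banana"))  # -> Stuart 12
```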
e-so5/motivationApp
https://github.com/e-so5/motivationApp
af3ed64e8918990a5e9d78d13cef399ac9ff9646
94b5bddc2509b35b4438821a92215a8e3a5afeb8
3748e0698203cd93345bd00e401f0ac228b466b0
refs/heads/main
2023-01-24T06:54:37.423128
2020-11-13T07:09:59
2020-11-13T07:09:59
309,223,080
1
1
null
2020-11-02T01:04:52
2020-11-13T07:10:02
2020-11-14T13:36:21
HTML
[ { "alpha_fraction": 0.5928809642791748, "alphanum_fraction": 0.6017143130302429, "avg_line_length": 25.765323638916016, "blob_id": "fb020bcdde3ee2313b3bd3797ea3d049eeaa6119", "content_id": "88dcc443d3bc3f210858bb2d83619374929a3aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16737, "license_type": "no_license", "max_line_length": 109, "num_lines": 571, "path": "/app.py", "repo_name": "e-so5/motivationApp", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, session\n# Import the database module\nimport sqlite3\nimport random\n\n# Define the Flask app so it can be used\napp = Flask(__name__)\napp.secret_key = \"panda\"\n\n\[email protected]('/')\ndef index():\n    if 'user_id' in session:\n        return redirect(\"/MyPage\")\n    else:\n        return render_template('/index.html')\n\n\[email protected]('/event', methods=['POST'])\ndef event_post():\n    # Get the task id from the task list\n    id = request.form.get(\"task_id\")\n    print(id)\n\n    # # Set the task's flag in the database\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n    c.execute(\"UPDATE tasktable SET flag = 1 where id = ?\", (id,))\n    conn.commit()\n    conn.close()\n    return render_template('/event.html')\n\n\[email protected]('/game')\ndef game():\n    userChoice_1 = 1\n    userChoice_2 = 3\n    comChoice = random.randint(1, 6)\n    print(comChoice)\n    if userChoice_1 == comChoice or userChoice_2 == comChoice:\n        return redirect(\"/resultwin\")\n    else:\n        return redirect(\"/resultlose\")\n\n\[email protected]('/use', methods=['POST'])\ndef use():\n    # Get the selected item id from the reward list\n    user_id = session['user_id'][0]\n    id = request.form.get(\"task_id\")\n    print(id)\n    # item_id = int(id)\n\n    # Using the id as a key, get the point cost of the item to use\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n    c.execute(\"SELECT point FROM user_table where id = ?\", (id,))\n    point = c.fetchone()\n    point = point[0]\n    print(point)\n    # Using the user_id as a key, get the user's current points\n    c.execute(\"SELECT point FROM user where id = ?\", (user_id,))\n    user_point = c.fetchone()\n    user_point = user_point[0]\n    print(user_point)\n\n    if point <= user_point:\n        # Update the points\n        updatedPoint = user_point - point\n        print(updatedPoint)\n\n    else:\n        return ''' <p>Not enough points</p> '''\n\n    # Update the database\n\n    c.execute(\"UPDATE user SET point=? where id = ?\", (updatedPoint, user_id))\n    conn.commit()\n    conn.close()\n\n    return redirect(\"/uselist\")\n\n\[email protected](\"/pointDouble\")\ndef pointDouble():\n    user_id = session['user_id'][0]\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n\n    # Using the user_id as a key, get the user's current points\n    c.execute(\"SELECT point FROM user where id = ?\", (user_id,))\n    user_point = c.fetchone()\n    user_point = user_point[0]\n    print(user_point)\n\n    # Using the user_id as a key, get the user's current level-tracking points\n    # c.execute(\"SELECT point FROM level_table where user_id = ?\", (user_id,))\n    # current_level_point = c.fetchone()\n    # current_level_point = current_level_point[0]\n\n    # Using the user_id as a key, get the user's current level and level-tracking points\n    c.execute(\"SELECT level, level_point FROM user where id = ?\", (user_id,))\n    current_status = c.fetchone()\n    current_level = current_status[0]\n    current_level_point = current_status[1]\n    print(current_level)\n\n    # Get the points of the flagged task (the one selected in the task list) from the task table\n    c.execute(\"SELECT id, point FROM tasktable where flag = 1\")\n    current_task = c.fetchone()\n    task_id = current_task[0]\n    task_point = current_task[1]\n    print(task_point)\n    # # The user won, so the points are doubled\n    getPoint = task_point * 2\n\n    # Calculate the points after the win\n    currentPoint = user_point + getPoint\n    print(currentPoint)\n\n    # Calculate the level-tracking points\n    currentLevelPoint = current_level_point + getPoint\n    print(currentLevelPoint)\n\n    # Update the points in the user database\n    c.execute(\"UPDATE user SET point=?, level_point=? where id = ?\",\n              (currentPoint, currentLevelPoint, user_id))\n    c.execute(\"UPDATE tasktable SET flag = 0 where id = ?\", (task_id,))\n\n    # Update the level table\n    # c.execute(\"UPDATE level_table SET point = ? where user_id = ?\",\n    #           (currentLevelPoint, user_id))\n    conn.commit()\n    conn.close()\n\n    # Level up when the level-tracking points reach 200!\n    if currentLevelPoint >= 200:\n        current_level += 1\n        reset_counter = currentLevelPoint - 200\n\n        # Update the database\n        conn = sqlite3.connect('app.db')\n        c = conn.cursor()\n        # c.execute(\"UPDATE level_table SET point=? where user_id = ?\",\n        #           (reset_counter, user_id))\n        c.execute(\"UPDATE user SET level = ?, level_point=? where id = ?\",\n                  (current_level, reset_counter, user_id))\n        conn.commit()\n        conn.close()\n        return render_template(\"levelUp.html\")\n    else:\n        return redirect(\"/MyPage\")\n\n\[email protected](\"/pointNormal\")\ndef pointNormal():\n    user_id = session['user_id'][0]\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n\n    # Using the user_id as a key, get the user's current points\n    c.execute(\"SELECT point FROM user where id = ?\", (user_id,))\n    user_point = c.fetchone()\n    user_point = user_point[0]\n    print(user_point)\n\n    # Using the user_id as a key, get the user's current level-tracking points\n    # c.execute(\"SELECT point FROM level_table where user_id = ?\", (user_id,))\n    # current_level_point = c.fetchone()\n    # current_level_point = current_level_point[0]\n\n    # Using the user_id as a key, get the user's current level and level-tracking points\n    c.execute(\"SELECT level, level_point FROM user where id = ?\", (user_id,))\n    current_status = c.fetchone()\n    current_level = current_status[0]\n    current_level_point = current_status[1]\n    print(current_level)\n\n    # Get the points of the flagged task (the one selected in the task list) from the task table\n    c.execute(\"SELECT id, point FROM tasktable where flag = 1\")\n    current_task = c.fetchone()\n    task_id = current_task[0]\n    task_point = current_task[1]\n    print(current_task)\n\n    # # Calculate the updated points\n    currentPoint = user_point + task_point\n    print(currentPoint)\n\n    # Calculate the level-tracking points\n    currentLevelPoint = current_level_point + task_point\n    print(currentLevelPoint)\n\n    # Update the points in the user database\n    c.execute(\"UPDATE user SET point=?, level_point=? where id = ?\",\n              (currentPoint, currentLevelPoint, user_id))\n    c.execute(\"UPDATE tasktable SET flag = 0 where id = ?\", (task_id,))\n    conn.commit()\n    conn.close()\n\n    # Level up when the level-tracking points reach 200!\n    if currentLevelPoint >= 200:\n        current_level += 1\n        reset_counter = currentLevelPoint - 200\n\n        # Update the database\n        conn = sqlite3.connect('app.db')\n        c = conn.cursor()\n        # c.execute(\"UPDATE level_table SET point=? where user_id = ?\",\n        #           (reset_counter, user_id))\n        c.execute(\"UPDATE user SET level = ?, level_point=? where id = ?\",\n                  (current_level, reset_counter, user_id))\n        conn.commit()\n        conn.close()\n        return render_template(\"levelUp.html\")\n    else:\n        return redirect(\"/MyPage\")\n\n\[email protected](\"/resultwin\")\ndef resultwin():\n    user_id = session['user_id']\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n\n    # Get the points of the flagged task (the one selected in the task list) from the task table\n    c.execute(\"SELECT id, point FROM tasktable where flag = 1\")\n    current_task = c.fetchone()\n    task_id = current_task[0]\n    task_point = current_task[1]\n    print(current_task)\n\n    gotPoint = task_point * 2\n\n    return render_template('/resultwin.html', gotPoint=gotPoint)\n\n\[email protected](\"/MyPage\")\ndef MyPage():\n    user_id = session['user_id'][0]\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n    print(user_id)\n\n    c.execute(\"SELECT user_name, point, level FROM user where id = ?\", (user_id,))\n    user_info = c.fetchone()\n    user_name = user_info[0]\n    user_point = user_info[1]\n    user_level = user_info[2]\n    print(user_id)\n    print(user_info)\n\n    conn.commit()\n    conn.close()\n\n    return render_template('MyPage.html', user_name=user_name, user_point=user_point, user_level=user_level)\n\n\[email protected](\"/resultlose\")\ndef ResultLose():\n    user_id = session['user_id']\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n\n    # Get the points of the flagged task (the one selected in the task list) from the task table\n    c.execute(\"SELECT id, point FROM tasktable where flag = 1\")\n    current_task = c.fetchone()\n    task_id = current_task[0]\n    task_point = current_task[1]\n    print(current_task)\n\n    return render_template(\"/resultlose.html\", task_point=task_point)\n\n\[email protected](\"/levelUp\")\ndef levelUp():\n    return render_template(\"/levelUp.html\")\n\n\[email protected](403)\ndef mistake403(code):\n    return render_template(\"/403error.html\")\n\n\[email protected](404)\ndef notfound404(code):\n    return render_template(\"/new_404.html\")\n\n\[email protected](\"/addpage\")\ndef addpage():\n    return render_template(\"/addpage.html\")\n\n\[email protected](\"/start\")\ndef start():\n    return render_template(\"/start.html\")\n\n\[email protected](\"/tasklist\")\ndef task_list():\n    if \"user_id\" in session:\n        py_user_id = session[\"user_id\"][0]\n        conn = sqlite3.connect(\"app.db\")\n        c = conn.cursor()\n        c.execute(\n            \"select id, task,point from tasktable where user_id = ?\", (py_user_id,))\n        task_list = []\n        for row in c.fetchall():\n            task_list.append({\"id\": row[0], \"task\": row[1], \"point\": row[2]})\n        c.close()\n\n        return render_template(\"tasklist.html\", html_task_list=task_list)\n    else:\n        return redirect(\"/login\")\n\n# GET request for adding a task\n\n\[email protected](\"/addtask\")\ndef add_get():\n    if \"user_id\" in session:\n        return render_template(\"/addpage.html\")\n    else:\n        return redirect(\"/login\")\n\n# POST request for adding a task\n\n\[email protected]('/addtask', methods=['POST'])\ndef add_post():\n    user_id = session[\"user_id\"][0]\n    add_task = request.form.get(\"task\")\n    add_point = request.form.get(\"point\")\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n    c.execute(\"INSERT INTO tasktable VALUES(null,?,?,?,0)\",\n              (add_task, add_point, user_id,))\n    conn.commit()\n    c.close()\n    return redirect('/tasklist')\n\n# Build the use (reward) list\n\n\[email protected](\"/uselist\")\ndef uselist():\n    if \"user_id\" in session:\n        py_user_id = session[\"user_id\"][0]\n        conn = sqlite3.connect(\"app.db\")\n        c = conn.cursor()\n        c.execute(\n            \"select id, item, point from user_table where user_id = ?\", (py_user_id,))\n        item_list = []\n        for row in c.fetchall():\n            item_list.append({\"id\": row[0], \"item\": row[1], \"point\": row[2]})\n        c.execute(\n            \"select point from user where id = ?\", (py_user_id,))\n        user_point = c.fetchone()\n        c.close()\n        print(item_list)\n\n        return render_template(\"uselist.html\", html_item_list=item_list, user_point=user_point)\n    else:\n        return redirect(\"/login\")\n\n# GET request for adding a reward item\n\n\[email protected](\"/additem\")\ndef add_item():\n    if \"user_id\" in session:\n        return render_template(\"/additempage.html\")\n    else:\n        return redirect(\"/login\")\n\n# POST request for adding a reward item\n\n\[email protected]('/additem', methods=['POST'])\ndef additem_post():\n    user_id = session[\"user_id\"][0]\n    add_item = request.form.get(\"item\")\n    add_point = request.form.get(\"point\")\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n    c.execute(\"INSERT INTO user_table VALUES(null,?,?,?)\",\n              (add_item, add_point, user_id,))\n    conn.commit()\n    c.close()\n    return redirect('/uselist')\n\n\n# Implementation of the task list editing feature\[email protected](\"/edittask/<int:id>\")\ndef edit(id):\n    if \"user_id\" in session:\n        conn = sqlite3.connect(\"app.db\")\n        c = conn.cursor()\n        c.execute(\"select id, task, point from tasktable where id = ?\", (id,))\n        task = c.fetchone()\n        c.close()\n        if task is not None:\n            task_id = task[0]\n            user_task = task[1]\n            user_point = task[2]\n        else:\n            return \"No such task\"\n\n        print(task_id)\n        return render_template(\"task_edit.html\", task_id=task_id, user_task=user_task, user_point=user_point)\n    else:\n        return redirect(\"/login\")\n\n# POST request for the tasklist editing feature\n\n\[email protected]('/edittask', methods=['POST'])\ndef edit_post():\n    taskId = request.form.get(\"task_id\")\n    taskId = int(taskId)\n    task = request.form.get(\"task\")\n    point = request.form.get(\"point\")\n    point = int(point)\n    conn = sqlite3.connect(\"app.db\")\n    c = conn.cursor()\n    c.execute(\"update tasktable set task=?, point = ? where id = ?\",\n              (task, point, taskId))\n    conn.commit()\n    c.close()\n    return redirect('/tasklist')\n\n# Implementation of the tasklist deletion feature\n\n\[email protected]('/deltask/<int:id>')\ndef task_del(id):\n    if \"user_id\" in session:\n        conn = sqlite3.connect(\"app.db\")\n        c = conn.cursor()\n        c.execute(\"delete from tasktable where id = ?\", (id,))\n        conn.commit()\n        c.close()\n        return redirect(\"/tasklist\")\n    else:\n        return redirect(\"/login\")\n\n# Implementation of the uselist editing feature\n\n\[email protected](\"/edititem/<int:id>\")\ndef edituselist(id):\n    if \"user_id\" in session:\n        conn = sqlite3.connect(\"app.db\")\n        c = conn.cursor()\n        c.execute(\"select id, item, point from user_table where id = ?\", (id,))\n        task = c.fetchone()\n        c.close()\n        if task is not None:\n            task_id = task[0]\n            user_item = task[1]\n            user_point = task[2]\n        else:\n            return \"No such task\"\n        return render_template(\"use_edit.html\", task_id=task_id, user_item=user_item, user_point=user_point)\n    else:\n        return redirect(\"/login\")\n\n# POST request for the uselist editing feature\n\n\[email protected]('/edititem', methods=['POST'])\ndef edituselist_post():\n    itemId = request.form.get(\"task_id\")\n    itemId = int(itemId)\n    point = request.form.get(\"point\")\n    point = int(point)\n    item = request.form.get(\"item\")\n    conn = sqlite3.connect(\"app.db\")\n    c = conn.cursor()\n    c.execute(\n        \"update user_table set item = ?, point = ? where id = ?\", (item, point, itemId))\n    conn.commit()\n    c.close()\n    return redirect('/uselist')\n\n# Implementation of the uselist deletion feature\n\n\[email protected](\"/delitem/<int:id>\")\ndef uselist_del(id):\n    if \"user_id\" in session:\n        conn = sqlite3.connect(\"app.db\")\n        c = conn.cursor()\n        c.execute(\"delete from user_table where id = ?\", (id,))\n        conn.commit()\n        c.close()\n        return redirect(\"/uselist\")\n    else:\n        return redirect(\"/login\")\n\n\[email protected]('/comp')\ndef comp_login():\n    return render_template(\"/comp.html\")\n#\n\n\[email protected]('/new_login')\ndef new_login_get():\n    return render_template('new_login.html')\n\n\[email protected]('/TOP')\ndef TOP():\n    return render_template('TOP.html')\n\n\[email protected]('/new_login', methods=[\"POST\"])\ndef new_login_post():\n    name = request.form.get('user_name')\n    password = request.form.get('password')\n    conn = sqlite3.connect('app.db')\n    c = conn.cursor()\n    c.execute(\"INSERT INTO user VALUES(null,?,?,0,1,0)\", (name, password))\n    conn.commit()\n    c.close()\n    return redirect('/comp')\n\n\n# login\[email protected]('/login', methods=[\"GET\", \"POST\"])\ndef login_post():\n    if request.method == \"GET\":\n        if 'user_id' in session:\n            return redirect(\"/MyPage\")\n        else:\n            return render_template(\"login.html\")\n    else:\n        name = request.form.get('user_name')\n        password = request.form.get('password')\n        conn = sqlite3.connect('app.db')\n        c = conn.cursor()\n        c.execute(\n            \"SELECT id FROM user WHERE user_name = ? AND password = ?\", (name, password))\n        py_user_id = c.fetchone()\n        c.close()\n\n        if py_user_id is not None:\n            session[\"user_id\"] = py_user_id\n            # Change /new_login to the top page (below)\n            return redirect('/MyPage')\n        else:\n            return redirect('/login')\n\n\[email protected](\"/logout\")\ndef logout():\n    session.pop('user_id', None)\n    # After logging out, redirect to the login page\n    return render_template('/TOP.html')\n\n\[email protected]('/task_edit')\ndef task_edit():\n    return render_template(\"/task_edit.html\")\n\n\nif __name__ == \"__main__\":\n    app.debug = True\n    app.run(host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 17, "blob_id": "809700cc11c9ded1deb41c9c9e7e3c9b6a95335a", "content_id": "8d6e453545062b5149410d477cf92ac41852ba38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/README.md", "repo_name": "e-so5/motivationApp", "src_encoding": "UTF-8", "text": "\"# motivationApp\" \n" } ]
2
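Both `pointDouble()` and `pointNormal()` in the `app.py` above repeat the same level-up arithmetic: add the earned points, compare against the 200-point threshold, and carry the excess into the next level. A minimal illustrative sketch of that rule as a pure function; `apply_points` and `LEVEL_THRESHOLD` are hypothetical names for this sketch, not part of the repository:

```python
LEVEL_THRESHOLD = 200  # points per level, matching the `>= 200` check in the routes above


def apply_points(level, level_point, gained):
    """Return (new_level, new_level_point, leveled_up) after earning `gained` points."""
    level_point += gained
    leveled_up = False
    # A loop (rather than a single `if`, as in the routes) also handles a
    # gain larger than one level's worth of points.
    while level_point >= LEVEL_THRESHOLD:
        level_point -= LEVEL_THRESHOLD
        level += 1
        leveled_up = True
    return level, level_point, leveled_up


assert apply_points(1, 180, 30) == (2, 10, True)   # crosses the threshold, 10 points carry over
assert apply_points(1, 50, 30) == (1, 80, False)   # no level-up
```

Factoring the rule out this way would let both routes share one tested code path instead of duplicating the SQL-update-plus-threshold logic.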
live2pro/starbucks-advertising
https://github.com/live2pro/starbucks-advertising
75c70ff6aeb16908046afa809b10a99f31f29695
a752a1bb7fa0344e3b694661c343c268e8904d7e
913d78db92784f4f5a836f143b63db5c7c58c756
refs/heads/master
2022-04-17T17:55:56.703805
2019-02-03T19:39:01
2019-02-03T19:39:01
null
0
0
null
null
null
null
null
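The `ProfitsPredictor.predict_final_profits` method in `src/data/profit_10_days_dataset.py` below combines its two models as an expectation over whether the offer is viewed: `E[profit] = p_view * profit_if_viewed + (1 - p_view) * profit_no_offer`. A small numeric sketch of that combination; all values here are made up for illustration:

```python
import numpy as np

p_view = np.array([0.8, 0.2])            # classifier: P(offer is viewed)
profit_viewed = np.array([12.0, 9.0])    # regressor: profit if the offer is viewed
profit_null = np.array([5.0, 5.0])       # regressor: profit with no offer at all

expected = p_view * profit_viewed + (1 - p_view) * profit_null
print(expected)  # [10.6  5.8] -> choose_offer() picks the offer with the largest value
```

The "no offer" case is treated as always seen, which is why the code below overrides the prediction for `offer_type == 'no_offer'` with the plain profit estimate.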
[ { "alpha_fraction": 0.6822571754455566, "alphanum_fraction": 0.6856656074523926, "avg_line_length": 58.33707809448242, "blob_id": "bd9a690d53a75781bdca52cf625ba2c218513091", "content_id": "94c79819f90a770fe936e7d635d56edd3623d546", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5555, "license_type": "permissive", "max_line_length": 406, "num_lines": 89, "path": "/README.md", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "# Starbucks Advertising\nStarbucks challenge to select the best offers to show to each user in the cellphone app. This is the Capstone Project for Udacity's Data Scientist Nanodegree.\n\nThis project has a [Medium story](https://medium.com/@miguel.tasende/starbucks-offer-optimization-adb323ca32b5) associated.\n\n## Installations\nThe project uses Python 3.\nTo install it run:\n```\n$ conda env create -f sbucks.yml\n$ source activate sbucks\n$ pip install -e .\n$ python src/data/make_dataset.py\n```\nThe \"make_dataset.py\" script takes a long time to run (between 40 minutes and 1 hour). It is not necessary to run it, but the datasets have to be generated somewhere. If the script is not run, the main notebook will generate the datasets (taking longer to run).\n\n## Project Motivation\nThe project is based on simulated data from Starbucks. Starbucks has different kinds of offers to their customers that use the mobile app. The offers are of three kinds:\n - BOGO (Buy One Get One): The customer gets a free product with the purchase of one. Valid for a determined duration.\n - Discount: For a period of time the product can be bought at a discount.\n - Informational: Just show ads to the customer.\n\nThe aim of the project is to find the best offers for each customer to maximize the probabilities of \"offer completion\", or maximize the profits. Only one product is considered.\n\nI chose this project, as the Capstone project for Udacity's Data Scientist Nanodegree, because I like the company (as a customer), and I had previously completed a [challenge](https://github.com/mtasende/data-scientist-nanodegree/blob/master/projects/p04_starbucks/Starbucks.ipynb) from Starbucks with, what I think are, very good results, and I thought I could have good intuitions with this kind of data.\n\n## Project Organization\n\n ├── LICENSE\n ├── Makefile <- Makefile with commands like `make data` or `make train`\n ├── README.md <- The top-level README for developers using this project.\n ├── data\n │   ├── external <- Data from third party sources.\n │   ├── interim <- Intermediate data that has been transformed.\n │   ├── processed <- The final, canonical data sets for modeling.\n │   └── raw <- The original, immutable data dump.\n │\n ├── docs <- Documentation files for the project\n │\n ├── notebooks <- Jupyter notebooks. 
Naming convention is a number (for ordering),\n │ the creator's initials, and a short `-` delimited description, e.g.\n │ `1.0-jqp-initial-data-exploration`.\n │\n ├── reports <- Generated analysis as HTML, PDF, LaTeX, etc.\n │   └── figures <- Generated graphics and figures to be used in reporting\n │\n ├── requirements.txt <- The requirements file for reproducing the analysis environment, e.g.\n │ generated with `pip freeze > requirements.txt` (it's better to use the conda\n | environment with this project)\n │\n ├── sbucks.yml <- The file with the conda environment (preferred way to build the environment)\n │\n ├── setup.py <- makes project pip installable (pip install -e .) so src can be imported\n ├── src <- Source code for use in this project.\n │   ├── __init__.py <- Makes src a Python module\n │ │\n │   ├── data <- Scripts to download or generate data\n │   │   └── make_dataset.py\n │ │\n │   ├── evaluation <- Functions to evaluate models\n │   │   └── offer_success.py\n │ │\n │   ├── features <- Scripts to turn raw data into features for modeling\n │   │   └── clustering.py\n │   │   └── lagged.py\n │ │\n │   └── visualization <- Scripts to create exploratory and results oriented visualizations\n │   └── visualize.py\n │\n └── utils.py <- Utility functions\n\n\n## How to interact with the project\nThe main notebook is in `notebooks/Starbucks_Capstone_notebook.ipynb`. To reproduce the main results run that notebook. Most users will only want to run this.\n\nIn the folder `notebooks/offer_success_experiments` there is one notebook for each experiment that was run to try to predict the probability of \"success\" (that is, that an offer is viewed and completed) for an offer.\n\nIn the folder `notebooks/profit_10_days_experiments` there is one notebook for each experiment that was run to try to predict expected profit in the 10 days following the reception of an offer.\n\nThe `notebooks/scratchpad` folder contains notebooks used for the development process. They are not supposed to be run, unless you want to know more about the development process. They may not be well organized or easy to read.\n\n## Licensing, Authors, Acknowledgements, etc.\nCode released under the [MIT](https://github.com/mtasende/starbucks-advertising/blob/master/LICENSE) license.\n\nThis project was authored by Miguel Tasende.\n\nThanks to Starbucks for the dataset, and to Udacity for bringing the opportunity to work with it.\n\n<p><small>Project based on the <a target=\"_blank\" href=\"https://drivendata.github.io/cookiecutter-data-science/\">cookiecutter data science project template</a>. #cookiecutterdatascience</small></p>\n" }, { "alpha_fraction": 0.6293706297874451, "alphanum_fraction": 0.6293706297874451, "avg_line_length": 27.600000381469727, "blob_id": "035441d8bcf1679e0474c85c346693797ace1e66", "content_id": "d7610a1b40691d7d15918403caa985a5065faa50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "permissive", "max_line_length": 79, "num_lines": 10, "path": "/src/utils.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\" General utility functions. 
\"\"\"\n\n\ndef filter_args(fun, kwargs):\n \"\"\"\n Filters a kwargs dictionary with only the arguments that the function 'fun'\n accepts.\n \"\"\"\n return {key: kwargs[key] for key, value in kwargs.items()\n if key in fun.__code__.co_varnames}\n" }, { "alpha_fraction": 0.5867151021957397, "alphanum_fraction": 0.5924818515777588, "avg_line_length": 38.016666412353516, "blob_id": "2f32d9ac9d08f26b51e62fc0aa7110e511351217", "content_id": "e275e6a4c5978b9abccbd77cdfb8af2433a96694", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4682, "license_type": "permissive", "max_line_length": 87, "num_lines": 120, "path": "/src/data/missing_data.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\" Contains classes and functions to handle the missing data. \"\"\"\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom xgboost import XGBClassifier, XGBRegressor\nimport pandas as pd\nimport numpy as np\n\n\nMEMBER_DATE_FEATS = ['member_epoch_days', 'member_day', 'member_month',\n 'member_year', 'member_weekday']\n\n\nclass BasicImputer(BaseEstimator, TransformerMixin):\n \"\"\"\n Fills the demographics missing data with medians and most frequent values.\n Args:\n fill_mode(list(str)): The names of the columns to fill missing data with\n the most frequent value (other than gender value). This is used if new features\n are added to the dataset, that have missing data.\n \"\"\"\n\n def __init__(self, fill_mode=None):\n super(BaseEstimator, self).__init__()\n self.age_value = None\n self.income_value = None\n self.gender_value = None\n if fill_mode is None:\n self.fill_mode = list()\n else:\n self.fill_mode = fill_mode\n self.modes = dict()\n\n def fit(self, X, y=None):\n \"\"\" Get some medians. 
\"\"\"\n        self.age_value = np.round(X.age.median())\n        self.income_value = X.income.median()\n        self.gender_value = X.gender.mode().values[0]\n\n        self.modes = {col: X[col].mode().values[0] for col in self.fill_mode}\n        return self\n\n    def transform(self, X):\n        \"\"\" Fill the missing values with the stored medians and modes. \"\"\"\n        basic_filling = {'age': self.age_value,\n                         'income': self.income_value,\n                         'gender': self.gender_value}\n        filling = {**basic_filling, **self.modes}\n        return X.fillna(filling)\n\n\ndef add_date_features(data):\n    \"\"\" Generates some features from the date the customer became member on. \"\"\"\n    data['member_day'] = data.became_member_on.dt.day\n    data['member_weekday'] = data.became_member_on.dt.weekday\n    data['member_year'] = data.became_member_on.dt.year\n    data['member_month'] = data.became_member_on.dt.month\n\n    return data\n\n\nclass EstimatorImputer(BaseEstimator, TransformerMixin):\n    \"\"\"\n    Fills the demographics missing data with predictions from an estimator.\n    \"\"\"\n\n    def __init__(self, features=MEMBER_DATE_FEATS, keep_date_feats=True):\n        super(BaseEstimator, self).__init__()\n        self.features = features\n        self.age_estimator = XGBRegressor(max_depth=7, n_estimators=200,\n                                          random_state=2018)\n        self.income_estimator = XGBRegressor(max_depth=7, n_estimators=200,\n                                             random_state=2018)\n        self.gender_estimator = XGBClassifier(max_depth=7, n_estimators=200,\n                                              random_state=2018)\n        self.keep_date_feats = keep_date_feats\n\n    def fit(self, X, y=None):\n        \"\"\" Fit the estimators. \"\"\"\n        X = add_date_features(X)\n        # Train each estimator only on the rows where its own target is present\n        X_age_clean = X[~X.age.isnull()]\n        self.age_estimator.fit(X_age_clean[self.features], X_age_clean.age)\n        X_income_clean = X[~X.income.isnull()]\n        self.income_estimator.fit(X_income_clean[self.features],\n                                  X_income_clean.income)\n        X_gender_clean = X[~X.gender.isnull()]\n        self.gender_estimator.fit(X_gender_clean[self.features],\n                                  X_gender_clean.gender)\n        return self\n\n    def transform(self, X):\n        \"\"\" Fill the missing data. \"\"\"\n        res = X.copy()\n        res = add_date_features(res)\n\n        # Fill the age values\n        age_missing = res[res.age.isnull()]\n        age_missing_index = res.index[res.age.isnull()]\n        age_values = self.age_estimator.predict(age_missing[self.features])\n        res.update(pd.DataFrame(age_values, index=age_missing_index,\n                                columns=['age']))\n\n        # Fill the income values\n        income_missing = res[res.income.isnull()]\n        income_missing_index = res.index[res.income.isnull()]\n        income_values = self.income_estimator.predict(\n            income_missing[self.features])\n        res.update(pd.DataFrame(income_values, index=income_missing_index,\n                                columns=['income']))\n\n        # Fill the gender values\n        gender_missing = res[res.gender.isnull()]\n        gender_missing_index = res.index[res.gender.isnull()]\n        gender_values = self.gender_estimator.predict(\n            gender_missing[self.features])\n        res.update(pd.DataFrame(gender_values, index=gender_missing_index,\n                                columns=['gender']))\n\n        if not self.keep_date_feats:\n            # Keep the returned frame so the flag actually takes effect\n            res = res.drop(MEMBER_DATE_FEATS, axis=1)\n\n        return res\n" }, { "alpha_fraction": 0.7249647378921509, "alphanum_fraction": 0.7249647378921509, "avg_line_length": 34.45000076293945, "blob_id": "cb1d25821afbefd3b9fe77f0779f7521f0472fea", "content_id": "af46c43780c3dc082e65d8baf43c86ce8c4b8b13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 709, "license_type": "permissive", "max_line_length": 59, "num_lines": 20, "path": "/src/data/__init__.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "import os\nfrom pathlib import Path\nimport pkg_resources\n\n# MODULE_DIR = os.path.dirname(os.path.abspath(__file__))\nMODULE_DIR = pkg_resources.resource_filename('src', 'data')\nROOT_DIR = str(Path(MODULE_DIR).parent.parent)\nDATA_DIR = os.path.join(ROOT_DIR, 'data')\nDATA_RAW = os.path.join(DATA_DIR, 'raw')\nDATA_INTERIM = os.path.join(DATA_DIR, 'interim')\nDATA_EXTERNAL = os.path.join(DATA_DIR, 'external')\nDATA_PROCESSED = os.path.join(DATA_DIR, 'processed')\n\n\nif not os.path.exists(os.path.join(DATA_INTERIM)):\n    os.makedirs(DATA_INTERIM)\nif not os.path.exists(os.path.join(DATA_EXTERNAL)):\n    os.makedirs(DATA_EXTERNAL)\nif not os.path.exists(os.path.join(DATA_PROCESSED)):\n    os.makedirs(DATA_PROCESSED)\n" }, { "alpha_fraction": 0.6222310662269592, "alphanum_fraction": 0.6267728805541992, "avg_line_length": 33.66850662231445, "blob_id": "5a020f205d1b44e9046f6c8854f8b3121a2027ce", "content_id": "231e8bed2e870bfc3c2c101f9a95cfef5ec4550c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12550, "license_type": "permissive", "max_line_length": 91, "num_lines": 362, "path": "/src/data/preprocessing.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\" The first functions to apply to the raw datasets. \"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import LabelEncoder\n\nFUTURE_INFO = ['finish', 'success', 'view_time', 'viewed', 'actual_reward',\n               'profit_in_duration', 'profit_until_complete',\n               'spent_in_duration', 'spent_until_complete', 'completed']\n\nABSOLUTE_TIME_DEPENDENT = ['time', 'expected_finish']\n\n\ndef basic_preprocessing(portfolio, profile, transcript):\n    \"\"\"\n    Perform a basic preprocessing.\n\n    Args:\n        portfolio(pd.DataFrame): Raw data with the offers information.\n        profile(pd.DataFrame): Raw data with the customers information.\n        transcript(pd.DataFrame): Raw data with the events information.\n\n    Returns:\n        pd.DataFrame: A preprocessed dataset, for use in ML algorithms or\n        previous feature generation.\n        pd.DataFrame: The portfolio dataframe, preprocessed.\n    \"\"\"\n    portfolio = process_portfolio(portfolio)\n    profile = process_profile(profile)\n    transcript = process_transcript(transcript)\n\n    data = join_data(transcript, profile, portfolio, static=False)\n\n    return data, portfolio\n\n\ndef process_portfolio(portfolio):\n    \"\"\" All the preprocessing needed for portfolio alone. \"\"\"\n\n    return channels_ohe(portfolio)\n\n\ndef process_profile(profile):\n    \"\"\" All the preprocessing needed for profile alone. \"\"\"\n    profile.age = profile.age.replace(118, np.nan)\n    profile.became_member_on = pd.to_datetime(profile.became_member_on,\n                                              format='%Y%m%d')\n    profile['missing_demographics'] = profile.isnull().any(axis=1).astype(int)\n    profile = member_epoch_days(profile)\n\n    return profile\n\n\ndef process_transcript(transcript):\n    \"\"\" All the preprocessing needed for transcript alone. \"\"\"\n    return unwrap_transcript(transcript)\n\n\ndef channels_ohe(portfolio):\n    \"\"\"\n    Transforms the 'channels' column of the 'portfolio' dataframe into One-Hot\n    encoded columns for the possible channels.\n    \"\"\"\n    # Get all the possible channels\n    possible_channels = set()\n    for c_list in portfolio.channels:\n        for channel in c_list:\n            possible_channels.add(channel)\n    possible_channels = list(possible_channels)\n\n    # Create the channels' columns and fill them\n    for channel in possible_channels:\n        portfolio['channel_' + channel] = portfolio.channels.apply(\n            lambda x: int(channel in x))\n\n    # Drop the old \"channels\" column\n    portfolio = portfolio.drop('channels', axis=1)\n\n    return portfolio\n\n\ndef member_epoch_days(profile):\n    \"\"\"\n    Adds a column with the date transformed to 'number of days since\n    1/1/1970'.\n    \"\"\"\n    profile['member_epoch_days'] = (\n        profile.became_member_on - dt.datetime(1970, 1, 1)).dt.days\n    return profile\n\n\ndef unwrap_transcript(transcript):\n    \"\"\" Reads the 'value' dictionaries and adds the values as columns. \"\"\"\n    values_df = pd.DataFrame(transcript.value.tolist())\n    values_df.offer_id.update(values_df['offer id'])\n    values_df = values_df.drop('offer id', axis=1)\n\n    return transcript.join(values_df).drop('value', axis=1)\n\n\ndef join_data(transcript, profile, portfolio, static=True):\n    \"\"\"\n    Joins the three sources of data in one dataframe.\n    Args:\n        transcript(pandas dataframe): Contains the events (part of the raw\n            data)\n        profile(pandas dataframe): Contains the customer's profiles (part of\n            the raw data)\n        portfolio(pandas dataframe): Contains the offers (part of the raw data)\n        static(boolean): If True, remove the customer and offer ids. Otherwise\n            keep them for a possible time-dependent analysis.\n    Returns:\n        merged_df(pd.DataFrame): The joined dataframe.\n    \"\"\"\n    merged_df = transcript.merge(profile, left_on='person', right_on='id',\n                                 how='left').drop('id', axis=1)\n    merged_df = merged_df.merge(\n        portfolio.rename(columns={'reward': 'reward_t'}),\n        left_on='offer_id', right_on='id', how='left').drop('id', axis=1)\n    if static:\n        merged_df = merged_df.drop(['person', 'offer_id'], axis=1)\n\n    return merged_df\n\n\ndef split_transcript(transcript):\n    \"\"\"\n    Separates the different kinds of events in different dataframes.\n    Args:\n        transcript(pd.DataFrame): Similar to the raw transcript data.\n\n    Returns:\n        received(pd.DataFrame): Contains the reception events.\n        viewed(pd.DataFrame): Contains the offer view events.\n        completed(pd.DataFrame): Contains the offer completion events.\n        transactions(pd.DataFrame): Contains the transactions.\n    \"\"\"\n    received = transcript[transcript.event == 'offer received']\n    viewed = transcript[transcript.event == 'offer viewed']\n    completed = transcript[transcript.event == 'offer completed']\n    transactions = transcript[transcript.event == 'transaction']\n\n    return received, viewed, completed, transactions\n\n\ndef fill_completion(received, completed):\n    \"\"\"\n    Looks in the records of one person and checks which offers were completed.\n    A 'completed' column is set to 1 when the offer was completed. The finish\n    time is also added.\n    Args:\n        received(pd.DataFrame): As returned from split_transcript\n        completed(pd.DataFrame): As returned from split_transcript\n\n    Returns:\n        pd.DataFrame: The received dataframe with some new columns.\n    \"\"\"\n    results = list()\n    for idx, row in received.iterrows():\n        record = dict()\n\n        # Identify the record\n        record['time'] = row.time\n        record['offer_id'] = row.offer_id\n\n        record['expected_finish'] = row.time + row.duration * 24\n        completion = completed[(completed.offer_id == row.offer_id) &\n                               (completed.time >= row.time) &\n                               (completed.time <= record['expected_finish'])]\n        if completion.shape[0] > 0:\n            record['completed'] = 1\n            record['finish'] = completion.time.iloc[0]\n        else:\n            record['completed'] = 0\n            record['finish'] = record['expected_finish']\n\n        results.append(record)\n\n    return received.merge(pd.DataFrame(results), on=['time', 'offer_id'],\n                          how='left')\n\n\ndef fill_viewed(data, viewed):\n    \"\"\"\n    Checks if the offer was viewed in the active period of the offers.\n    Also fills a column called 'success' that tracks whether an offer\n    completion happened after a view.\n    Args:\n        data(pd.DataFrame): As returned from fill_completion\n        viewed(pd.DataFrame): As returned from split_transcript\n\n    Returns:\n        pd.DataFrame: The received dataframe with some new columns.\n    \"\"\"\n    results = list()\n    for idx, row in data.iterrows():\n        record = dict()\n\n        # Identify the record\n        record['time'] = row.time\n        record['offer_id'] = row.offer_id\n\n        # Match the views of this offer within its active period\n        views = viewed[(viewed.offer_id == row.offer_id) &\n                       (viewed.time >= row.time) &\n                       (viewed.time <= row.finish)]\n        if views.shape[0] > 0:\n            record['viewed'] = 1\n            record['view_time'] = views.time.iloc[0]\n            if (record['view_time'] <= row.finish) and row.completed:\n                record['success'] = 1\n            else:\n                record['success'] = 0\n        else:\n            record['viewed'] = 0\n            record['view_time'] = np.nan\n            record['success'] = 0\n\n        results.append(record)\n\n    return data.merge(pd.DataFrame(results), on=['time', 'offer_id'],\n                      how='left')\n\n\ndef fill_profits(data, transactions):\n    \"\"\"\n    Fills \"spending\" and \"profits\" related 
columns.\n The \"spending\" columns track the transactions of the client in the \"active\"\n period of an offer, and adds them. They are also summed in the period between the\n offer reception and the offer completion (if it is completed).\n The profits columns consider the paid rewards as a cost to the company and substract\n them.\n The paid reward is also recorded in the column \"actual reward\" (it is zero if the\n offer was not completed).\n Args:\n data(pd.DataFrame): As returned from fill_completed\n transactions(pd.DataFrame): As returned from split_transcript\n\n Returns:\n pd.DataFrame: The received dataframe with some new columns.\n \"\"\"\n results = list()\n for idx, row in data.iterrows():\n record = dict()\n\n # Identify the record\n record['time'] = row.time\n record['offer_id'] = row.offer_id\n\n until_complete_tr = transactions[(transactions.time >= row.time) &\n (transactions.time <= row.finish)]\n duration_tr = transactions[(transactions.time >= row.time) &\n (transactions.time <= row.expected_finish)]\n record['spent_until_complete'] = until_complete_tr.amount.sum()\n record['spent_in_duration'] = duration_tr.amount.sum()\n record['actual_reward'] = row.reward_t if row.completed == 1 else 0\n record['profit_until_complete'] = record['spent_until_complete'] - \\\n record['actual_reward']\n record['profit_in_duration'] = record['spent_in_duration'] - \\\n record['actual_reward']\n\n results.append(record)\n\n return data.merge(pd.DataFrame(results), on=['time', 'offer_id'],\n how='left')\n\n\ndef generate_static_person_data(person_data):\n \"\"\"\n Generates a dataset for one person, that contains a row for each sent\n offer, and adds some 'results' columns, like whether the offer was viewed,\n completed, when did the offer finish, how much was spent by the user while\n the offer was active, the total profit in the period, and the reward paid.\n \"\"\"\n received, \\\n viewed, \\\n completed, \\\n transactions = split_transcript(person_data)\n if received.shape[0] == 0:\n return None\n data = fill_completion(received, completed)\n data = fill_viewed(data, viewed)\n data = fill_profits(data, transactions)\n\n return data.drop(['event', 'reward', 'amount'], axis=1)\n\n\ndef generate_static_dataset(data):\n \"\"\"\n Applies the generate_static_person_data to all the users.\n \"\"\"\n return data.groupby('person').apply(generate_static_person_data).reset_index(drop=True)\n\n\ndef anonimize_data(data):\n \"\"\"\n Takes a 'static data' dataframe and converts it into an anonymized dataset.\n \"\"\"\n return data.drop(['person', 'offer_id', 'became_member_on'], axis=1)\n\n\nclass BasicEncoder(BaseEstimator, TransformerMixin):\n \"\"\" Transforms the Basic dataset. \"\"\"\n\n def __init__(self):\n super(BaseEstimator, self).__init__()\n self.offer_type_encoder = LabelEncoder()\n\n def fit(self, X, y=None):\n \"\"\" Get the encodings for the offer types. \"\"\"\n self.offer_type_encoder.fit(X['offer_type'].dropna())\n return self\n\n def transform(self, X):\n \"\"\" Encode offer types and gender \"\"\"\n res = X.copy()\n\n # Ignore the missing offer types and encode\n sub_res = res.dropna(subset=['offer_type']).copy()\n sub_res.offer_type = self.offer_type_encoder.transform(\n sub_res.offer_type)\n res.update(sub_res)\n res.offer_type = pd.to_numeric(res.offer_type)\n\n res = gender_encode(res)\n return res\n\n def inverse_transform(self, X):\n \"\"\" Transform back to the original encoding. 
\"\"\"\n res = X.copy()\n\n # Ignore the missing offer types and decode\n sub_res = res.dropna(subset=['offer_type']).copy()\n sub_res['offer_type'] = self.offer_type_encoder.inverse_transform(\n sub_res['offer_type'])\n res.update(sub_res)\n\n res = gender_decode(res)\n return res\n\n\ndef gender_encode(data):\n \"\"\" Encode the gender column. F=0, M=1, O=2. \"\"\"\n gender_dict = {'F': 0, 'M': 1, 'O': 2, None: np.nan, np.nan: np.nan}\n data.gender = data.gender.replace(gender_dict)\n\n return data\n\n\ndef gender_decode(data):\n \"\"\" Decode the gender column. F=0, M=1, O=2. \"\"\"\n gender_dict_inverse = {0: 'F', 1: 'M', 2: 'O', np.nan: None}\n data.gender = data.gender.replace(gender_dict_inverse)\n\n return data\n\n\ndef drop_time_dependent(X):\n \"\"\" Drops the features that depend on absolute time.\"\"\"\n\n return X.drop(ABSOLUTE_TIME_DEPENDENT, axis=1)\n" }, { "alpha_fraction": 0.5957371592521667, "alphanum_fraction": 0.6029593348503113, "avg_line_length": 41.68671798706055, "blob_id": "137460ec74745e5be65863c7876c000252e42706", "content_id": "e8adbe89efa744fdbd502f4cf39cf02a00460976", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17031, "license_type": "permissive", "max_line_length": 119, "num_lines": 399, "path": "/src/data/profit_10_days_dataset.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\"\nFunctions to create and use the 'profit_10_days' dataset: that is,\na dataset with information about the profits that generated a customer in the 10 days\nafter an offer is presented to them.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport os\nimport src.data.preprocessing as pp\nimport src.data.missing_data as md\nfrom src.data import DATA_PROCESSED\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.base import BaseEstimator, RegressorMixin\nfrom sklearn.pipeline import Pipeline\nfrom xgboost import XGBRegressor, XGBClassifier\nfrom tqdm import tqdm\n\nVIEWCOL_LABEL = 'viewcol'\nZ_COLS = ['difficulty',\n 'duration',\n 'reward_t',\n 'channel_web',\n 'channel_mobile',\n 'channel_email',\n 'channel_social']\nPROFIT_COLS = Z_COLS + ['offer_id', 'offer_type']\nZ_VIEW_COLS = ['{}_{}'.format(col, VIEWCOL_LABEL) for col in Z_COLS]\nVIEW_COLS = Z_VIEW_COLS + ['offer_id_{}'.format(VIEWCOL_LABEL),\n 'offer_type_{}'.format(VIEWCOL_LABEL)]\n\n\ndef get_offers_ts(user_received, portfolio, data, delta = 24 * 10, viewed=False):\n \"\"\"\n Given the received sequence for a user, this function returns a time series\n dataframe containing a 1 in a 10 days period after receiving an offer.\n It can filter out the non-viewed offers.\n Args:\n user_received(dataframe): The received offers for one user.\n portfolio(dataframe): The original portfolio of offers (just to get the offer ids).\n data(dataframe): The original transcript (just to get the time values).\n delta(int): The period of relevance for an offer.\n viewed(boolean): Whether to show only the offers that were viewed.\n \"\"\"\n offers = portfolio.id.values\n offer_ts = pd.DataFrame(np.zeros((data.time.nunique(), len(offers))),\n index=data.time.unique(), columns=offers)\n for i, row in user_received.iterrows():\n if viewed:\n if row.viewed:\n offer_ts.loc[row.time: row.time + delta, row.offer_id] = 1\n else:\n offer_ts.loc[row.time: row.time + delta, row.offer_id] = 1\n # Fill the \"no-offer\" column\n offer_ts['no_offer'] = (offer_ts.sum(axis=1) == 0).astype(int)\n return offer_ts\n\n\nclass 
BasicEncoderProfits(pp.BasicEncoder):\n \"\"\"\n Transforms the Basic dataset. Adds the possibility of encoding other custom features, like offer_id,\n for example.\n Args:\n custom_features(list): Names of the custom features to label-encode.\n \"\"\"\n\n def __init__(self, custom_features=list()):\n super().__init__()\n self.custom_encoders = {feat: LabelEncoder() for feat in custom_features}\n\n def fit(self, X, y=None):\n \"\"\" Get the encodings for the offer choice. \"\"\"\n super().fit(X, y)\n for feat, encoder in self.custom_encoders.items():\n encoder.fit(X[feat])\n return self\n\n def transform(self, X):\n \"\"\" Encode offer types and gender, and all the custom features. \"\"\"\n res = super().transform(X)\n for feat, encoder in self.custom_encoders.items():\n res[feat] = encoder.transform(X[feat])\n return res\n\n def inverse_transform(self, X):\n \"\"\" Transform back to the original encoding. \"\"\"\n res = super().inverse_transform(X)\n for feat, encoder in self.custom_encoders.items():\n res[feat] = encoder.inverse_transform(X[feat])\n return res\n\n\ndef get_spent_days_static(static_data, preprocessed_data, days=10 * 24):\n \"\"\"\n Get a static data version of the profit N days dataset.\n Calculates the total money spent for each customer, in the 10 days after\n an offer is shown.\n Args:\n static_data(dataframe): The 'offer success' version of the static data.\n preprocessed_data(dataframe): the result of 'basic_preprocessing'.\n days(int): The number of days to calculate the profits.\n \"\"\"\n received, viewed, completed, transactions = pp.split_transcript(preprocessed_data)\n\n results = list()\n for idx, row in tqdm(list(static_data.iterrows())):\n record = dict()\n\n # Id of the record\n record['person'] = row.person\n record['time'] = row.time\n record['offer_id'] = row.offer_id\n\n record['spent_10_days'] = transactions[(transactions.person == row.person) &\n (transactions.time > row.time) &\n (transactions.time <= row.time + days)\n ].amount.sum()\n results.append(record)\n\n return static_data.merge(pd.DataFrame(results),\n on=['person', 'time', 'offer_id'], how='left')\n\n\ndef fill_null_offer(data):\n \"\"\"\n Fill the 'null' offer data when an offer was not viewed.\n The 'viewcol' features are generated for the views predictor. 
That model\n doesn't consider the null offer because it predicts the views themselves.\n Args:\n data(dataframe): A dataframe with sent offers (like the result from 'get_spent_days_static').\n\n Returns:\n data(dataframe): Like the input but with added / modified columns.\n view_cols(list): The names of the columns for the 'views' estimator.\n profit_cols(list): The names of the columns for the 'profits' estimator.\n \"\"\"\n viewcol_label = 'viewcol'\n\n z_cols = ['difficulty',\n 'duration',\n 'reward_t',\n 'channel_web',\n 'channel_mobile',\n 'channel_email',\n 'channel_social']\n profit_cols = z_cols + ['offer_id', 'offer_type']\n z_view_cols = ['{}_{}'.format(col, viewcol_label) for col in z_cols]\n view_cols = z_view_cols + ['offer_id_{}'.format(viewcol_label),\n 'offer_type_{}'.format(viewcol_label)]\n\n # Fill the \"non-view\" cols\n data[view_cols] = data[profit_cols].copy()\n data.loc[data['viewed'] == 0, 'offer_id'] = 'no_offer'\n data.loc[data['viewed'] == 0, 'offer_type'] = 'no_offer'\n data.loc[data['viewed'] == 0, data.columns.isin(z_cols)] = 0\n\n return data, view_cols, profit_cols\n\n\ndef split_view_profit(X, view_cols, profit_cols):\n \"\"\"\n Splits a features dataset in the features that are used by\n the views predictor and the features that are used by the profits\n predictor.\n Args:\n X(dataframe): Input of the estimator. Each sample is a sent offer. Contains 'view' columns and 'profit'\n columns. Like the result from 'fill_null_offer'.\n view_cols(list): The names of the columns for the 'views' estimator.\n profit_cols(list): The names of the columns for the 'profits' estimator.\n \"\"\"\n X_view = X.drop(profit_cols, axis=1).copy()\n X_view = X_view.rename(columns={v: p for v, p in zip(view_cols, profit_cols)})\n X_profit = X.drop(view_cols, axis=1).copy()\n\n return X_view, X_profit\n\n\ndef get_profit_10_days_data(basic_dataset_path=os.path.join(DATA_PROCESSED, 'static_spent_10_days.pkl'),\n train_times=(0, 168),\n test_times=(408,),\n drop_time=True,\n anon_person=True,\n drop_offer_id=True,\n fill_null=True,\n target='profit_10_days'):\n \"\"\"\n Generates the dataset to predict the profits in 10 days for each offer.\n The profits are calculated as the money spent minus the paid reward (if any).\n Args:\n basic_dataset_path(str): The path to the pickle containing the basic\n dataset.\n train_times(list): A list (or tuple) with the time values for the training set.\n test_times(list): A list (or tuple) with the time values for the test set.\n drop_time(boolean): Whether to drop or not the absolute time dependent features.\n anon_person(boolean): Whether to drop or not unique identifiers to customers.\n drop_offer_id(boolean): Whether to drop or not the 'offer_id' feature.\n target(list or str): The target feature name (typically, 'viewed' or 'profit_10_days', or both).\n\n Returns:\n X_train(pd.DataFrame): The training dataset.\n X_test(pd.DataFrame): The test dataset.\n y_train(pd.Series): The training target.\n y_test(pd.Series): The test target.\n encoder(BasicEncoderProfits): An encoder to use in an ML pipeline.\n view_cols(list): The names of the columns for the 'views' estimator.\n profit_cols(list): The names of the columns for the 'profits' estimator.\n \"\"\"\n data = pd.read_pickle(basic_dataset_path)\n\n custom_features = ['offer_id']\n view_cols, profit_cols = (None, None)\n if fill_null:\n data, view_cols, profit_cols = fill_null_offer(data)\n if anon_person:\n data = data.drop('person', axis=1)\n if drop_offer_id:\n data = data.drop('offer_id', 
axis=1)\n custom_features.remove('offer_id')\n data['profit_10_days'] = data.spent_10_days - data.actual_reward\n data = data.drop(['became_member_on', 'spent_10_days'], axis=1)\n\n X = data.drop(pp.FUTURE_INFO + ['profit_10_days'], axis=1)\n y = data[target]\n\n # Split the train-test data\n X_train = X[X.time.isin(train_times)]\n X_test = X[X.time.isin(test_times)]\n y_train = y[X.time.isin(train_times)]\n y_test = y[X.time.isin(test_times)]\n if drop_time:\n X_train = pp.drop_time_dependent(X_train)\n X_test = pp.drop_time_dependent(X_test)\n\n encoder = BasicEncoderProfits(custom_features=custom_features)\n\n return X_train, X_test, y_train, y_test, encoder, view_cols, profit_cols\n\n\ndef predict_profit_with_offer(model, data, offer, null_offer, drop_offer_id=False):\n \"\"\"\n Predicts how much will be the profit in 10 days for a given an offer.\n Args:\n model(ProfitsPredictor): The model to estimate the profits in 10 days.\n data(dataframe): A static dataset, like the result of 'get_profit_10_days_data' (X_train, X_test, ...).\n offer(pd.Series): One row of the portfolio dataframe.\n null_offer(pd.Series): An offer that represents \"no offer at all\".\n drop_offer_id(boolean): Whether to drop or not the 'offer_id' column.\n\n Returns:\n predictions(pd.Series): The predicted profits for the offer and for each sample in 'data'.\n \"\"\"\n samples = data.copy()\n\n if drop_offer_id:\n std_offer = offer.drop('id').rename(index={'reward': 'reward_t'})\n else:\n std_offer = offer.rename(index={'reward': 'reward_t', 'id': 'offer_id'}).sort_index()\n view_offer = std_offer.rename(index={old: '{}_viewcol'.format(old) for old in std_offer.index})\n\n # Substitute all the offers for the given one\n samples.loc[:, sorted(VIEW_COLS)] = np.repeat(view_offer.values.reshape(1, -1), samples.shape[0], axis=0)\n samples.loc[:, sorted(PROFIT_COLS)] = np.repeat(std_offer.values.reshape(1, -1), samples.shape[0], axis=0)\n\n y_pred = model.predict_final_profits(samples, null_offer.sort_index())\n\n return pd.Series(y_pred, name=offer.id).T\n\n\ndef choose_offer(model, X, portfolio, add_null_offer=True):\n \"\"\"\n Given a model and a features dataframe it returns the offers that maximize the model predictions.\n It calls 'predict_profit_with_offer' for each offer in portfolio, and selects the one with the largest\n predicted profit.\n Args:\n model(ProfitsPredictor): The model to estimate the profits in 10 days.\n X(dataframe): A static dataset, like the result of 'get_profit_10_days_data' (X_train, X_test, ...).\n portfolio(dataframe): The processed portfolio dataframe. Like the result from 'pp.basic_preprocessing'.\n add_null_offer(boolean): Whether to add the null offer (no offer at all) to the portfolio.\n\n Returns:\n pd.Series: A series with the offer_id of the selected (best) offer for each sample of X.\n \"\"\"\n complete_portfolio = portfolio.copy()\n\n # Add the null offer\n if add_null_offer:\n null_offer = pd.Series([0, 0, 'no_offer', 'no_offer', 0, 0, 0, 0, 0],\n index=complete_portfolio.columns,\n name=complete_portfolio.shape[0])\n complete_portfolio = complete_portfolio.append(null_offer)\n\n res = complete_portfolio.apply(\n lambda x: predict_profit_with_offer(model, X, x, null_offer), axis=1).T\n res.columns = complete_portfolio.id\n\n return res.idxmax(axis=1), res\n\n\nclass ProfitsPredictor(BaseEstimator, RegressorMixin):\n \"\"\"\n Predicts the profits in 10 days for any given offer to a specific customer. 
It uses two models (sklearn Pipelines):\n    A classifier to predict the probability of an offer being viewed, and a regressor to predict the expected profit\n    that a customer will generate in the 10 days following the reception of an offer. Both results are combined to give\n    the total expected profit in the 10 days after the reception of an offer.\n    Args:\n        encoder(BasicEncoderProfits): An encoder to use in an ML pipeline.\n        view_cols(list): The names of the columns for the 'views' estimator.\n        profit_cols(list): The names of the columns for the 'profits' estimator.\n    \"\"\"\n\n    def __init__(self, encoder=None, view_cols=VIEW_COLS, profit_cols=PROFIT_COLS, **kwargs):\n        super().__init__(**kwargs)\n        self.view_cols = view_cols\n        self.profit_cols = profit_cols\n        if encoder is None:\n            self.encoder = BasicEncoderProfits()\n        else:\n            self.encoder = encoder\n\n        # Create the models\n        self.views_model = Pipeline([\n            ('encoder', self.encoder),\n            ('imputer', md.BasicImputer()),\n            ('estimator', XGBClassifier(max_depth=7, n_estimators=200, n_jobs=-1,\n                                        random_state=2018))\n        ])\n        self.profits_model = Pipeline([\n            ('encoder', self.encoder),\n            ('imputer', md.BasicImputer()),\n            ('estimator', XGBRegressor(max_depth=4, n_estimators=200, n_jobs=-1,\n                                       random_state=2018))\n        ])\n\n    def fit(self, X, y):\n        \"\"\" Fits all the models. \"\"\"\n        y_views = y.iloc[:, 0]\n        y_profits = y.iloc[:, 1]\n        X_views, X_profits = split_view_profit(X,\n                                               self.view_cols,\n                                               self.profit_cols)\n        self.views_model.fit(X_views, y_views)\n        self.profits_model.fit(X_profits, y_profits)\n\n        return self\n\n    def predict(self, X):\n        \"\"\" Gets the predictions from all models and returns them. \"\"\"\n        X_views, X_profits = split_view_profit(X,\n                                               self.view_cols,\n                                               self.profit_cols)\n        vis_probas = self.views_model.predict_proba(X_views)[:, 1]\n        profits_pred = self.profits_model.predict(X_profits)\n\n        return np.vstack([vis_probas, profits_pred]).T\n\n    def predict_profit_alone(self, X):\n        \"\"\"\n        Predicts the profits as if the offer was already seen. It is useful for the\n        'no offer' case, which is 'always seen'.\n        \"\"\"\n        X_views, X_profits = split_view_profit(X,\n                                               self.view_cols,\n                                               self.profit_cols)\n\n        return self.profits_model.predict(X_profits)\n\n    def predict_final_profits(self, X, null_offer):\n        \"\"\"\n        Predicts the final estimated profits for each sample. 
It combines the visualization\n probabilities with the estimated profits for viewed offers.\n \"\"\"\n X_null = X.copy()\n\n X_null.loc[:, sorted(VIEW_COLS)] = np.repeat(null_offer.values.reshape(1, -1), X_null.shape[0],\n axis=0)\n X_null.loc[:, sorted(PROFIT_COLS)] = np.repeat(null_offer.values.reshape(1, -1), X_null.shape[0],\n axis=0)\n\n _, X_null_profits = split_view_profit(X_null,\n self.view_cols,\n self.profit_cols)\n X_views, X_profits = split_view_profit(X,\n self.view_cols,\n self.profit_cols)\n\n # Predict the profits with the null offer\n null_profits_pred = self.profits_model.predict(X_null_profits)\n\n\n # Predict the probability of view and the profits if viewed\n vis_probas = self.views_model.predict_proba(X_views)[:, 1]\n profits_pred = self.profits_model.predict(X_profits)\n\n # Predict the final profits\n y_pred = vis_probas * profits_pred + (1 - vis_probas) * null_profits_pred\n\n # The null offer is \"always seen\".\n y_pred[X.offer_type == 'no_offer'] = null_profits_pred[X.offer_type == 'no_offer']\n\n return y_pred" }, { "alpha_fraction": 0.6227210760116577, "alphanum_fraction": 0.6426681876182556, "avg_line_length": 39.77842712402344, "blob_id": "b547003a1b6cda94410750fa6f1c039a65a94d77", "content_id": "48f89225d4fd06850dc558d8c2c13cf6978bf06c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13987, "license_type": "permissive", "max_line_length": 119, "num_lines": 343, "path": "/src/features/clustering.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\" Contains the functions implemented to cluster, and visualize the custers. \"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy.cluster.hierarchy import ward, fcluster\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.mixture import GaussianMixture\nfrom time import time\nimport os\n\nimport src.data.success_dataset as sd\nimport src.data.preprocessing as pp\nfrom src.data import DATA_INTERIM, DATA_PROCESSED\n\nDIST_12 = 40.61273277762122\nDIST_3D_3 = 99.68755217120427\nDIST_3D_9 = 45.436989981397055\nDIST_3D_19 = 23.16997229826248\n\n\ndef kmeans_error(X, method, cluster_labels):\n \"\"\" Index function for K-Means that returns the SSE\"\"\"\n return method.inertia_\n\n\ndef validate_clustering(X, clustering_algo, params, index_fun, n_clust_name):\n \"\"\"\n Get the Silhouette score and one custom index, and plot the results.\n Args:\n X(array-like): The data to cluster.\n clustering_algo(class): The class of the clustering estimator. 
Must follow\n scikit-learn conventions.\n params(list of dicts): A list of kwargs to pass in the creation of the clustering\n method.\n index_fun(function): A function that calculates a custom index for the clustering.\n The heading should be index_fun(X, method, cluster_labels) and return a number.\n n_clust_name(str): The name of the parameter that represents the number of clusters.\n If None is given, there will be no plots.\n \"\"\"\n silhouette = list()\n error = list()\n for i, param_set in enumerate(params):\n tic = time()\n method = clustering_algo(**param_set)\n labels = method.fit_predict(X)\n try:\n silhouette.append(silhouette_score(X, labels))\n except ValueError:\n silhouette.append(0)\n error.append(index_fun(X, method, labels))\n toc = time()\n print('Algorithm {} of {} finished in {} seconds.'.format(\n i + 1, len(params), (toc - tic)))\n\n best_silhouette_params = params[np.argmax(silhouette)]\n print('The best Silhouette score is for {}, and its value is: {}'.format(\n best_silhouette_params, max(silhouette)))\n print('The error for {} is: {}'.format(\n best_silhouette_params, error[np.argmax(silhouette)]))\n\n if n_clust_name is not None:\n clusters = [p[n_clust_name] for p in params]\n plt.plot(clusters, silhouette)\n plt.title('Silhouette score')\n plt.vlines(best_silhouette_params[n_clust_name], min(silhouette), max(silhouette), 'r')\n\n plt.figure()\n plt.plot(clusters, error)\n plt.title(index_fun.__name__)\n plt.vlines(best_silhouette_params[n_clust_name], min(error), max(error), 'r')\n\n return silhouette, error, best_silhouette_params\n\n\ndef gmm_aic(X, method, cluster_labels):\n \"\"\"\n Index function that returns the Aikake Information Criterion Index for a\n Gaussian Mixture Model.\n \"\"\"\n return method.aic(X)\n\n\ndef number_of_clusters(X, method, cluster_labels):\n \"\"\" Index function that returns the number of clusters for DBSCAN. 
\"\"\"\n return len(method.labels_)\n\n\ndef create_cluster_feats_4d(static_dataset_path=os.path.join(DATA_INTERIM, 'static_data.pkl'),\n output_path=os.path.join(DATA_PROCESSED, 'static_cluster1.pkl'),\n save=True):\n \"\"\"\n Adds the features created by clustering for the selected 4D cases (age, income, gender, memeber_since_epoch).\n The features to add are: kmeans_8, ward_12 and dbscan_10.\n Args:\n static_dataset_path(str): The path to the static dataset to be taken as the initial data.\n output_path(str): The path to save the new dataset.\n save(boolean): Whether to save the new static dataset.\n Returns:\n static_cluster1_dataset(dataframe): The same as the static dataset but with the features added into new\n columns.\n X_train_r(dataframe): X_train (as obtained from time-split with the input static data) with the new features.\n X_test_r(dataframe): X_test (as obtained from time-split with the input static data) with the new features.\n y_train(pd.Series): y_train as obtained from time-split with the input static data.\n y_test(pd.Series): y_test as obtained from time-split with the input static data.\n \"\"\"\n # Get the data\n X_train, X_test, y_train, y_test, encoder = sd.get_success_data(basic_dataset_path=static_dataset_path,\n drop_time=False,\n anon=False)\n\n # Encode and filter relevant features\n customer_feats = ['age', 'gender', 'income', 'missing_demographics',\n 'member_epoch_days']\n\n X_train_t = encoder.fit_transform(X_train)\n X_train_t = X_train_t[customer_feats]\n X_test_t = encoder.transform(X_test)\n X_test_t = X_test_t[customer_feats]\n\n # Drop duplicates and missing data\n X_train_t = X_train_t.dropna().drop_duplicates()\n X_test_t = X_test_t.dropna().drop_duplicates()\n\n # Keep a copy with the original demographics\n X_train_o = pp.gender_decode(X_train_t.copy())\n X_test_o = pp.gender_decode(X_test_t.copy())\n\n # Drop the irrelevant column\n X_train_t = X_train_t.drop('missing_demographics', axis=1)\n X_test_t = X_test_t.drop('missing_demographics', axis=1)\n\n # Normalize\n scaler = StandardScaler()\n scaler.fit(X_train_t)\n\n X_train_t = pd.DataFrame(scaler.transform(X_train_t),\n index=X_train_t.index,\n columns=X_train_t.columns)\n X_test_t = pd.DataFrame(scaler.transform(X_test_t),\n index=X_test_t.index,\n columns=X_test_t.columns)\n\n # Add the clustering labels\n # K-Means (k = 8)\n n_clusters = 8\n kmeans = KMeans(n_clusters=n_clusters, random_state=2018)\n kmeans.fit(X_train_t)\n X_train_o['kmeans_8'] = kmeans.predict(X_train_t)\n X_test_o['kmeans_8'] = kmeans.predict(X_test_t)\n\n # Ward 12 clusters\n linkage_matrix = ward(X_train_t)\n dist_12 = DIST_12\n X_train_o['ward_12'] = fcluster(linkage_matrix, dist_12, criterion='distance')\n # Use KNN to determine the test clusters\n knn_ward = KNeighborsClassifier(n_neighbors=5)\n knn_ward.fit(X_train_t, X_train_o['ward_12'])\n X_test_o['ward_12'] = knn_ward.predict(X_test_t)\n\n # DBSCAN eps=0.3, min_samples=20, 10 clusters\n eps = 0.3\n min_samples = 20\n dbs = DBSCAN(eps=eps, min_samples=min_samples)\n dbs.fit(X_train_t)\n X_train_o['dbscan_10'] = dbs.labels_\n # Use KNN to determine the test clusters\n knn_dbscan = KNeighborsClassifier(n_neighbors=5)\n knn_dbscan.fit(X_train_t, X_train_o['dbscan_10'])\n X_test_o['dbscan_10'] = knn_dbscan.predict(X_test_t)\n\n # Merge with the original datsets\n X_train_r = X_train.merge(X_train_o, on=customer_feats, how='left')\n X_test_r = X_test.merge(X_test_o, on=customer_feats, how='left')\n\n # Join the new features with the old static dataset\n 
static_cluster1 = pd.concat([X_train_r.sort_values(by='time'), X_test_r.sort_values(by='time')])\n old_static = pd.read_pickle(static_dataset_path)\n id_feats = ['person', 'time', 'offer_id']\n cluster_feats = ['kmeans_8', 'ward_12', 'dbscan_10']\n cluster_info = static_cluster1[id_feats + cluster_feats]\n static_cluster1_dataset = old_static.merge(cluster_info, on=id_feats)\n\n # Save the new static dataset\n if save:\n static_cluster1_dataset.to_pickle(output_path)\n\n return static_cluster1_dataset, X_train_r, X_test_r, y_train, y_test\n\n\ndef create_cluster_feats_3d(static_dataset_path=os.path.join(DATA_PROCESSED, 'static_cluster1.pkl'),\n output_path=os.path.join(DATA_PROCESSED, 'static_cluster3d.pkl'),\n save=True):\n \"\"\"\n Adds the features created by clustering for the selected 3D cases (age, income, memeber_since_epoch).\n The features to add are: 3d_kmeans_3, 3d_ward_3, 3d_ward_19, 3d_gmm_3, 3d_gmm_16, 3d_dbscan_02_20, 3d_dbscan_05_100\n Args:\n static_dataset_path(str): The path to the static dataset to be taken as the initial data.\n output_path(str): The path to save the new dataset.\n save(boolean): Whether to save the new static dataset.\n Returns:\n static_cluster3d_dataset(dataframe): The same as the static dataset but with the features added into new\n columns.\n X_train_r(dataframe): X_train (as obtained from time-split with the input static data) with the new features.\n X_test_r(dataframe): X_test (as obtained from time-split with the input static data) with the new features.\n y_train(pd.Series): y_train as obtained from time-split with the input static data.\n y_test(pd.Series): y_test as obtained from time-split with the input static data.\n \"\"\"\n # Get the data\n X_train, X_test, y_train, y_test, encoder = sd.get_success_data(\n basic_dataset_path=static_dataset_path,\n drop_time=False,\n anon=False)\n\n # Encode and filter relevant features\n customer_feats = ['age', 'income', 'missing_demographics',\n 'member_epoch_days']\n\n X_train_t = encoder.fit_transform(X_train)\n X_train_t = X_train_t[customer_feats]\n X_test_t = encoder.transform(X_test)\n X_test_t = X_test_t[customer_feats]\n\n # Drop duplicates and missing data\n X_train_t = X_train_t.dropna().drop_duplicates()\n X_test_t = X_test_t.dropna().drop_duplicates()\n\n # Keep a copy with the original demographics\n X_train_o = X_train_t.copy()\n X_test_o = X_test_t.copy()\n\n # Drop the irrelevant column\n X_train_t = X_train_t.drop('missing_demographics', axis=1)\n X_test_t = X_test_t.drop('missing_demographics', axis=1)\n\n # Normalize\n scaler = StandardScaler()\n scaler.fit(X_train_t)\n\n X_train_t = pd.DataFrame(scaler.transform(X_train_t),\n index=X_train_t.index,\n columns=X_train_t.columns)\n X_test_t = pd.DataFrame(scaler.transform(X_test_t),\n index=X_test_t.index,\n columns=X_test_t.columns)\n\n # Add the clustering labels\n # K-Means (k = 3)\n n_clusters = 3\n kmeans = KMeans(n_clusters=n_clusters, random_state=2018)\n kmeans.fit(X_train_t)\n X_train_o['3d_kmeans_3'] = kmeans.predict(X_train_t)\n X_test_o['3d_kmeans_3'] = kmeans.predict(X_test_t)\n\n # Ward\n linkage_matrix = ward(X_train_t)\n\n # Ward 3 clusters\n n_clusters = 3\n feat_name = '3d_ward_3'\n dist = DIST_3D_3\n X_train_o[feat_name] = fcluster(linkage_matrix, dist, criterion='distance')\n # Use KNN to determine the test clusters\n knn_ward = KNeighborsClassifier(n_neighbors=5)\n knn_ward.fit(X_train_t, X_train_o[feat_name])\n X_test_o[feat_name] = knn_ward.predict(X_test_t)\n\n # Ward 9 clusters\n n_clusters = 9\n feat_name = 
'3d_ward_9'\n    dist = DIST_3D_9\n    X_train_o[feat_name] = fcluster(linkage_matrix, dist, criterion='distance')\n    # Use KNN to determine the test clusters\n    knn_ward = KNeighborsClassifier(n_neighbors=5)\n    knn_ward.fit(X_train_t, X_train_o[feat_name])\n    X_test_o[feat_name] = knn_ward.predict(X_test_t)\n\n    # Ward 19 clusters\n    n_clusters = 19\n    feat_name = '3d_ward_19'\n    dist = DIST_3D_19\n    X_train_o[feat_name] = fcluster(linkage_matrix, dist, criterion='distance')\n    # Use KNN to determine the test clusters\n    knn_ward = KNeighborsClassifier(n_neighbors=5)\n    knn_ward.fit(X_train_t, X_train_o[feat_name])\n    X_test_o[feat_name] = knn_ward.predict(X_test_t)\n\n    # GMM 3 clusters\n    gmm = GaussianMixture(n_components=3)\n    gmm.fit(X_train_t)\n    X_train_o['3d_gmm_3'] = gmm.predict(X_train_t)\n    X_test_o['3d_gmm_3'] = gmm.predict(X_test_t)\n\n    # GMM 16 clusters\n    gmm = GaussianMixture(n_components=16)\n    gmm.fit(X_train_t)\n    X_train_o['3d_gmm_16'] = gmm.predict(X_train_t)\n    X_test_o['3d_gmm_16'] = gmm.predict(X_test_t)\n\n    # DBSCAN eps=0.2, min_samples=20\n    eps = 0.2\n    min_samples = 20\n    feat_name = '3d_dbscan_02_20'\n    dbs = DBSCAN(eps=eps, min_samples=min_samples)\n    dbs.fit(X_train_t)\n    X_train_o[feat_name] = dbs.labels_\n    # Use KNN to determine the test clusters\n    knn_dbscan = KNeighborsClassifier(n_neighbors=5)\n    knn_dbscan.fit(X_train_t, X_train_o[feat_name])\n    X_test_o[feat_name] = knn_dbscan.predict(X_test_t)\n\n    # DBSCAN eps=0.5, min_samples=100\n    eps = 0.5\n    min_samples = 100\n    feat_name = '3d_dbscan_05_100'\n    dbs = DBSCAN(eps=eps, min_samples=min_samples)\n    dbs.fit(X_train_t)\n    X_train_o[feat_name] = dbs.labels_\n    # Use KNN to determine the test clusters\n    knn_dbscan = KNeighborsClassifier(n_neighbors=5)\n    knn_dbscan.fit(X_train_t, X_train_o[feat_name])\n    X_test_o[feat_name] = knn_dbscan.predict(X_test_t)\n\n    # Merge with the original datasets\n    X_train_r = X_train.merge(X_train_o, on=customer_feats, how='left')\n    X_test_r = X_test.merge(X_test_o, on=customer_feats, how='left')\n\n    # Join the new features with the old static dataset\n    cluster_feats = ['3d_kmeans_3', '3d_ward_3', '3d_ward_9', '3d_ward_19',\n                     '3d_gmm_3', '3d_gmm_16', '3d_dbscan_02_20', '3d_dbscan_05_100']\n    static_cluster3d = pd.concat([X_train_r.sort_values(by='time'), X_test_r.sort_values(by='time')])\n    old_static = pd.read_pickle(static_dataset_path)\n    id_feats = ['person', 'time', 'offer_id']\n    cluster_info = static_cluster3d[id_feats + cluster_feats]\n    static_cluster3d_dataset = old_static.merge(cluster_info, on=id_feats)\n\n    # Save the new static dataset\n    if save:\n        static_cluster3d_dataset.to_pickle(output_path)\n\n    return static_cluster3d_dataset, X_train_r, X_test_r, y_train, y_test\n" }, { "alpha_fraction": 0.6104985475540161, "alphanum_fraction": 0.6191275119781494, "avg_line_length": 38.178401947021484, "blob_id": "06fd9172373e415a4dae68d7c6f0d4df946cbf9a", "content_id": "ab25d1fe3ddb3258fff058e54e8b84805e839467", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8344, "license_type": "permissive", "max_line_length": 78, "num_lines": 213, "path": "/src/evaluation/offer_success.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\"\nContains functions and classes that generate validation sets, and validate\nmodels for the 'offer success' problem.\n\"\"\"\nimport src.data.success_dataset as sd\nimport src.utils as utils\nimport src.data.preprocessing as pp\nfrom time import time\nfrom sklearn.metrics import 
confusion_matrix, classification_report, f1_score\nfrom sklearn.model_selection import StratifiedKFold\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_time_split_val(val_time=370, **kwargs):\n \"\"\"\n Returns all the datasets necessary to perform a time-split validation.\n Args:\n val_time(int): The time to make the validation split.\n kwargs(dict): Arguments to be passed to inner functions.\n\n Returns:\n X_train(pd.DataFrame): Training features.\n X_val(pd.DataFrame): Validation features.\n X_test(pd.DataFrame): Test features.\n X_train_val(pd.DataFrame): Training + Validation features, to use when\n testing.\n y_train(pd.Series): Training target values.\n y_val(pd.Series): Validation target values.\n y_test(pd.Series): Test target values.\n y_train_val(pd.Series): Training + Validation target values, to use\n when testing.\n \"\"\"\n\n fun_kwargs = utils.filter_args(sd.get_success_data, kwargs)\n X_train_val, \\\n X_test, \\\n y_train_val, \\\n y_test, \\\n encoder = sd.get_success_data(drop_time=False, **fun_kwargs)\n X_test = pp.drop_time_dependent(X_test)\n X_train, X_val, y_train, y_val = sd.time_split(X_train_val, y_train_val,\n val_time)\n return X_train, X_val, X_test, X_train_val, y_train, y_val, y_test, \\\n y_train_val\n\n\ndef time_split_validation(model, val_time=370, **kwargs):\n \"\"\"\n Shows some training and test results, for a time-split validation scheme.\n Returns the trained model and the predictions.\n Args:\n model(sklearn.BaseEstimator): The model to fit and make predictions.\n val_time(int): The time to make the validation split.\n kwargs(dict): Arguments to be passed to inner functions.\n\n Returns:\n model(sklearn.BaseEstimator): The trained model.\n y_train_pred(array-like): The predictions for the training set.\n y_val_pred(array-like): The predictions for the validation set.\n \"\"\"\n X_train, X_val, X_test, X_train_val, y_train, y_val, y_test, \\\n y_train_val = get_time_split_val(val_time, **kwargs)\n return evaluate_model(model, X_train, X_val, y_train, y_val)\n\n\ndef random_kfold_validation(model, n_splits=3):\n \"\"\"\n Shows some training and validation results, for a random kfold validation\n scheme.\n Args:\n model(sklearn.BaseEstimator): The model to fit and make predictions.\n n_splits(int): The number of folds for the cross-validation.\n \"\"\"\n skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=2018)\n X_train_val, X_test, y_train_val, y_test, \\\n encoder = sd.get_success_data(drop_time=True)\n\n # Train and validate for each fold\n f1_train = list()\n f1_val = list()\n i = 0\n for train_index, test_index in skf.split(X_train_val, y_train_val):\n i += 1\n print('Fold - {}'.format(i))\n X_train, X_val = X_train_val.iloc[train_index], X_train_val.iloc[\n test_index]\n y_train, y_val = y_train_val.iloc[train_index], y_train_val.iloc[\n test_index]\n model, y_train_pred, y_val_pred = evaluate_model(\n model, X_train, X_val, y_train, y_val)\n f1_train.append(f1_score(y_train, y_train_pred))\n f1_val.append(f1_score(y_val, y_val_pred))\n\n # Show results\n print('Training F1-score: {} +- {}'.format(np.mean(f1_train),\n np.std(f1_train)))\n print()\n print('Validation F1-score: {} +- {}'.format(np.mean(f1_val),\n 2 * np.std(f1_val)))\n\ndef random_1fold_validation(model, **kwargs):\n \"\"\"\n Shows some training and validation results, for a random train-val-test\n validation scheme.\n Args:\n model(sklearn.BaseEstimator): The model to fit and make predictions.\n \"\"\"\n 
X_train_val, X_test, y_train_val, y_test, encoder = sd.get_success_data(\n drop_time=True, **kwargs)\n X_train, X_val, y_train, y_val = train_test_split(X_train_val,\n y_train_val,\n test_size=0.3,\n random_state=2018)\n model, y_train_pred, y_val_pred = evaluate_model(model, X_train, X_val,\n y_train, y_val)\n print('Training F1-score: {}'.format(f1_score(y_train, y_train_pred)))\n print()\n print('Validation F1-score: {}'.format(f1_score(y_val, y_val_pred)))\n\n\ndef random_1fold_cust_validation(model, **kwargs):\n \"\"\"\n Shows some training and validation results, for a random train-val-test\n validation scheme. The dataset is divided by customers.\n Args:\n model(sklearn.BaseEstimator): The model to fit and make predictions.\n \"\"\"\n X_train_val, X_test, y_train_val, y_test, encoder = sd.get_success_data(\n drop_time=True, anon=False, **kwargs)\n\n # Get random customer splits\n val_size = 0.3\n customers = X_train_val.person.unique()\n n_train = int(np.floor(customers.shape[0] * (1.0 - val_size)))\n np.random.shuffle(customers)\n X_train = X_train_val[X_train_val.person.isin(customers[:n_train])]\n X_val = X_train_val[X_train_val.person.isin(customers[n_train:])]\n y_train = y_train_val[X_train_val.person.isin(customers[:n_train])]\n y_val = y_train_val[X_train_val.person.isin(customers[n_train:])]\n\n # Anonimize\n X_train = pp.anonimize_data(X_train)\n X_val = pp.anonimize_data(X_val)\n\n # Evaluate and show results\n model, y_train_pred, y_val_pred = evaluate_model(\n model, X_train, X_val, y_train, y_val)\n print('Training F1-score: {}'.format(f1_score(y_train, y_train_pred)))\n print()\n print('Validation F1-score: {}'.format(f1_score(y_val, y_val_pred)))\n\n\ndef offer_success_test(model, **kwargs):\n \"\"\"\n Shows some training and test results, for a time-split validation scheme.\n Args:\n model(sklearn.BaseEstimator): The model to fit and make predictions.\n \"\"\"\n X_train, X_test, y_train, y_test, encoder = sd.get_success_data(**kwargs)\n model, y_train_pred, y_test_pred = evaluate_model(\n model, X_train, X_test, y_train, y_test)\n print('Training F1-score: {}'.format(f1_score(y_train, y_train_pred)))\n print()\n print('Test F1-score: {}'.format(f1_score(y_test, y_test_pred)))\n\n\ndef evaluate_model(model, X_train, X_test, y_train, y_test):\n \"\"\"\n Shows some training and test results. 
Returns the trained model and the\n predictions.\n Args:\n model(sklearn.BaseEstimator): The model to fit and make predictions.\n X_train(array-like): training features.\n X_test(array-like): test features.\n y_train(array-like): training target.\n y_test(array-like): test target.\n\n Returns:\n model(sklearn.BaseEstimator): The trained model.\n y_train_pred(array-like): The predictions for the training set.\n y_test_pred(array-like): The predictions for the test set.\n \"\"\"\n # Fit the model\n tic = time()\n model.fit(X_train, y_train)\n toc = time()\n print('Training time: {} seconds.'.format(toc - tic))\n\n # Predict and show results\n y_train_pred = model.predict(X_train)\n print('-' * 44 + 'TRAIN RESULTS' + '-' * 44)\n print('Confusion Matrix:')\n print(confusion_matrix(y_train, y_train_pred))\n print('Classification Report:')\n print(classification_report(y_train, y_train_pred))\n print('-' * 100)\n\n y_test_pred = model.predict(X_test)\n print('-' * 44 + 'TEST RESULTS' + '-' * 44)\n print('Confusion Matrix:')\n print(confusion_matrix(y_test, y_test_pred))\n print('Classification Report:')\n print(classification_report(y_test, y_test_pred))\n print('-' * 100)\n\n print('\\n' + '_' * 51)\n print('| MAIN METRIC (test f1-score): {} |'.format(\n f1_score(y_test, y_test_pred)))\n print('-' * 51)\n\n return model, y_train_pred, y_test_pred" }, { "alpha_fraction": 0.607038140296936, "alphanum_fraction": 0.6158357858657837, "avg_line_length": 27.41666603088379, "blob_id": "b0845e3f04f22bba87f2ab2a0f4d0d1abf3d621b", "content_id": "c41b34cb7e0ffdb8028045b28a89ecf2038529db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "permissive", "max_line_length": 75, "num_lines": 12, "path": "/setup.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "from setuptools import find_packages, setup\n\nsetup(\n name='src',\n packages=find_packages(),\n version='0.1.0',\n description='Starbucks challenge to select the best offers for each ' +\n 'client. Capstone project for Udacity Data Scientist ' +\n 'Nanodegree.',\n author='Miguel Tasende',\n license='MIT',\n)\n" }, { "alpha_fraction": 0.5725274682044983, "alphanum_fraction": 0.5843406319618225, "avg_line_length": 30.11111068725586, "blob_id": "55b1e339b4a15fe5a2985689368a54055a8d0f5d", "content_id": "4c3a602a7db24ad8db1953e65722d66f46d1d6fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3640, "license_type": "permissive", "max_line_length": 77, "num_lines": 117, "path": "/src/visualization/visualize.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\" Generic visualization functions. 
\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom xgboost import plot_importance\nfrom sklearn.decomposition import PCA\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\n\ninit_notebook_mode(connected=True)\n\n\ndef show_feat_importances(model, X_train):\n \"\"\"\n Show a barplot with the feature importances for this model's estimator.\n The model is assumed to be a pipeline and the estimator name within the\n pipeline is 'estimator'.\n \"\"\"\n n_feats = 20\n feat_imp = np.vstack([X_train.columns,\n model.named_steps[\n 'estimator'].feature_importances_]).T\n feat_imp = pd.DataFrame(feat_imp, columns=['feature', 'importance'])\n feat_imp = feat_imp.sort_values(by='importance').set_index('feature')\n feat_imp.iloc[-n_feats:].plot(kind='barh')\n plt.title('Feature Importances')\n\n # Use built-in importance plot\n plt.figure()\n plot_importance(model.named_steps['estimator'], max_num_features=n_feats)\n\n\ndef add_bar_labels(values):\n for i, v in enumerate(values):\n plt.text(i, v, str(v), ha='center', fontweight='bold')\n\n\ndef show_imputer_results(data, filled,\n continuous=['age', 'income'],\n discrete=['gender']):\n \"\"\" Shows some differences between a dataset and a filled dataset. \"\"\"\n for feat in continuous:\n plt.figure()\n ax1 = plt.subplot(1, 2, 1)\n data[feat].hist(bins=30)\n plt.title('{} original'.format(feat))\n plt.subplot(1, 2, 2, sharey=ax1)\n filled[feat].hist(bins=30)\n plt.title('{} filled'.format(feat))\n\n for feat in discrete:\n counts1 = data.gender.value_counts(dropna=False)\n counts2 = filled.gender.value_counts(dropna=False)\n plt.figure()\n ax1 = plt.subplot(1, 2, 1)\n counts1.plot(kind='bar')\n plt.title('{} original'.format(feat))\n add_bar_labels(counts1)\n plt.subplot(1, 2, 2, sharey=ax1)\n counts2.plot(kind='bar')\n plt.title('{} filled'.format(feat))\n add_bar_labels(counts2)\n\n\ndef pca_visualize(X, **kwargs):\n \"\"\" Applies PCA to get 2-D data and make a scatter plot.\"\"\"\n extractor = PCA(n_components=2)\n X_pca = extractor.fit_transform(X)\n\n print('Explained variance ratio for the first two components: {}'.format(\n extractor.explained_variance_ratio_.sum()))\n\n plt.scatter(X_pca[:, 0], X_pca[:, 1], **kwargs)\n plt.title('PCA scatter plot')\n plt.xlabel('PCA 1')\n _ = plt.ylabel('PCA 2')\n\n\ndef pca_visualize_clusters(X, cluster):\n \"\"\" Visualize all the clusters using PCA. 
\"\"\"\n for c in np.unique(cluster):\n pca_visualize(X[cluster == c], label='cluster {}'.format(c))\n plt.legend()\n\n\ndef visualize_3d_clusters(X, labels):\n data = list()\n for label in np.unique(labels):\n X_sub = X[labels == label]\n trace = go.Scatter3d(\n x=X_sub.age,\n y=X_sub.income,\n z=X_sub.member_epoch_days,\n mode='markers',\n marker=dict(\n size=2,\n opacity=1.0\n ),\n name='cluster {}'.format(label)\n )\n data.append(trace)\n\n layout = go.Layout(\n margin=dict(\n l=0,\n r=0,\n b=0,\n t=0\n ),\n scene=dict(\n xaxis=dict(title='Age'),\n yaxis=dict(title='Income'),\n zaxis=dict(title='Mambership days since epoch')\n )\n )\n fig = go.Figure(data=data, layout=layout)\n iplot(fig)\n" }, { "alpha_fraction": 0.6187763810157776, "alphanum_fraction": 0.6318206787109375, "avg_line_length": 43.61475372314453, "blob_id": "657935ac7a36c82c7389796164ce838c717c8081", "content_id": "9972e115b1f0b710bcc6058ffb5596c078cd6c50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5443, "license_type": "permissive", "max_line_length": 115, "num_lines": 122, "path": "/src/data/make_dataset.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport pickle\nimport pandas as pd\nimport src.data.preprocessing as pp\nimport src.data.success_dataset as sd\nimport src.features.clustering as clust\nimport src.features.lagged as lag\nimport src.data.profit_10_days_dataset as p10\nfrom src.data import DATA_RAW, DATA_INTERIM, DATA_PROCESSED\n\n\ndef main():\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n static_dataset_path = os.path.join(DATA_INTERIM, 'static_data.pkl')\n static_cluster1_path = os.path.join(DATA_PROCESSED, 'static_cluster1.pkl')\n static_cluster3d_path = os.path.join(DATA_PROCESSED, 'static_cluster3d.pkl')\n static_lagged_path = os.path.join(DATA_PROCESSED, 'static_cluster_lagged.pkl')\n static_spent_10_days = os.path.join(DATA_PROCESSED, 'static_spent_10_days.pkl')\n\n\n logger = logging.getLogger(__name__)\n logger.info('Making the final datasets from raw data (the entire process can take about 1 hour, more or less, '\n 'depending on the computational resources available)')\n\n # Load the raw data\n print('data raw is here:')\n print(os.path.join(DATA_RAW, 'portfolio.json'))\n portfolio = pd.read_json(os.path.join(DATA_RAW, 'portfolio.json'),\n orient='records', lines=True)\n profile = pd.read_json(os.path.join(DATA_RAW, 'profile.json'),\n orient='records', lines=True)\n transcript = pd.read_json(os.path.join(DATA_RAW, 'transcript.json'),\n orient='records', lines=True)\n\n # Initial preprocessing\n logger.info('Preprocessing...')\n data, portfolio = pp.basic_preprocessing(portfolio, profile, transcript)\n\n # Generate the static dataset, and save it\n logger.info('Generating the static dataset. 
' +\n                'This may take several minutes...')\n    static_data = pp.generate_static_dataset(data)\n    static_data.to_pickle(static_dataset_path)\n\n    # Add the 4D clustering features\n    logger.info('Generating the 4D clustering features')\n    clust.create_cluster_feats_4d(static_dataset_path=static_dataset_path,\n                                  output_path=static_cluster1_path,\n                                  save=True)\n\n    # Add the 3D clustering features\n    logger.info('Generating the 3D clustering features')\n    clust.create_cluster_feats_3d(static_dataset_path=static_cluster1_path,\n                                  output_path=static_cluster3d_path,\n                                  save=True)\n\n    # Add the lagged features\n    logger.info('Generating the Lagged features')\n    portfolio = pd.read_json(os.path.join(DATA_RAW, 'portfolio.json'),\n                             orient='records', lines=True)\n    static_data = pd.read_pickle(static_cluster3d_path)\n    data_lag = lag.fill_lagged_success(static_data, portfolio)\n    data_lag.to_pickle(static_lagged_path)\n\n    # Create the offer-success datasets and save them\n    logger.info('Creating the offer-success datasets...')\n    X_train_sd, \\\n    X_test_sd, \\\n    y_train_sd, \\\n    y_test_sd, \\\n    encoder_sd = sd.get_success_data(basic_dataset_path=static_lagged_path)\n    X_train_sd.to_pickle(os.path.join(DATA_PROCESSED, 'X_train_success.pkl'))\n    X_test_sd.to_pickle(os.path.join(DATA_PROCESSED, 'X_test_success.pkl'))\n    y_train_sd.to_pickle(os.path.join(DATA_PROCESSED, 'y_train_success.pkl'))\n    y_test_sd.to_pickle(os.path.join(DATA_PROCESSED, 'y_test_success.pkl'))\n    with open(os.path.join(DATA_PROCESSED,\n                           'encoder_success.pkl'), 'wb') as file:\n        pickle.dump(encoder_sd, file)\n\n    # Create spent-10-days static dataset\n    logger.info('Creating the spent-10-days static dataset')\n    static_data = pd.read_pickle(static_lagged_path)\n    filled = p10.get_spent_days_static(static_data, data)\n    filled.to_pickle(static_spent_10_days)\n\n    # Create the profit-10-days datasets and save them\n    logger.info('Creating the profit-10-days datasets...')\n    X_train_p10,\\\n    X_test_p10,\\\n    y_train_p10,\\\n    y_test_p10,\\\n    encoder_p10,\\\n    view_cols_p10,\\\n    profit_cols_p10 = p10.get_profit_10_days_data(basic_dataset_path=static_spent_10_days,\n                                                  fill_null=True,\n                                                  target=['viewed', 'profit_10_days'],\n                                                  drop_offer_id=False)\n    X_train_p10.to_pickle(os.path.join(DATA_PROCESSED, 'X_train_profits.pkl'))\n    X_test_p10.to_pickle(os.path.join(DATA_PROCESSED, 'X_test_profits.pkl'))\n    y_train_p10.to_pickle(os.path.join(DATA_PROCESSED, 'y_train_profits.pkl'))\n    y_test_p10.to_pickle(os.path.join(DATA_PROCESSED, 'y_test_profits.pkl'))\n    with open(os.path.join(DATA_PROCESSED,\n                           'encoder_profits.pkl'), 'wb') as file:\n        pickle.dump(encoder_p10, file)\n    with open(os.path.join(DATA_PROCESSED,\n                           'view_cols_profits.pkl'), 'wb') as file:\n        pickle.dump(view_cols_p10, file)\n    with open(os.path.join(DATA_PROCESSED,\n                           'profit_cols_profits.pkl'), 'wb') as file:\n        pickle.dump(profit_cols_p10, file)\n\n    logger.info('All the datasets were created successfully!')\n\n\nif __name__ == '__main__':\n    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n    main()\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6443768739700317, "avg_line_length": 33.6315803527832, "blob_id": "2b0ab6cdcfb5770ab83dc68a79a7abe9155cb8aa", "content_id": "c2e76cb4244b7a3ee067944f46d754504c72b933", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2632, "license_type": "permissive", "max_line_length": 82, "num_lines": 76, "path": "/src/data/success_dataset.py", 
"repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\"\nFunctions to create and use the 'success' dataset: that is, a dataset to determine\nthe probability that an offer is viewed and later completed, or just viewed in\nthe case of informational offers.\n\"\"\"\nimport pandas as pd\nimport os\nimport src.data.preprocessing as pp\nfrom src.data import DATA_INTERIM\n\n\ndef get_success_data(\n basic_dataset_path=os.path.join(DATA_INTERIM, 'static_data.pkl'),\n time_limit=450,\n informational_success=True,\n drop_time=True,\n anon=True\n):\n \"\"\"\n Generates the dataset to predict whether an offer was successful.\n An offer is considered successful if it is viewed and then completed. In\n the case of informational offers a visualization alone may be considered a\n success or not.\n Args:\n basic_dataset_path(str): The path to the pickle containing the basic\n dataset\n time_limit(int): The limit to split the train and test sets.\n informational_success(boolean): Whether a visualization of an\n informational offer should be considered as a success.\n drop_time(boolean): Whether to drop the absolute time dependent\n features.\n anon(boolean): Whether to drop unique identifiers to customers and\n offers.\n\n\n Returns:\n X_train(pd.DataFrame): The training dataset.\n X_test(pd.DataFrame): The test dataset.\n y_train(pd.Series): The training target.\n y_test(pd.Series): The test target.\n BasicEncoder: An encoder to use in an ML pipeline.\n \"\"\"\n\n data = pd.read_pickle(basic_dataset_path)\n if anon:\n data = pp.anonimize_data(data)\n if informational_success:\n data.loc[data.offer_type == 'informational', 'success'] = data.loc[\n data.offer_type == 'informational', 'viewed']\n\n X = data.drop(pp.FUTURE_INFO, axis=1)\n y = data['success']\n X_train, X_test, y_train, y_test = time_split(X, y, time_limit,\n drop_time=drop_time)\n\n encoder = pp.BasicEncoder()\n\n return X_train, X_test, y_train, y_test, encoder\n\n\ndef time_split(X, y, time_limit, drop_time=True):\n \"\"\"\n Splits the features and targets in time. Drops the time dependent features\n if 'drop_time' is True.\n \"\"\"\n X_train = X[X.time < time_limit]\n y_train = y[X.time < time_limit]\n X_test = X[X.time >= time_limit]\n y_test = y[X.time >= time_limit]\n\n # Drop the columns that depend on absolute time\n if drop_time:\n X_train = pp.drop_time_dependent(X_train)\n X_test = pp.drop_time_dependent(X_test)\n\n return X_train, X_test, y_train, y_test\n" }, { "alpha_fraction": 0.6031166911125183, "alphanum_fraction": 0.6087533235549927, "avg_line_length": 40.26027297973633, "blob_id": "8f11d293869978c5d18da0c119cdca96c8faacd7", "content_id": "a6e5bd5ef2605f6564facaf0aea7f28bb1ce95d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3016, "license_type": "permissive", "max_line_length": 119, "num_lines": 73, "path": "/src/features/lagged.py", "repo_name": "live2pro/starbucks-advertising", "src_encoding": "UTF-8", "text": "\"\"\" Functions to create lagged features. 
\"\"\"\nimport numpy as np\nimport pandas as pd\n\n\ndef fill_one_lagged_success(user_data, current_time, portfolio):\n \"\"\"\n For a given time, and a given user, it counts how many times each offer was shown,\n and how many of those were a success (the rate of success could be easily calculated\n afterwards).\n offer_id_n: keeps track of how many times the offer was shown\n offer_id_success: keeps track of how many times the offer was successful\n \"\"\"\n feat_names = ['offer_type', 'duration', 'difficulty', 'reward']\n ohe_feats = ['channel_web', 'channel_email', 'channel_social', 'channel_mobile']\n\n # Some type conversion (data may have NaNs and converts to float)\n portfolio_t = portfolio.copy()\n portfolio_t[['difficulty', 'duration', 'reward']] = portfolio_t[\n ['difficulty', 'duration', 'reward']].astype(float)\n\n # Create the results containers\n feats = portfolio_t.id.tolist()\n for feat_name in feat_names:\n feats += [feat_name + '_{}'.format(d)\n for d in portfolio_t[feat_name].unique().tolist()]\n label_feats = np.setdiff1d(feat_names, ['reward']).tolist()\n feats += ohe_feats\n\n shown = {'{}_n'.format(offer): 0 for offer in feats}\n success = {'{}_success'.format(offer): 0 for offer in feats}\n res = {**shown, **success}\n\n old_offers = user_data[user_data.time < current_time]\n for i, row in old_offers.iterrows():\n res['{}_n'.format(row.offer_id)] += 1\n for feat_name in label_feats:\n res[feat_name + '_{}_n'.format(row[feat_name])] += 1\n res['reward_{}_n'.format(row['reward_t'])] += 1\n for feat_name in ohe_feats:\n if row[feat_name] == 1:\n res['{}_n'.format(feat_name)] += 1\n\n if row.success == 1:\n res['{}_success'.format(row.offer_id)] += 1\n for feat_name in label_feats:\n res[feat_name + '_{}_success'.format(row[feat_name])] += 1\n res['reward_{}_success'.format(row['reward_t'])] += 1\n for feat_name in ohe_feats:\n if row[feat_name] == 1:\n res['{}_success'.format(feat_name)] += 1\n\n return pd.Series(res)\n\n\ndef fill_user_lagged_success(user_data, portfolio):\n \"\"\" Fills the lagged success features for all the records in one customer. \"\"\"\n return user_data.join(user_data.apply(\n lambda x: fill_one_lagged_success(user_data, x.time, portfolio), axis=1))\n\n\ndef fill_lagged_success(data, portfolio):\n \"\"\" Fills the lagged success features for the entire dataset. \"\"\"\n filled = data.groupby('person').apply(\n lambda x: fill_user_lagged_success(x, portfolio))\n\n # Fill the ratios of success / shown\n success_cols = filled.columns.str.extract(\n '(.*)_success').dropna().values.flatten().tolist()\n for col in success_cols:\n filled['{}_success_ratio'.format(col)] = filled['{}_success'.format(col)] / (filled['{}_n'.format(col)] + 1e-5)\n\n return filled\n\n\n\n\n" } ]
13
udox/udoxmailinglist
https://github.com/udox/udoxmailinglist
02db165a706b555c24eba8097cbe85809d78dcd8
4ac43a5353bad88b715e62cd71125d2607cc4aa4
5fdb514283af5cc36dfa61642bda865aa87afd1e
refs/heads/master
2021-01-01T15:45:09.941552
2010-08-20T17:38:38
2010-08-20T17:38:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5318319797515869, "alphanum_fraction": 0.5318319797515869, "avg_line_length": 30.20270347595215, "blob_id": "f489f1dfb583ace44edc1e4ec4de201990567223", "content_id": "a97006aebf251122cd5dd4165fce07256f477799", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2309, "license_type": "no_license", "max_line_length": 122, "num_lines": 74, "path": "/admin.py", "repo_name": "udox/udoxmailinglist", "src_encoding": "UTF-8", "text": "import csv\nfrom datetime import datetime\n\nfrom django.http import HttpResponse\nfrom django.contrib import admin\n\nfrom udoxmailinglist.models import Member, InterestGroup, InterestItem\n\n\ndef export_csv(modeladmin, request, queryset):\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=members_%s.csv' % datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n writer = csv.writer(response)\n for row in queryset:\n writer.writerow([\n row.name,\n row.email,\n row.dob.strftime('%Y-%m-%d'),\n row.sex,\n row.mobile,\n ','.join([item.title for item in row.interest_items.order_by('group__pk','pk')],)\n ])\n queryset.update(last_exported=datetime.now())\n return response\nexport_csv.short_description = \"Export as CSV\"\n\n\nclass MemberAdmin(admin.ModelAdmin):\n list_display = ('name','email','mobile','join_date','last_exported')\n search_fields = ('name','email')\n list_display_links = ('name', 'email')\n list_filter = ('sex','interest_items','last_exported')\n fieldsets = (\n (None, {'fields': ( 'name',\n 'email',\n 'dob',\n 'sex',\n 'mobile',\n 'interest_items',\n )\n }),\n )\n actions=[export_csv]\nadmin.site.register(Member, MemberAdmin)\n\n\n\nclass InterestGroupAdmin(admin.ModelAdmin):\n list_display = ('sort_weight','title','description')\n search_fields = ('title','description')\n fieldsets = (\n (None, {'fields': ( 'sort_weight',\n 'title',\n 'description',\n )\n }),\n )\nadmin.site.register(InterestGroup, InterestGroupAdmin)\n\n\n\nclass InterestItemAdmin(admin.ModelAdmin):\n list_display = ('sort_weight','title','group','description')\n search_fields = ('title','description')\n list_filter = ('group',)\n fieldsets = (\n (None, {'fields': ( 'sort_weight',\n 'group',\n 'title',\n 'description',\n )\n }),\n )\nadmin.site.register(InterestItem, InterestItemAdmin)\n" }, { "alpha_fraction": 0.6183721423149109, "alphanum_fraction": 0.6241135001182556, "avg_line_length": 30.5, "blob_id": "c6351c573e1bdb92a25f761c7a3237a67eaf34e9", "content_id": "f7f2b0870b8b3375388fe14c6560764b5e256fa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2961, "license_type": "no_license", "max_line_length": 148, "num_lines": 94, "path": "/views.py", "repo_name": "udox/udoxmailinglist", "src_encoding": "UTF-8", "text": "import datetime\nfrom django import forms\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template.loader import render_to_string\nfrom django.template import RequestContext\nfrom django.core.urlresolvers import reverse\nfrom django.forms.extras import widgets\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\n\nfrom udoxmailinglist import models\nfrom udoxcore.models import GENDER_CHOICES\nfrom udoxrecaptcha import ReCaptchaField\n\n\nclass SignupForm(forms.ModelForm):\n sex = forms.ChoiceField(widget=forms.RadioSelect, choices=GENDER_CHOICES, required=False)\n now = 
datetime.datetime.now()\n    dob = forms.DateField(widget=widgets.SelectDateWidget(years=range(now.year,1900,-1)), required=False)\n    interest_items = forms.ModelMultipleChoiceField(queryset=models.InterestItem.objects.all(), widget=forms.CheckboxSelectMultiple, required=False)\n    captcha = ReCaptchaField()\n\n    class Meta:\n        model = models.Member\n\n\ndef signup(request):\n    status = 200\n    if request.method == 'POST':\n        form = SignupForm(request.POST, initial={'captcha': request.META['REMOTE_ADDR']})\n        if form.is_valid():\n            form.save()\n            if request.is_ajax():\n                template = 'mailinglist/thankyou_message.html'\n            else:\n                template = 'mailinglist/thankyou.html'\n            return render_to_response(template,\n                {},\n                context_instance=RequestContext(request)\n            )\n        else:\n            status = 400\n    else:\n        form = SignupForm()\n    \n    interests = models.InterestItem.objects.all()\n    \n    if request.is_ajax():\n        template = 'mailinglist/signup_form.html'\n    else:\n        template = 'mailinglist/signup.html'\n    \n    return HttpResponse(\n        render_to_string(template,\n            {\n                'form': form,\n                'interests': interests,\n            },\n            context_instance=RequestContext(request)\n        ),\n        status=status,\n    )\n\ndef mobile_signup(request):\n    status = 200\n    if request.method == 'POST':\n        form = SignupForm(request.POST, initial={'captcha': request.META['REMOTE_ADDR']})\n        if form.is_valid():\n            form.save()\n            template = 'mailinglist/mobile_thankyou.html'\n            return render_to_response(template,\n                {},\n                context_instance=RequestContext(request)\n            )\n        else:\n            # the redirect was previously created but never returned\n            return HttpResponseRedirect(\"/signup/mobile/\")\n    else:\n        form = SignupForm()\n    \n    interests = models.InterestItem.objects.all()\n    \n    template = 'mailinglist/mobile_signup.html'\n    \n    return HttpResponse(\n        render_to_string(template,\n            {\n                'form': form,\n                'interests': interests,\n            },\n            context_instance=RequestContext(request)\n        ),\n        status=status,\n    )\n" }, { "alpha_fraction": 0.6926973462104797, "alphanum_fraction": 0.7011397480964661, "avg_line_length": 39.81034469604492, "blob_id": "d7f481cbaa8da464bec4941f42db18bc99f5236d", "content_id": "ed4dc8382e5eed0fe2a58e2f71342909adaeca28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2369, "license_type": "no_license", "max_line_length": 167, "num_lines": 58, "path": "/models.py", "repo_name": "udox/udoxmailinglist", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom django.db import models\n\nfrom udoxcore.models import BaseModel, BaseManager, StatusModel, GENDER_CHOICES\n\n\nclass InterestGroup(StatusModel):\n    sort_weight = models.IntegerField(default=100, help_text=\"'Heavier' groups will sink to the bottom of lists. Items with the same weight will sort alphabetically.\")\n    title = models.CharField(max_length=128)\n    description = models.TextField(null=True, blank=True)\n    \n    def __unicode__(self):\n        return self.title\n    \n    class Meta:\n        ordering = [\"sort_weight\",\"title\"]\n\n\nclass InterestItemManager(BaseManager):\n    def get_query_set(self):\n        return super(InterestItemManager, self).get_query_set().filter(group__status__in=[1,2])\n\nclass InterestItem(StatusModel):\n    group = models.ForeignKey(InterestGroup, related_name='items', help_text=\"The item will not appear on site unless its parent group is also Published.\")\n    sort_weight = models.IntegerField(default=100, help_text=\"'Heavier' items will sink to the bottom of lists. 
Items with the same weight will sort alphabetically.\")\n    title = models.CharField(max_length=128)\n    description = models.TextField(null=True, blank=True)\n    \n    objects = InterestItemManager()\n    \n    def __unicode__(self):\n        return self.title\n    \n    class Meta:\n        ordering = [\"group__pk\",\"sort_weight\",\"title\"]\n\n\nclass Member(BaseModel):\n    join_date = models.DateTimeField(auto_now_add=True, editable=False, null=False)\n    last_modified = models.DateTimeField(auto_now=True, editable=False, null=False)\n    last_exported = models.DateTimeField(editable=False, null=True, blank=True)\n    name = models.CharField(max_length=128)\n    email = models.EmailField(unique=True)\n    dob = models.DateField('Date of Birth', blank=True, null=True, help_text=\"YYYY-MM-DD\")\n    sex = models.CharField(max_length=1, choices=GENDER_CHOICES, blank=True, null=True)\n    mobile = models.CharField(max_length=32, blank=True, null=True)\n    interest_items = models.ManyToManyField(InterestItem)\n    \n    def interest_groups(self):\n        group_ids = self.interest_items.order_by().values_list('group__pk', flat=True)\n        return InterestGroup.objects.filter(pk__in=group_ids)\n\n    def __unicode__(self):\n        return self.name\n    \n    class Meta:\n        ordering = [\"name\",\"email\"]\n        get_latest_by = 'join_date'\n\n\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 25.899999618530273, "blob_id": "da6d9da3870d9f7773cedc3b730f0f9e669a101c", "content_id": "8493001287e7bae307e1ee2bfe235ca115469dac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/urls.py", "repo_name": "udox/udoxmailinglist", "src_encoding": "UTF-8", "text": "from django.conf.urls.defaults import *\n\nurlpatterns = patterns('',\n    url(r'^$',\n        view='udoxmailinglist.views.signup',\n        name='signup_form'),\n    url(r'^mobile/$',\n        view='udoxmailinglist.views.mobile_signup',\n        name='mobile_signup_form'),\n)\n\n" } ]
4
dhruv-chitkara27/Python--Fullstack-Academy-batch-2016
https://github.com/dhruv-chitkara27/Python--Fullstack-Academy-batch-2016
dea948a2d5f51d0fd33a419cc85bfa19f6b6d447
5623ddb9a77af320f203a1fcd4bcbf0742c98e6e
872434c860e56b7276908fcb75eafcfab108a2c9
refs/heads/master
2020-03-22T18:23:13.137150
2019-05-12T16:50:29
2019-05-12T16:50:29
140,456,915
1
2
null
2018-07-10T16:02:25
2018-12-05T17:54:44
2018-12-08T17:50:25
Python
[ { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.6915094256401062, "avg_line_length": 30.176469802856445, "blob_id": "f4fb063cb338e5e17d1885b440dedf291eac59e6", "content_id": "f337737904e36facda84c3273e6d5e623f080b43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 114, "num_lines": 34, "path": "/Python Projects/python_project1/3 modules.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "# Task :\n#\n# Given the list of n numbers contains positive and negative values.\n# Write a program using modules :\n# - to find unique triples whose 3 elements gives the sum 0 from the given numbers\n# - form the given list find the possible 2 digit valid numbers\n# - find a random number which can be both positive and negative from the sepicified range entered by the user\n\nimport unique_triplets, valid_numbers, random_numbers\n\nn = int(input('Enter the List Size : '))\nmy_list = []\n\nfor i in range(n):\n num = int(input('Enter Number : '))\n my_list.append(num)\n\nprint('Enter your choice : ')\nprint('1 for finding triplets with sum 0')\nprint('2 for finding 2 digit valid numbers')\nprint('3 for finding a random number')\n\nchoice = int(input('Enter your choice : '))\n\nwhile(choice < 1 or choice > 3):\n print('Wrong Choice Entered...TRY AGAIN')\n choice = int(input('Enter your choice : '))\n\nif(choice == 1):\n unique_triplets.triplets(my_list)\nelif(choice == 2):\n valid_numbers.validate(my_list)\nelif(choice == 3):\n random_numbers()\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.625, "avg_line_length": 23, "blob_id": "7dd4fcbeb91f1b57423feaf3475e9c3adc075956", "content_id": "16e4ad4756fc4d0f58b4c79435d3293c57153d67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/Class Questions/dict1.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "dict1 = {x:x**2 for x in range(5)}\nprint(dict1)\n" }, { "alpha_fraction": 0.5056818127632141, "alphanum_fraction": 0.5681818127632141, "avg_line_length": 21, "blob_id": "db7a73dd6a4323e1dca3d31f5479e7f08ebe8cc5", "content_id": "7ed68f468248f8b0988f31fed445a8d8f04f3d6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 32, "num_lines": 16, "path": "/Class Questions/question7.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "x = input(\"Enter the string=\")\ny=len(x)\ncount1=0\ncount2=0\ncount3=0\nfor i in range(y):\n if(x[i]>='a' and x[i]<='z'):\n count1=count1+1\n if(x[i]>='A' and x[i]<='Z'):\n count2=count2+1\n if(x[i]>='0' and x[i]<='9'):\n count3=count3+1\nprint(\"small\",count1)\nprint(\"large\",count2)\nprint(\"digit\",count3)\nprint(\"alpha\",count1+count2)\n" }, { "alpha_fraction": 0.6181818246841431, "alphanum_fraction": 0.6181818246841431, "avg_line_length": 26.5, "blob_id": "1ae7a1a31d7c5d577fb15dc47460d8e7136fa497", "content_id": "90f59c9738ba7baee38767993c0c0d93c2b54dbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/Class 
Questions/sum_lecture.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "a = int(input(\"Enter the value of a=\"))\nb = int(input(\"Enter the vaue of b=\"))\nsum = int(a)+int(b)\nprint(sum)\n" }, { "alpha_fraction": 0.5277777910232544, "alphanum_fraction": 0.6018518805503845, "avg_line_length": 14.428571701049805, "blob_id": "0e8b52a5a336b19bcb4e5edd8d204cba7119a3c5", "content_id": "65cc1dd3c9423127aef0d1206916061ce0267244", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/Class Questions/slice_operator.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "a = \"Hello friends!! Chai piloo\"\none = a[2:5]\nprint(one)\ntwo = a[2:10:2]\nprint(two)\nthree = a[16:]\nprint(three)\n" }, { "alpha_fraction": 0.5671077370643616, "alphanum_fraction": 0.5879017114639282, "avg_line_length": 20.15999984741211, "blob_id": "c5f243e028e41810627366807a15fc8b5f77c23a", "content_id": "9323104662c06fbd34c40226484120a7f13661d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 148, "num_lines": 25, "path": "/Exam Questions/class_test_2_que1.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "\"\"\"\nWrite a program which takes two digits x and y as input and generates a 2d array. The element value in the ith row and jth column should be i*(j-1).\n \n SAMPLE INPUT :\n 3,5\n \n SAMPLE OUTPUT :\n [ [],[],[] ]\n \n\"\"\"\n \nnum = input('Enter x and y(with comma) : ').split(',')\ni = int(num[0])\nj = int(num[1])\nmain_list = []\nsub_list = []\n\nfor a in range(1,i+1):\n for b in range(1,j+1):\n value = a*(b-1)\n sub_list.append(value)\n main_list.append(sub_list)\n sub_list = []\n\nprint(f'Required Output : {main_list}')\n" }, { "alpha_fraction": 0.38884758949279785, "alphanum_fraction": 0.48252788186073303, "avg_line_length": 39.75757598876953, "blob_id": "3d3703d549175f0e8861ba2b937d4639b23361cf", "content_id": "bcdb734b965fbe8607cc9b7daa543209660e0f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1345, "license_type": "no_license", "max_line_length": 246, "num_lines": 33, "path": "/Exam Questions/class_test_question_one.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "\"\"\"QUESTION : -> A list of numbers is provided to the students such that they are required to sort the list according to the length of its elements assume that all the elements of the list are integers. 
Print the output in the required format.\n\n | | INPUT | OUTPUT\n | Testcase 1 |121 131 111 1413 1431|121 131 111 1413 1431\nSample Input : | | |\n[23,10,4566,344,123,121] | | |\n | Testcase 2 |2 3 10 1 3 123 |2 3 1 3 10 123\nSample Output : | | |\n[23,10,344,123,121,4566] | Testcase 3 |111 101 9 3 123 1431 |9 3 19 111 101 123 \n | | 19 |1431\n\"\"\"\n\ndef sort_list(my_list):\n new_list = []\n length_of_element = 4\n\n for j in range(1,length_of_element+1):\n for i in range(len(my_list)):\n if(len(str(my_list[i])) == j):\n new_list.append(my_list[i])\n\n print(f'Sorted List according to Length of Elements : {new_list}')\n\nif __name__ == '__main__':\n my_list = []\n\n n = int(input('Enter List Length : '))\n\n for i in range(n):\n num = int(input('Enter Number : '))\n my_list.append(num)\n\n sort_list(my_list)\n" }, { "alpha_fraction": 0.3478260934352875, "alphanum_fraction": 0.49275362491607666, "avg_line_length": 22, "blob_id": "6b2eb523c2f395272cfedf1d5f15dd31c707bcb9", "content_id": "dfd9a55cd1a7a2b59eab1d3d108ad36280365379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 26, "num_lines": 3, "path": "/Class Questions/question3.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "for i in range(100,501):\n if(i%5==0 and i%7==0):\n print(i)\n" }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 34, "blob_id": "61f17a2ca650bd878ef22580f8a3615dd26c1e27", "content_id": "4ab4014d4abdb01baa7cb478b85bd94939693d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 48, "num_lines": 2, "path": "/Class Questions/dict_circumference.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "circumference = {r:2*3.14*r for r in range(2,9)}\nprint(circumference)\n" }, { "alpha_fraction": 0.5922330021858215, "alphanum_fraction": 0.6601941585540771, "avg_line_length": 16.08333396911621, "blob_id": "3bb73c2820c4997359d0ade1bfc34fd93ecb7a9e", "content_id": "f9942d2627d72ace5e88e4be7773aab724d3589c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 28, "num_lines": 12, "path": "/Class Questions/practice_modules.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "import random\na = random.choice([2,4,5,6])\nprint(a)\nb = [2,4,5,7]\nrandom.shuffle(b)\nprint(b)\nc = random.randrange(2,3,2)\nprint(c)\nd = random.uniform(2,4)\nprint(d)\ne = random.seed(2)\nprint(random.random())\n\n" }, { "alpha_fraction": 0.3457711338996887, "alphanum_fraction": 0.37562188506126404, "avg_line_length": 21.05555534362793, "blob_id": "64cae418d9949b774b67e6bfede61725c90f4d6d", "content_id": "19eb3ec2ae753d6405bee341b4866db8ec49f1a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/Class Questions/question11.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "a = int(input(\"Enter the first range : \"))\nb = int(input(\"Enter the second range : \"))\nif(a%2!=0):\n a=a+1\nwhile(a!=b):\n 
if(a%2==0):\n flag=1\n c = str(a)\n for j in range(len(c)):\n if(int(c[j])%2==0):\n #print(c[j])\n flag=1\n else:\n flag=0\n break\n if(flag==1):\n print(a)\n a = a+2\n \n" }, { "alpha_fraction": 0.6340206265449524, "alphanum_fraction": 0.6443299055099487, "avg_line_length": 31.33333396911621, "blob_id": "3a776b3be9c6d7530ac2f2921dd1af42408802c9", "content_id": "b7bd3ce716487cb917a6525aed4d8391a97a40f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 101, "num_lines": 6, "path": "/Class Questions/question10.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "#WAP TO CHECK FOR ODD NUMBERS VALUE GIVEN IN A STRING.IF MORE THAN TWO VALUES THEN SEPARATE BY COMMA.\n\na = input(\"Enter the string : \")\nfor i in range(a):\n if(a[i]%2!=0):\n print(a[i])\n" }, { "alpha_fraction": 0.40625, "alphanum_fraction": 0.46875, "avg_line_length": 9.666666984558105, "blob_id": "07bbb7ad3655cd3dfa1f828f476be49798e87da9", "content_id": "9525f2f2a48384b411258e6e80f778606e55563a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 11, "num_lines": 6, "path": "/Class Questions/question5.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "a = 'hello'\nb = a[0]\nc = a[4]\nd = a[1:4]\nsum = c+d+b\nprint(sum)\n" }, { "alpha_fraction": 0.5894039869308472, "alphanum_fraction": 0.6158940196037292, "avg_line_length": 20.571428298950195, "blob_id": "44d1c32cd7172c1407d2705cb040390e06568412", "content_id": "b049cc0ca7761ee6e74a6f1065033cecd447b368", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 31, "num_lines": 7, "path": "/Class Questions/dict_occurence_print_character.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "dict1 = { }\nstr = input(\"Enter the string\")\nfor i in str:\n dict1[i]=str.count(i)\nprint(dict1)\nfor key,val in dict1.items():\n print(key,'*' *val)\n" }, { "alpha_fraction": 0.5284090638160706, "alphanum_fraction": 0.5625, "avg_line_length": 34.20000076293945, "blob_id": "479a9eace56a19f76c1a5476fdd2bd03bdae19dc", "content_id": "d6b85d42089caa5de00fd9c88eb65542ffd48fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 46, "num_lines": 5, "path": "/Class Questions/keyworded_arguements.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "def print_val(**val):\n for key,val in val.items():\n print(\"%s has got %d marks\"%(key,val))\n print(\"{} has got {}\".format(key,val))\nprint_val(Ram=20,X=30,Y=31)\n" }, { "alpha_fraction": 0.5344827771186829, "alphanum_fraction": 0.5344827771186829, "avg_line_length": 11.88888931274414, "blob_id": "78e5cd39579fabb1d482fc07e322079cb5d7136d", "content_id": "0a9d4e13cd84e684bd383c41442010e54c6951a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 16, "num_lines": 9, "path": "/Class Questions/question6.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": 
"UTF-8", "text": "a = 'hello'\nb = a.count('h')\nprint(b)\nc = a.count('e')\nprint(c)\nd = a.count('l')\nprint(d)\ne = a.count('o')\nprint(e)\n" }, { "alpha_fraction": 0.6997885704040527, "alphanum_fraction": 0.7019027471542358, "avg_line_length": 21.285715103149414, "blob_id": "32fd4cd6f58290d8d9b4e2b66660a9e324694ecb", "content_id": "ebad112936ac9f82d92aa8b1e7909f7dd24bda0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 147, "num_lines": 21, "path": "/Exam Questions/class_test_2_q3.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "\"\"\"\n\nWAP that accepts a sequence of wide space separated words and print the words after removing all duplicates words and sorting them alphanumerically\n \nSAMPLE INPUT :\n\nhello world practice makes perfect and hello world again\n \nSAMPLE OUTPUT :\n\nagain and hello makes perfect practice world\n \n\"\"\"\n\nseq = input(\"Enter the string(with space) : \").split(' ')\n\nseq = list(set(seq))\nseq.sort()\nprint(seq[0],end='')\nfor i in range(len(seq)):\n print(f' {seq[i]}',end='')\n\n\n\n\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5164233446121216, "avg_line_length": 23.909090042114258, "blob_id": "9cf3277e097359862853b6c7c5e4ebac37ed4343", "content_id": "63b64ba873889f9b639c1620de7a9249eb32f5b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 80, "num_lines": 22, "path": "/Python Projects/python_project1/unique_triplets.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "# Task :\n#\n# to find unique triples whose 3 elements gives the sum 0 from the given numbers\n\ndef triplets(my_list):\n n = len(my_list)\n\n found = True\n for i in range(0, n-2):\n\n for j in range(i+1, n-1):\n\n for k in range(j+1, n):\n\n if (my_list[i] + my_list[j] + my_list[k] == 0):\n print(my_list[i],'\\t', my_list[j],'\\t', my_list[k])\n found = True\n\n\n # If no triplet with 0 sum found in listt\n if (found == False):\n print(\"No triplets with sum zero exists\")\n" }, { "alpha_fraction": 0.453125, "alphanum_fraction": 0.578125, "avg_line_length": 20.33333396911621, "blob_id": "6411333c971f6f5c12edc625ab7c887bee7ee8bb", "content_id": "40d4168453252a38cd2db53c022bc640083182a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 27, "num_lines": 3, "path": "/Class Questions/question4.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "for i in range(300,199,-1):\n square = i**2\n print(square)\n" }, { "alpha_fraction": 0.6715686321258545, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 24.3125, "blob_id": "deb2ef87b7a091190651347bc8dd86ebba3bc2dc", "content_id": "26b96a7a5d17c46c90549d27d333c508237c09f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 163, "num_lines": 16, "path": "/Exam Questions/class_test_2_que2.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "\"\"\"\nWrite a program that accepts comma separated sequence of words as input and print the words in a comma separated 
sequence after sorting them in alphabatical order.\n \n SAMPLE INPUT :\n without , hello , bag , world\n \n SAMPLE OUTPUT :\n bag,hello,without,world\n \n\"\"\"\n\nseq = input(\"Enter the string : \").split(',')\nseq.sort()\nprint(seq[0],end='')\nfor i in range(1,len(seq)):\n print(f',{seq[i]}',end='')\n\n\n\n" }, { "alpha_fraction": 0.4447004497051239, "alphanum_fraction": 0.5760368704795837, "avg_line_length": 29.714284896850586, "blob_id": "48cfa17ff1b7898857145354aface08efb8fc519", "content_id": "704a482ac01878e7f0dd38c935a104521ad0cc34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 134, "num_lines": 14, "path": "/Exam Questions/class_test_2_que4.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "\"\"\"\n\nWrite a program which will find all such numbers between 1000 and 3000 both included such that each digit of a number is a even number\n\n\"\"\"\n\nprint('Required Output : ',end='')\nfor i in range(2000,3001,2):\n if((i >= 2010 and i < 2020) or (i >= 2030 and i < 2040) or (i >= 2050 and i < 2060)):\n pass\n elif((i >= 2070 and i < 2080) or (i >= 2090 and i < 3000)):\n pass\n else:\n print(f'{i}',end=' ')\n\n \n" }, { "alpha_fraction": 0.591160237789154, "alphanum_fraction": 0.6151012778282166, "avg_line_length": 30.941177368164062, "blob_id": "d5644ddb76efa3f4a324fa43136a743223e8e4a0", "content_id": "8cf0ea40e326a66c06de072bb51af1af470ad10c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/Class Questions/question8.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "w1=0\nd1=0\ndeposit=0\nwithdraw=0\nbalance=0\nentries = int(input(\"Enter the no. 
of transactions you want to do : \"))\nfor i in range(entries):\n choice = input(\"Enter b for balance, d for deposit , w for withdrawl\")\n if(choice=='d'):\n deposit = input(\"Enter the amount of cash you want to deposit : \")\n d1=d1+int(deposit)\n if(choice=='w'):\n withdraw = input(\"Enter the amount of cash you want to withdraw : \")\n w1=w1+int(withdraw)\n if(choice=='b'):\n balance = d1-w1\n print(\"Balance = \",balance)\n" }, { "alpha_fraction": 0.5806451439857483, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 19.66666603088379, "blob_id": "aea191c41c3d5e37fb97c22ddc26b644b39732a2", "content_id": "a648a2d29c8ee21618ecd1fa6559b058ea4c02b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/Class Questions/question9.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "a = input(\"Please insert characters : \")\nb = a[0::2]\nprint(b)\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7844827771186829, "avg_line_length": 37.66666793823242, "blob_id": "d30fb9ed4f9e87b8e91c711aa495637031044aa5", "content_id": "e0c9e019efec713c6443705205fe5c9ca0004a29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 116, "license_type": "no_license", "max_line_length": 72, "num_lines": 3, "path": "/README.md", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": ">**Python--Fullstack-Academy-batch-2016**\n\n>This repository contains all the Python questions covered in the Class!\n" }, { "alpha_fraction": 0.6279069781303406, "alphanum_fraction": 0.6279069781303406, "avg_line_length": 16.200000762939453, "blob_id": "a508a072fa638e5d1b61ed8ef3f7d621d98cda05", "content_id": "b246ae7b4efdf39e0b064b601a892106d666ac4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/Class Questions/experiment2.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "n = input(\"Enter the value of n\")\n\nanswer = int(n)+int(n+n)+int(n+n+n)\n\nprint(answer)\n" }, { "alpha_fraction": 0.5259259343147278, "alphanum_fraction": 0.585185170173645, "avg_line_length": 21.5, "blob_id": "a5000a0d57a5b458890c82165d2727abc267de62", "content_id": "4d967d22ad6c6c8ba45ded35f9130aa67d73421b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/Class Questions/dict_circumference_inputFromUser.py", "repo_name": "dhruv-chitkara27/Python--Fullstack-Academy-batch-2016", "src_encoding": "UTF-8", "text": "dict1={ }\nr = int(input(\"Enter the radius\"))\nwhile(r!=-1):\n dict1[r] = 2*3.14*r\n r = int(input(\"Enter the radius\"))\nprint(dict1)\n" } ]
26
ArtemCLime/VimTest
https://github.com/ArtemCLime/VimTest
1858c7c14552091f19d5d3abd7dfb03ca203b99d
adc34f94c5d6b86eb7bba5b90bf008a7cf60d498
a9d8b15bd175384d8ec6f977b8cefc8d14c107bf
refs/heads/master
2016-09-13T22:43:30.793764
2016-05-12T23:43:06
2016-05-12T23:43:06
58,675,370
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6508728265762329, "alphanum_fraction": 0.6558603644371033, "avg_line_length": 17.55813980102539, "blob_id": "22bc76e5ee8e53f114b669431417d2ab0b7727d1", "content_id": "d36f03f8d3e62233461dd4c321c491c8e5387a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 47, "num_lines": 43, "path": "/hello_world.py", "repo_name": "ArtemCLime/VimTest", "src_encoding": "UTF-8", "text": "\nclass Human():\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.age = 0\n\n\tdef __repr__(self):\n\t\treturn self.name\n\n\tdef breathe(self, air):\n\t\tif air is not None:\n\t\t\treturn True\n\t\treturn False\n\n\tdef get_older(self):\n\t\tif self.age == 99:\n\t\t\treturn False\n\t\tself.age += 1\n\t\treturn True\n\t\n\nimport unittest\n\nclass HumanTest(unittest.TestCase):\n\tdef test_getting_older(self):\n\t\thuman = Human('Lirian')\n\t\tself.assertEqual(True, human.get_older())\n\n\tdef test_return_name(self):\n\t\tname = \"Arkaniy\"\n\t\thuman = Human(name)\n\t\tself.assertEqual(name, str(human))\n\n\tdef test_breathe(self):\n\t\thuman = Human('Test')\n\t\tself.assertEqual(True, human.breathe('air'))\n\n\tdef test_dont_breathe(self):\n\t\thuman = Human('NewTest')\n\t\tself.assertEqual(False, human.breathe(None))\n\n\nif __name__ == \"__main__\":\n\tunittest.main()\n\n\n\n" } ]
1
cclauss/testeng-ci
https://github.com/cclauss/testeng-ci
7693aad53df1a55bac0fada5443d1fe9b7631e95
7b99e107ae4869c9ff24f6105d540f24f224780a
128d48e30f01c2f7bed9d153186e33879c23a184
refs/heads/master
2020-04-16T11:35:51.619232
2019-01-08T16:53:49
2019-01-08T16:53:49
165,542,896
0
0
null
2019-01-13T19:02:34
2019-01-08T16:53:56
2019-01-08T16:53:52
null
[ { "alpha_fraction": 0.8011869192123413, "alphanum_fraction": 0.8011869192123413, "avg_line_length": 55.16666793823242, "blob_id": "3185c1062229858a886b7aa2828cc633fafb4644", "content_id": "98241446ee022a5d2af8b6cb46d86c6781502ef1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 337, "license_type": "no_license", "max_line_length": 74, "num_lines": 6, "path": "/Makefile", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "upgrade:\n\tpip install -qr requirements/pip-tools.txt\n\tpip-compile --upgrade -o requirements/base.txt requirements/base.in\n\tpip-compile --upgrade -o requirements/testing.txt requirements/testing.in\n\tpip-compile --upgrade -o requirements/travis.txt requirements/travis.in\n\tpip-compile --upgrade -o requirements/aws.txt requirements/aws.in\n" }, { "alpha_fraction": 0.6017804145812988, "alphanum_fraction": 0.604451060295105, "avg_line_length": 28.30434799194336, "blob_id": "10a9dc73945660831452d3493e083bf72727bacc", "content_id": "ffeb560c5df907896ec47583d9062ceb026d5482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3370, "license_type": "no_license", "max_line_length": 77, "num_lines": 115, "path": "/aws/deregister_amis.py", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "\"\"\"\nUse boto to deregister AMIs that match a given tag key-value pair.\n\nThat tag key-value pair is hardcoded to:\n delete_or_keep: delete\n\nUsage:\n If you've defined the AWS credentials as environment variables or in a\n .boto file, then use:\n `python deregister_amis.py`\n\n Else, you can add the aws keys as arguments to the above command:\n ** '--aws-access-key-id' or '-i'\n ** '--aws-secret-access-key' or '-s'\n\n If you don't want to deregister AMIs, but you'd like to know which ones\n you'd deregister if you ran the command, then use the --dry-run switch.\n\n\"\"\"\nimport argparse\nimport boto\nfrom boto.exception import EC2ResponseError\nimport logging\nimport os\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_ec2_connection(aws_access_key_id, aws_secret_access_key):\n return boto.connect_ec2(aws_access_key_id, aws_secret_access_key)\n\n\ndef deregister_amis_by_tag(tag_key, tag_value, dry_run, connection):\n \"\"\"\n Deregisters AMIs that are found according to tag key/value pairs.\n \"\"\"\n\n tag_key_string = \"tag:{tag_key}\".format(tag_key=tag_key)\n\n logger.info(\"Finding AMIs tagged with {key}: {value}\".format(\n key=tag_key,\n value=tag_value,\n ))\n try:\n amis = connection.get_all_images(filters={tag_key_string: tag_value})\n except EC2ResponseError as err:\n logger.error(\"An error occurred gathering images.\")\n logger.error(err.message)\n raise\n\n if len(amis) == 0:\n logger.info('No images found matching criteria.')\n return\n for i in amis:\n logger.info(\"Deregistering {image}\".format(image=str(i)))\n if dry_run:\n logger.info(\"--> Dry run: skipping deregister\")\n else:\n i.deregister()\n\n\ndef main(raw_args):\n desc = (\n \"Deregister EC2 images that are tagged for 'delete_or_keep' with \"\n \"'delete' as the tag value.\"\n )\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\n '--aws-access-key-id', '-i',\n dest='aws_access_key_id',\n help='aws access key id',\n default=os.environ.get(\"AWS_ACCESS_KEY_ID\"),\n )\n parser.add_argument(\n '--aws-secret-access-key', '-s',\n dest='aws_secret_access_key',\n help='aws secret access key',\n default=os.environ.get(\"AWS_SECRET_ACCESS_KEY\"),\n )\n parser.add_argument(\n '--dry-run',\n action='store_true',\n default=False,\n help=\"\"\"\n Do not deregister any AMIs, just list the ones\n that are found matching the tag key/value pair.\n \"\"\"\n )\n parser.add_argument(\n '--log-level',\n dest='log_level',\n help=\"set logging level\",\n choices=[\n 'DEBUG', 'debug',\n 'INFO', 'info',\n 'WARNING', 'warning',\n 'ERROR', 'error',\n 'CRITICAL', 'critical',\n ],\n default=\"INFO\",\n )\n args = parser.parse_args(raw_args)\n\n # Set logging level\n logging.getLogger(__name__).setLevel(args.log_level.upper())\n conn = get_ec2_connection(\n args.aws_access_key_id,\n args.aws_secret_access_key\n )\n deregister_amis_by_tag(\"delete_or_keep\", \"delete\", args.dry_run, conn)\n\nif __name__ == \"__main__\":\n logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s')\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.609375, "alphanum_fraction": 0.6098958253860474, "avg_line_length": 25.853147506713867, "blob_id": "69bc730b243ba728669d07356255acbbfeaea3ab", "content_id": "b407b148a3024071adab3d5e52652c46f471d9bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3840, "license_type": "no_license", "max_line_length": 70, "num_lines": 143, "path": "/mobile_app/test/test_trigger_build.py", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "\"\"\"\nTests for trigger app build script\n\"\"\"\nimport json\nimport os\nimport shutil\nfrom StringIO import StringIO\nfrom unittest import TestCase, skip\n\nimport git\nfrom mock import Mock, patch\n\nfrom .. import trigger_build\nfrom .. import path_constants\nfrom .. import exceptions\nfrom . import utils\n\nTEST_BRANCH = \"test-branch\"\n\n\nclass Aborted(Exception):\n \"\"\"\n Helper exception for indicating system exit instead of\n actually exiting the process\n \"\"\"\n pass\n\n\nclass TriggerBuildTestCase(TestCase):\n \"\"\"\n Test cases for the trigger build task\n \"\"\"\n\n def setUp(self):\n self.repo_path = utils.make_test_repo()\n self.addCleanup(self._clear_repo)\n\n repo = git.Repo(self.repo_path)\n repo.create_remote('origin', \"https://example.com\")\n\n def _clear_repo(self):\n \"\"\"\n Removes the created repo\n \"\"\"\n shutil.rmtree(self.repo_path)\n\n @patch('sys.stderr', new_callable=StringIO)\n def test_missing_arguments(self, _mock_stdout):\n \"\"\"\n Tests that the command quits when not given\n enough arguments\n \"\"\"\n with self.assertRaises(SystemExit) as context_manager:\n trigger_build.run_trigger_build([], {})\n self.assertEqual(context_manager.exception.code, 2)\n\n @patch('git.Remote.pull')\n def test_branch_already_exists(self, _):\n \"\"\"\n Tests that the task fails if the branch\n already exists\n \"\"\"\n repo = git.Repo(self.repo_path)\n repo.create_head(TEST_BRANCH, force=False)\n environ = _dummy_environ()\n with self.assertRaises(exceptions.BranchAlreadyExists):\n _trigger_build(self.repo_path, environ)\n\n def test_missing_env_variable(self):\n \"\"\"\n Test that a missing expected environment variable causes\n the task to fail.\n \"\"\"\n with self.assertRaises(exceptions.MissingEnvironmentVariable):\n _trigger_build(self.repo_path, {})\n\n @patch('git.Remote.pull')\n @patch('git.Remote.push')\n def test_config_file_written(self, _, __):\n \"\"\"\n Test that the configuration passed in is properly written to\n the expected file and is committed.\n \"\"\"\n environ = _dummy_environ()\n _trigger_build(self.repo_path, environ)\n\n config_path = os.path.join(\n self.repo_path,\n path_constants.CONFIG_FILE\n )\n config = json.load(file(config_path))\n\n for key, value in environ.iteritems():\n self.assertEqual(config[key], value)\n\n @patch('git.Remote.pull')\n @patch('git.Remote.push')\n def test_branch_setup(self, _, __):\n \"\"\"\n Verifies that the change has been committed to a new branch\n with the appropriate name\n \"\"\"\n _trigger_build(self.repo_path, _dummy_environ())\n\n repo = git.Repo(self.repo_path)\n self.assertEqual(repo.active_branch.name, TEST_BRANCH)\n\n # verify that the config was committed by adding it again\n # and making sure that didn't actually do anything\n # by checking that there are no diffs\n config_path = os.path.join(\n self.repo_path,\n path_constants.CONFIG_FILE\n )\n repo.index.add([config_path])\n diffs = repo.head.commit.diff()\n self.assertEqual(len(diffs), 0)\n\n\n# Helpers\n\n\ndef _trigger_build(repo_path, environ):\n \"\"\"\n Helper to kick off the trigger build task\n \"\"\"\n trigger_build.run_trigger_build(\n [\n \"--trigger-repo-path\", repo_path,\n \"--branch-name\", TEST_BRANCH\n ],\n environ\n )\n\n\ndef _dummy_environ():\n \"\"\"\n Generates a test config for all known keys\n \"\"\"\n result = {}\n for key in trigger_build.EXPECTED_ENVIRONMENT_VARIABLES:\n result[key] = \"test-value\"\n return result\n" }, { "alpha_fraction": 0.734004557132721, "alphanum_fraction": 0.734004557132721, "avg_line_length": 30.40816307067871, "blob_id": "4d97dabbf1672bf3d93eb8f94a3b83e82465d7ad", "content_id": "971027599270007b0cc62680e4e9fee52128e404", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3079, "license_type": "no_license", "max_line_length": 188, "num_lines": 98, "path": "/mobile_app/README.md", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "Mobile Apps\n===========\n\nTools contained here are used for working with the mobile-apps.\n\n\nSetup\n-----\nTo run any of these scripts, you will first need to:\n* Create your virtual environment\n* ``pip install -r testeng-ci/requirements.txt``\n\nThe following scripts constitute a build pipeline for the edX mobile apps.\n\nmake_android_build\n------------------\nInvokes ``make_build``, filling in parameters specific to the edx android app.\n\n* Call script as a python module, e.g.\n\n\t`python -m mobile_app.make_android_build`\n\nmake_ios_build\n------------------\nInvokes ``make_build``, filling in parameters specific to the edx ios app.\n\n* Call script as a python module, e.g.\n\n\t`python -m mobile_app.make_ios_build`\n\n\n\nmake_build\n----------------\nThis contains code to generate a new environment and command line for the\n``trigger_build`` task (see below) by asking the user to enter that\ninformation manually. It is meant to stand in for a task runner like Jenkins\nwhen doing local testing. Once the user has entered all the relevant\ninformation, it invokes ``trigger_build`` with those values.\n\n*Usage*\n\n* Call script as a python module, e.g.\n\n\t`python -m mobile_app.make_build`\n\n\ntrigger_build\n-------------\nThis will:\n\n* Write a known set of environment variables to disk in JSON format to a file\n named CONFIGURATION.\n* Create a new commit with that environment and push to origin.\n\nThe build repo should be configured so that this push will in turn trigger a CI\njob which makes a new build with that commit, providing access to that\nenvironment. The goal is to capture the environment where the job is triggered\n(like Jenkins) and save it so the actual build step can be run on a separate\nbuild machine with access to that environment.\n\n*Usage*\n\n* Export the necessary environment variables. There's a list inside the script.\n* Call script as a python module, e.g.\n\n\t`python -m mobile_app.trigger_build --branch-name UniqueBranchName --trigger-repo-path ../my-repo`\n\n\nThe expectation is that the branch name will be some unique identifier like the\njenkins job number and the date.\n\t\n\ncheckout_build_repos\n--------------------\n\nChecks out a code and config repository and sets them up for building, by\ncreating a properties file to point the code at the config. This is meant to\nrun on a CI machine before the build step.\n\n*Usage*\n* Create a file called ``CONFIGURATION``. See ``trigger_build`` for more information on the format.\n* Call script as a python module, e.g.\n\n `python -m mobile_app.checkout_build_repos`\n\nThis will result in two new folders, \"code.git\" and \"config.git\" cloned from the code and config URLs in CONFIGURATION. They will be checked out to the revision specified in CONFIGURATION.\n\nupload_build\n------------\nUploads a built app for distribution + archiving. Currently builds go to HockeyApp. This is meant to run on a CI machine after the build step.\n\n*Usage*\n* Build your app somewhere\n* Export the necessary environment variables. There's a list inside the script.\n* Call script as a python module, e.g.\n \n `python -m mobile_app.upload_build`\n\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 24.689655303955078, "blob_id": "b3adbf159d1f4141e4e81140550ad01b9abfb346", "content_id": "8f1f94448c9f5acbfc78eca7f5e057e5effa257b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "no_license", "max_line_length": 68, "num_lines": 29, "path": "/mobile_app/test/test_utils.py", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "\"\"\"\nTest cases for mobile app build utility functions\n\"\"\"\n\nimport ddt\nfrom mobile_app import utils\nfrom unittest import TestCase\n\n\[email protected]\nclass UtilsTestCase(TestCase):\n # pylint: disable=missing-docstring\n\n @ddt.data(\"True\", \"true\", \"y\", \"Y\", \"Yes\", \"yes\", \"TRUE\", \"YES\")\n def test_affirmative_yes(self, string):\n \"\"\"\n Tests that we accept affirmative values\n \"\"\"\n self.assertTrue(utils.is_affirmative(string))\n\n @ddt.data(\n \"Trueblue\", \"False\", \"false\", \"n\",\n \"N\", \"No\", \"no\", \"FALSE\", \"NO\", \"NONE\", \"None\"\n )\n def test_affirmative_no(self, string):\n \"\"\"\n Tests that we reject non-affirmative values\n \"\"\"\n self.assertFalse(utils.is_affirmative(string))\n" }, { "alpha_fraction": 0.6275637745857239, "alphanum_fraction": 0.6276888251304626, "avg_line_length": 28.397058486938477, "blob_id": "afe034812229ecd16eec9d064d7e599845ebaf33", "content_id": "5e0dc61f158cd3aa6e8c629c1a77b7adbf4d3dac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7996, "license_type": "no_license", "max_line_length": 119, "num_lines": 272, "path": "/jenkins/bokchoy_db_pull_request.py", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "\"\"\"\nThis script is to be run inside a Jenkins job after updating bokchoy\ndb cache files through paver commands on edx-platform. If changes have\nbeen made, this script will generate a PR into master with the updates.\n\"\"\"\nimport sys\nimport logging\nimport os\n\nimport click\nfrom github import Github\nfrom git import Git\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nDB_CACHE_FILEPATH = 'common/test/db_cache'\n\nFINGERPRINT_FILE = 'bok_choy_migrations.sha1'\nBOKCHOY_DB_FILES = [\n 'bok_choy_data_default.json',\n 'bok_choy_data_student_module_history.json',\n 'bok_choy_migrations_data_default.sql',\n 'bok_choy_migrations_data_student_module_history.sql',\n 'bok_choy_schema_default.sql',\n 'bok_choy_schema_student_module_history.sql'\n]\nBOKCHOY_DB_FILES.append(FINGERPRINT_FILE)\n\n\ndef _get_github_token():\n \"\"\"\n Get the github token environment variable.\n \"\"\"\n try:\n github_token = os.environ['GITHUB_TOKEN']\n except KeyError:\n raise StandardError(\n \"Could not find env variable GITHUB_TOKEN. \"\n \"Please make sure the variable is set and try again.\"\n )\n return github_token\n\n\ndef _authenticate_with_github():\n \"\"\"\n Authenticate with Github using a token and return the instance.\n \"\"\"\n github_token = _get_github_token()\n try:\n github_instance = Github(github_token)\n except:\n raise StandardError(\n \"Failed connecting to Github. \" +\n \"Please make sure the github token is accurate and try again.\"\n )\n return github_instance\n\n\ndef _connect_to_repo(repo_name):\n \"\"\"\n Get the repository object of the desired repo.\n \"\"\"\n github_instance = _authenticate_with_github()\n repos_list = github_instance.get_user().get_repos()\n repository = None\n for repo in repos_list:\n if repo.name == repo_name:\n return repo\n\n raise StandardError(\n \"Could not connect to the repository: {}. \"\n \"Please make sure you are using the correct \"\n \"credentials and try again.\".format(repo_name)\n )\n\n\ndef _read_local_file_contents(repo_root, db_file):\n \"\"\"\n Read the contents of a file and return a string of the data.\n \"\"\"\n file_path = os.path.join(repo_root, DB_CACHE_FILEPATH, db_file)\n try:\n with open(file_path, 'r') as opened_file:\n data = opened_file.read()\n except:\n raise StandardError(\n \"Unable to read file: {}\".format(file_path)\n )\n return data\n\n\ndef _branch_exists(repository, branch_name):\n \"\"\"\n Checks to see if this branch name already exists\n \"\"\"\n try:\n repository.get_branch(branch_name)\n except:\n return False\n return True\n\n\ndef _get_file_sha(repository, file_path):\n \"\"\"\n Finds the sha of a specific file on master.\n Returns the file sha, or None if the file doesn't exist.\n \"\"\"\n try:\n # Get the blob sha of the db file on our branch\n file_sha = repository.get_file_contents(file_path).sha\n except:\n logger.info(\"Could not locate file: {}\".format(file_path))\n file_sha = None\n return file_sha\n\n\ndef _get_git_instance(repo_root):\n \"\"\"\n Gets the git instance of the edx-platform repository.\n \"\"\"\n git_instance = Git(repo_root)\n git_instance.init()\n return git_instance\n\n\ndef _get_modified_files_list(repo_root):\n \"\"\"\n Use the Git library to run the ls-files command to find\n the list of files modified.\n \"\"\"\n git_instance = _get_git_instance(repo_root)\n return git_instance.ls_files(\"-m\")\n\n\ndef _file_has_changed(db_file, modified_files):\n \"\"\"\n Determine if the db file is among the changed files.\n \"\"\"\n file_path = os.path.join(DB_CACHE_FILEPATH, db_file)\n return file_path in modified_files\n\n\ndef _create_branch(repository, branch_name, sha):\n \"\"\"\n Create a new branch with the given sha as its head.\n \"\"\"\n try:\n branch_object = repository.create_git_ref(branch_name, sha)\n except:\n raise StandardError(\n \"Unable to create git branch: {}. \"\n \"Check to make sure this branch doesn't already exist.\".format(branch_name)\n )\n return branch_object\n\n\ndef _update_file(repository, file_path, commit_message, contents, file_sha, branch_name):\n \"\"\"\n Create a commit on a branch that updates the file_path with the string contents.\n \"\"\"\n try:\n repository.update_file(file_path, commit_message, contents, file_sha, branch_name)\n except:\n raise StandardError(\n \"Error updating database file: {}\".format(file_path)\n )\n\n\ndef _create_file(repository, file_path, commit_message, contents, branch_name):\n \"\"\"\n Create a commit on a branch that creates a new file with the string contents.\n \"\"\"\n try:\n repository.create_file(file_path, commit_message, contents, branch_name)\n except:\n raise StandardError(\n \"Error creating database file: {}\".format(file_path)\n )\n\n\ndef _create_pull_request(repository, title, body, base, head):\n \"\"\"\n Create a new pull request with the changes in head.\n \"\"\"\n try:\n pull_request = repository.create_pull(\n title=title,\n body=body,\n base=base,\n head=head\n )\n except:\n raise StandardError(\n \"Could not create pull request\"\n )\n\n\ndef _delete_branch(branch_object):\n \"\"\"\n Delete a branch from a repository.\n \"\"\"\n try:\n branch_object.delete()\n except:\n raise StandardError(\n \"Failed to delete branch\"\n )\n\n\[email protected]()\[email protected](\n '--sha',\n help=\"Sha of the merge commit to base the new PR off of\",\n required=True,\n)\[email protected](\n '--repo_root',\n help=\"Path to local edx-platform repository that will \"\n \"hold updated database files\",\n required=True,\n)\ndef main(sha, repo_root):\n logger.info(\"Authenticating with Github\")\n repository = _connect_to_repo(\"edx-platform\")\n\n fingerprint = _read_local_file_contents(repo_root, FINGERPRINT_FILE)\n branch_name = \"refs/heads/testeng/bokchoy_auto_cache_update_{}\".format(fingerprint)\n\n if _branch_exists(repository, branch_name):\n # If this branch already exists, then there's already a PR\n # for this fingerprint. To avoid excessive PR's, exit.\n logger.info(\"Branch name: {} already exists. Exiting.\".format(branch_name))\n return\n\n branch_object = _create_branch(repository, branch_name, sha)\n modified_files = _get_modified_files_list(repo_root)\n\n changes_made = False\n for db_file in BOKCHOY_DB_FILES:\n repo_file_path = os.path.join('/', DB_CACHE_FILEPATH, db_file)\n file_sha = _get_file_sha(repository, repo_file_path)\n if file_sha:\n if _file_has_changed(db_file, modified_files):\n logger.info(\"File {} has changed.\".format(repo_file_path))\n local_file_data = _read_local_file_contents(repo_root, db_file)\n logger.info(\"Updating database file: {}\".format(repo_file_path))\n _update_file(repository, repo_file_path, 'Updating migrations', local_file_data, file_sha, branch_name)\n changes_made = True\n else:\n logger.info(\"Creating new database file: {}\".format(repo_file_path))\n local_file_data = _read_local_file_contents(repo_root, db_file)\n _create_file(repository, repo_file_path, 'Updating', local_file_data, branch_name)\n changes_made = True\n\n if changes_made:\n logger.info(\"Creating a new pull request.\")\n _create_pull_request(\n repository,\n 'Bokchoy db cache update',\n '@edx/testeng please review',\n 'master',\n branch_name\n )\n else:\n logger.info(\"No changes needed. Deleting branch: {}\".format(branch_name))\n _delete_branch(branch_object)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6323618292808533, "alphanum_fraction": 0.6380457282066345, "avg_line_length": 45.98295593261719, "blob_id": "62b476896fd44ebebc6113ded52b59258d247b70", "content_id": "74dbb853596708726c978a7b57e326cc36757a90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8269, "license_type": "no_license", "max_line_length": 119, "num_lines": 176, "path": "/jenkins/tests/test_bokchoy_db_pull_request.py", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nimport click\nfrom click.testing import CliRunner\nfrom mock import patch, Mock\n\nfrom jenkins.bokchoy_db_pull_request import (_connect_to_repo, _read_local_file_contents,\n                                             _branch_exists, _get_modified_files_list, _get_file_sha,\n                                             _file_has_changed, _create_branch, _update_file,\n                                             _create_file, _create_pull_request,\n                                             _delete_branch, main)\n\n\nclass BokchoyPullRequestTestCase(TestCase):\n    \"\"\"\n    Test Case class for bokchoy_db_pull_request.py.\n    \"\"\"\n    # Create the Cli runner to run the main function with click arguments\n    runner = CliRunner()\n\n    @patch('jenkins.bokchoy_db_pull_request._connect_to_repo',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._read_local_file_contents',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._branch_exists',\n           return_value=False)\n    @patch('jenkins.bokchoy_db_pull_request._get_modified_files_list',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._get_file_sha',\n           return_value=\"abc123\")\n    @patch('jenkins.bokchoy_db_pull_request._file_has_changed',\n           return_value=False)\n    @patch('jenkins.bokchoy_db_pull_request._create_branch',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._update_file',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_pull_request',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._delete_branch',\n           return_value=None)\n    def test_no_changes(\n        self, delete_branch_mock, create_pr_mock, update_file_mock, create_branch_mock, file_changed_mock,\n        file_sha_mock, modified_list_mock, get_branch_mock, read_file_mock, repo_mock\n    ):\n        \"\"\"\n        Ensure a merge with no changes to db files will not result in any updates.\n        \"\"\"\n        result = self.runner.invoke(main, args=['--sha=123', '--repo_root=../../repo'])\n        assert not update_file_mock.called\n        assert not create_pr_mock.called\n        assert delete_branch_mock.called\n\n    @patch('jenkins.bokchoy_db_pull_request._connect_to_repo',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._read_local_file_contents',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._branch_exists',\n           return_value=False)\n    @patch('jenkins.bokchoy_db_pull_request._get_modified_files_list',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._get_file_sha',\n           return_value=\"abc123\")\n    @patch('jenkins.bokchoy_db_pull_request._file_has_changed',\n           return_value=True)\n    @patch('jenkins.bokchoy_db_pull_request._create_branch',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._update_file',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_pull_request',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._delete_branch',\n           return_value=None)\n    def test_changes(\n        self, delete_branch_mock, create_pr_mock, update_file_mock, create_branch_mock, file_changed_mock,\n        file_sha_mock, modified_list_mock, get_branch_mock, read_file_mock, repo_mock\n    ):\n        \"\"\"\n        Ensure a merge with changes to db files will result in the proper updates, a new branch, and a PR.\n        \"\"\"\n        result = self.runner.invoke(main, args=['--sha=123', '--repo_root=../../repo'])\n        assert create_branch_mock.called\n        self.assertEqual(create_branch_mock.call_count, 1)\n        assert update_file_mock.called\n        assert create_pr_mock.called\n        assert not delete_branch_mock.called\n\n    @patch('jenkins.bokchoy_db_pull_request._connect_to_repo',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._read_local_file_contents',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._branch_exists',\n           return_value=False)\n    @patch('jenkins.bokchoy_db_pull_request._get_modified_files_list',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._get_file_sha',\n           return_value=\"abc123\")\n    @patch('jenkins.bokchoy_db_pull_request._file_has_changed',\n           side_effect=[True, False, False, False, False, False, False, False])\n    @patch('jenkins.bokchoy_db_pull_request._create_branch',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._update_file',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_pull_request',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._delete_branch',\n           return_value=None)\n    def test_one_change(\n        self, delete_branch_mock, create_pr_mock, update_file_mock, create_branch_mock, file_changed_mock,\n        file_sha_mock, modified_list_mock, get_branch_mock, read_file_mock, repo_mock\n    ):\n        \"\"\"\n        Ensure a merge with changes to one file will result in updating only that file, as well as a new branch and PR.\n        \"\"\"\n        result = self.runner.invoke(main, args=['--sha=123', '--repo_root=../../repo'])\n        self.assertEqual(create_branch_mock.call_count, 1)\n        self.assertEqual(update_file_mock.call_count, 1)\n        assert create_pr_mock.called\n        assert not delete_branch_mock.called\n\n    @patch('jenkins.bokchoy_db_pull_request._connect_to_repo',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._read_local_file_contents',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._branch_exists',\n           return_value=False)\n    @patch('jenkins.bokchoy_db_pull_request._get_modified_files_list',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._get_file_sha',\n           side_effect=[None, \"abc123\", \"abc123\", \"abc123\", \"abc123\", \"abc123\", \"abc123\"])\n    @patch('jenkins.bokchoy_db_pull_request._file_has_changed',\n           side_effect=[False, False, False, False, False, False])\n    @patch('jenkins.bokchoy_db_pull_request._create_file',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_branch',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_pull_request',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._delete_branch',\n           return_value=None)\n    def test_create_file(\n        self, delete_branch_mock, create_pr_mock, create_branch_mock, create_file_mock, file_changed_mock,\n        file_sha_mock, modified_list_mock, get_branch_mock, read_file_mock, repo_mock\n    ):\n        \"\"\"\n        Ensure that functionality for creating a new file works as expected.\n        \"\"\"\n        result = self.runner.invoke(main, args=['--sha=123', '--repo_root=../../repo'])\n        self.assertEqual(create_branch_mock.call_count, 1)\n        assert create_file_mock.called\n        self.assertEqual(create_file_mock.call_count, 1)\n        assert not delete_branch_mock.called\n\n    @patch('jenkins.bokchoy_db_pull_request._connect_to_repo',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._branch_exists',\n           return_value=True)\n    @patch('jenkins.bokchoy_db_pull_request._get_modified_files_list',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_branch',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._create_pull_request',\n           return_value=None)\n    @patch('jenkins.bokchoy_db_pull_request._delete_branch',\n           return_value=None)\n    def test_branch_exists(\n        self, delete_branch_mock, create_pr_mock, create_branch_mock, modified_list_mock,\n        get_branch_mock, repo_mock\n    ):\n        \"\"\"\n        If the branch for a given fingerprint already exists, make sure the script\n        doesn't try to create a new branch or create a PR.\n        \"\"\"\n        result = self.runner.invoke(main, args=['--sha=123', '--repo_root=../../repo'])\n        assert not create_branch_mock.called\n        assert not create_pr_mock.called\n        assert not delete_branch_mock.called\n" }, { "alpha_fraction": 0.5317997336387634, "alphanum_fraction": 0.5372124314308167, "avg_line_length": 27.8671875, "blob_id": "5ad1c4fc9faa887d73fa67614713dfe05ac88396", "content_id": "b737342314044c7b8c67b77cd2c0a8b4754c948c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3695, "license_type": "no_license", "max_line_length": 75, "num_lines": 128, "path": "/aws/tests/test_deregister_amis.py", "repo_name": "cclauss/testeng-ci", "src_encoding": "UTF-8", "text": "\"\"\"\nTests for testeng-ci/aws.\n\"\"\"\nimport boto\nfrom boto.exception import EC2ResponseError\nfrom aws.deregister_amis import(\n    main, get_ec2_connection, deregister_amis_by_tag)\nfrom moto import mock_ec2\nfrom testfixtures import LogCapture\nfrom unittest import TestCase\n\n\n@mock_ec2\nclass DeregisterAmisTestCase(TestCase):\n    \"\"\"\n    TestCase class for testing get_running_instances.py.\n    \"\"\"\n\n    def setUp(self):\n        self.key_id = 'my-key-id'\n        self.secret_key = 'my-secret-key'\n        self.conn = boto.connect_ec2(self.key_id, self.secret_key)\n        self.args = [\n            '-i', self.key_id,\n            '-s', self.secret_key,\n            '--log-level', 'INFO',\n        ]\n\n    def _get_test_image(self):\n        test_image_id = 'ami-11122278'\n        reservation = self.conn.run_instances(test_image_id)\n        self.conn.create_image(\n            name='test-ami',\n            instance_id=reservation.instances[0].id\n        )\n        return self.conn.get_all_images()[0]\n\n    def test_main(self):\n        \"\"\"\n        Test output of main\n        \"\"\"\n        with LogCapture() as l:\n            main(self.args)\n            l.check(\n                ('aws.deregister_amis',\n                 'INFO',\n                 'Finding AMIs tagged with delete_or_keep: delete'),\n\n                ('aws.deregister_amis',\n                 'INFO',\n                 'No images found matching criteria.')\n            )\n\n    def test_main_deregister(self):\n        \"\"\"\n        Test that a correctly-tagged AMI is deregistered\n        \"\"\"\n\n        test_ami = self._get_test_image()\n        test_ami.add_tag('delete_or_keep', 'delete')\n        with LogCapture() as l:\n            main(self.args)\n\n            l.check(\n                ('aws.deregister_amis',\n                 'INFO',\n                 'Finding AMIs tagged with delete_or_keep: delete'),\n\n                ('aws.deregister_amis',\n                 'INFO',\n                 'Deregistering {image_id}'.format(image_id=test_ami))\n            )\n        self.assertEqual(len(self.conn.get_all_images()), 0)\n\n    def test_main_no_deregister(self):\n        \"\"\"\n        Test that an AMI without proper tags is not de-registered\n        \"\"\"\n        test_ami = self._get_test_image()\n        # Flag AMI as 'keep'\n        test_ami.add_tag('delete_or_keep', 'keep')\n\n        with LogCapture() as l:\n            main(self.args)\n\n            l.check(\n                ('aws.deregister_amis',\n                 'INFO',\n                 'Finding AMIs tagged with delete_or_keep: delete'),\n\n                ('aws.deregister_amis',\n                 'INFO',\n                 'No images found matching criteria.')\n            )\n        self.assertEqual(len(self.conn.get_all_images()), 1)\n\n    def test_main_dry_run(self):\n        \"\"\"\n        Test that a correctly-tagged AMI is NOT deregistered\n        \"\"\"\n        test_ami = self._get_test_image()\n        test_ami.add_tag('delete_or_keep', 'delete')\n\n        self.args.append('--dry-run')\n        main(self.args)\n        self.assertEqual(len(self.conn.get_all_images()), 1)\n\n\nclass DeregisterExceptionTestCase(TestCase):\n    \"\"\"\n    Test exceptions that would be thrown from the script. Note that boto is\n    not mocked in this class. It will make actual network calls.\n\n    \"\"\"\n\n    def setUp(self):\n        self.key_id = 'FAKEBADKEY'\n        self.secret_key = 'FAKEBADSECRET'\n\n    def test_cant_get_instances(self):\n        conn = get_ec2_connection(self.key_id, self.secret_key)\n        with self.assertRaises(EC2ResponseError):\n            deregister_amis_by_tag(\n                \"foo_tag\",\n                \"foo_tag_value\",\n                dry_run=False,\n                connection=conn\n            )\n" } ]
8
ashutoshpandey1710/LanguageModel
https://github.com/ashutoshpandey1710/LanguageModel
e32be8b25d2b866d3f3aab3dbe0db48f1b93c6ae
295f8354350989a1417a57577d3dfcad884fffd8
d8efb496ecb3bde50a16c06d172cbf714a0420b3
refs/heads/master
2020-05-17T11:27:08.640845
2014-07-06T17:06:21
2014-07-06T17:06:21
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5581313371658325, "alphanum_fraction": 0.5725324749946594, "avg_line_length": 32.505746841430664, "blob_id": "571f4c80202b81e3ac8f7792c499ece11b6b2574", "content_id": "3029dbb964b2d1ca1f11f5b8010625b0a500d981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2847, "license_type": "no_license", "max_line_length": 150, "num_lines": 87, "path": "/languagemodel.py", "repo_name": "ashutoshpandey1710/LanguageModel", "src_encoding": "UTF-8", "text": "import re\nfrom pprint import pprint\nimport argparse\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\ndef get_filtered_lines(filename):\n lines = [['<begin>'] + re.sub(r'[^a-z ]+', '', line.lower())[:-1].split() + ['<end>'] for line in open(filename, 'r').readlines()]\n #print lines\n return lines\n \n\ndef trigrammatize(sent_as_list):\n return [sent_as_list[i:i + 3] for i in range(len(sent_as_list) - 2)]\n\ndef setup_lm_dict(filename, verbose=None):\n if verbose:\n print \"Forming trigrams...\"\n \n trigrams = []\n for line in get_filtered_lines(filename): trigrams.extend(trigrammatize(line))\n \n if verbose:\n print \"Done.\"\n \n if verbose:\n print \"Building Language Model...\"\n lm_dict = {}\n count = 0\n for line in trigrams:\n count += 1\n if lm_dict.has_key(line[0]):\n if lm_dict[ line[0] ].has_key(line[1]):\n if lm_dict[ line[0] ][ line[1] ].has_key(line[2]):\n lm_dict[ line[0] ][ line[1] ][ line[2] ] += 1.0\n else:\n lm_dict[ line[0] ][ line[1] ][ line[2] ] = 1.0\n else:\n lm_dict[ line[0] ][ line[1] ] = { line[2] : 1.0 }\n else:\n lm_dict[ line[0] ] = { line[1] : { line[2] : 1.0 }} \n if verbose:\n print \"Done...\"\n print \"Trigram Count: {}\".format(count) \n return lm_dict\n\n \n\ndef count_prob_estimator(lm_dict):\n for word1, l1dict in lm_dict.items():\n for word2, l2dict in l1dict.items():\n total = sum(l2dict.values())\n for word3 in l2dict.keys():\n l2dict[word3] /= total\n#TODO: Implement other estimators.\n\ndef dump_lm(lm_dict, outfile):\n pickle.dump(lm_dict, open(outfile, 'wb'))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Efficient probabilistic language model (unsmoothed).\\n Eg. $ python languagemodel.py -o model samp')\n\n parser.add_argument(\"-v\", \"--verbose\", help=\"turns on verbose mode.\", action=\"store_true\")\n parser.add_argument(\"inputfile\", help=\"text input file with one sentence per line (see file samp for example)\", action=\"store\")\n parser.add_argument(\"-o\", \"--output\", help=\"output model file. prints to stdout if unspecified.\", action=\"store\")\n #TODO: Add options for different estimator implementations.\n args = parser.parse_args()\n \n lm_dict = setup_lm_dict(args.inputfile, args.verbose)\n \n if args.verbose:\n print \"Estimating trigram probabilities (count)...\"\n count_prob_estimator(lm_dict)\n if args.verbose:\n print \"Done.\"\n \n \n if args.output:\n if args.verbose:\n print \"Dumping Language Model...\"\n dump_lm(lm_dict, args.output)\n if args.verbose:\n print \"Done.\"\n else:\n pprint(lm_dict)\n \n \n \n" }, { "alpha_fraction": 0.8063241243362427, "alphanum_fraction": 0.8063241243362427, "avg_line_length": 49.20000076293945, "blob_id": "6e9d1c01add01cc26cc59280264b1b10602709cb", "content_id": "d604f28569eccc4b0cd3358747f2173121ad6c4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 253, "license_type": "no_license", "max_line_length": 199, "num_lines": 5, "path": "/README.md", "repo_name": "ashutoshpandey1710/LanguageModel", "src_encoding": "UTF-8", "text": "LanguageModel is a Python-based utility that efficiently creates a probabilistic language model from plain text. The created model is unsmoothed (for now) and uses the counting probability estimator.\n\nFor help/usage, run\n\n$ python languagemodel.py -h\n" } ]
2
npgallagher/my-first-python-programs
https://github.com/npgallagher/my-first-python-programs
2a3c1a5870636dbf0bd693544d9e159eebed5e46
576b37cba12e59d7fe015dfdb483ff7771c549e9
3ca7d93603a1e7192cd9a424735e906a4b7549d5
refs/heads/master
2021-01-21T02:10:45.678321
2017-08-31T13:15:08
2017-08-31T13:15:08
101,882,913
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.599023699760437, "alphanum_fraction": 0.6136680841445923, "avg_line_length": 40.17647171020508, "blob_id": "bdbb9607aaccfd95efcdc484d46f176f581afc5c", "content_id": "062518ab4859b0cd29fdbb5c9b9b681d9093642a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1434, "license_type": "no_license", "max_line_length": 103, "num_lines": 34, "path": "/madlib.py", "repo_name": "npgallagher/my-first-python-programs", "src_encoding": "UTF-8", "text": "print(\"Welcome to madlib!\")\r\nprint(\"What's your name?\")\r\nname = input()\r\nprint(\"Nice to meet you, \" + name + \".\")\r\nprint(\"Please type a noun.\")\r\nnoun1 = input()\r\nprint(\"Now a verb with -ing.\")\r\nverb1 = input()\r\nprint(\"And then an adjective.\")\r\nadjective = input()\r\nprint(\"Name a place.\")\r\nplace = input()\r\nprint(\"Please type a random name.\")\r\ngiven_name = input()\r\nprint(\"Now type another verb with -ing.\")\r\nverb2 = input()\r\nprint(\"Now we need another noun.\")\r\nnoun2 = input()\r\nprint(\"One more noun.\")\r\nnoun3 = input()\r\nprint(\"Type a place and then we can get started!\")\r\nplace2 = input()\r\n#This is where the story starts------------------------------\r\nprint(\"One day \" + name + \" was hanging out at \" + place2 + \".\")\r\nprint(place2 + \" had been particularly \" + adjective + \" that day.\")\r\nprint(name + \" had gotten bored and decided to start \" + verb1 + \".\")\r\nprint(\"The sound of the \" + verb1 + \" was very loud and annoying to everyone \" + verb2 + \" nearby.\")\r\nprint(given_name + \" actually liked the sound of the \" + verb1 + \" and started \" + verb1 + \" as well.\")\r\nprint(\"Everyone noticed all the \" + verb1 + \" and eventually just started to \" + verb1 + \" too.\")\r\nprint(\"Some people tried \" + verb1 + \" with a \" + noun1 + \".\")\r\nprint(\"Others tried with a \" + noun2 + \".\")\r\nprint(\"Most people were just \" + verb1 + \" with a \" + noun3 + \".\")\r\nprint(\"The \" + verb1 + \" could be heard miles away at \" + place + \".\")\r\nprint(\"The End!\")\r\n" }, { "alpha_fraction": 0.5815654993057251, "alphanum_fraction": 0.5983906388282776, "avg_line_length": 21.75, "blob_id": "ca37745d5ab269c57887dce2b765626d22c415be", "content_id": "71849fe8c684633cfbebc2a9d892b2b8c74eddff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1367, "license_type": "no_license", "max_line_length": 73, "num_lines": 60, "path": "/geometry.py", "repo_name": "npgallagher/my-first-python-programs", "src_encoding": "UTF-8", "text": "#This program contains functions that evaluate formulas used in geometry.\r\n#\r\n#Noah Gallagher\r\n#August 30, 2017\r\n\r\nprint (\"Program options:\")\r\nprint (\"-triangle_area, base and height\")\r\nprint (\"-circle_area, radius\")\r\nprint (\"-parallelogram_area, base and height\")\r\nprint (\"-trapezoid_area, base, base, height\")\r\nprint (\"-rectangular_prism_volume, width height and length\")\r\nprint (\"-cone_volume, radius and height\")\r\nprint (\"-rectangular_prism_surface_area, length width and height\")\r\nprint (\"-surface_area_sphere, radius\")\r\nprint (\"-hypotenuse_right_triangle, a length and b length\")\r\nprint (\" \")\r\n\r\n\r\nimport math\r\n\r\ndef triangle_area(b, h):\r\n a = (1/2) * b * h\r\n return a\r\n\r\ndef circle_area(r):\r\n a = math.pi * r**2\r\n return a\r\n\r\n#function calls\r\nprint(triangle_area(4,9))\r\nprint(circle_area(5))\r\nprint(circle_area(12))\r\n\r\ndef parallelogram_area(b, h):\r\n a = b * h\r\n return a\r\n\r\ndef trapezoid_area(b, c, h):\r\n a = ((b+c)/2) * h\r\n return a\r\n\r\ndef rectangular_prism_volume(w, h, l):\r\n v = w * h * l\r\n return v\r\n\r\ndef cone_volume(r, h):\r\n v = math.pi * r**2 * (h/3)\r\n return v\r\n\r\ndef rectangular_prism_surface_area(l, w, h):\r\n a = 2 * (w * l + h * l + h * w)\r\n return a\r\n\r\ndef surface_area_sphere(r):\r\n a = 4 * math.pi * r**2\r\n return a\r\n\r\ndef hypotenuse_right_triangle(a, b):\r\n c = (a ** 2 + b ** 2) ** .5\r\n return c\r\n\r\n" }, { "alpha_fraction": 0.6135371327400208, "alphanum_fraction": 0.6266375780105591, "avg_line_length": 26.42608642578125, "blob_id": "84918e980a418fcbb3ae9a2e493029ece5ba3331", "content_id": "8675ab04852ac01301653349b30d57cce8e40289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/hello.py.py", "repo_name": "npgallagher/my-first-python-programs", "src_encoding": "UTF-8", "text": "#This program says hello and greets a person by name.\r\n#\r\n#Your Name\r\n#August 24, 2017\r\n\r\nprint(\"Hello.\")\r\nprint(\"What is your name?\")\r\nname = input()\r\nprint(\"It is good to meet you, \" + name + \".\")\r\nprint(\"Where were you born, \" + name + \"?\")\r\nlocation = input()\r\nprint(\"That's interesting. I'd like to visit \" + location + \" one day.\")\r\nprint(\"What's your favorite color?\")\r\ncolor = input()\r\nprint(\"Wow, \" + color + \" is my favorite color too!\")\r\ninput()\r\n" } ]
3
dariabird/st-python-selenium-po
https://github.com/dariabird/st-python-selenium-po
cb4b8bbf297f79a34d50d3ae8e5722458611466e
2df68d5f17e2c57d1c1ec22671ec7fc2d5673629
34a9a3408d2279a13eed50102653f593a78e13db
refs/heads/master
2022-04-20T02:12:42.809641
2020-04-18T16:51:20
2020-04-18T16:51:20
256,781,280
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6513158082962036, "alphanum_fraction": 0.6532894968986511, "avg_line_length": 29.399999618530273, "blob_id": "efe7132fa2fcf81c268440c5325ddd95bc9fd08d", "content_id": "cd41d032b676df1c629de335bc8a46c7a7d27396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 80, "num_lines": 50, "path": "/pages/base_page.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "from selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass BasePage:\n    HOME_LOCATOR = By.CSS_SELECTOR, 'i[title=Home]'\n    CART_LOCATOR = By.XPATH, '//a[contains(., \"Checkout\")]'\n    CART_COUNTER_LOCATOR = By.CSS_SELECTOR, 'a.content'\n\n    def __init__(self, driver):\n        self.driver = driver\n        self.wait = WebDriverWait(driver, 10)\n\n    @property\n    def home(self):\n        return self.driver.find_element(*self.HOME_LOCATOR)\n\n    @property\n    def cart(self):\n        return self.driver.find_element(*self.CART_LOCATOR)\n\n    def go_home(self):\n        self.home.click()\n\n    def go_to_cart(self):\n        self.cart.click()\n\n    def is_element_present(self, *args):\n        try:\n            self.driver.find_element(*args)\n            return True\n        except NoSuchElementException:\n            return False\n\n    def are_elements_present(self, *args):\n        return len(self.driver.find_elements(*args)) > 0\n\n    def wait_for_element_present(self, locator):\n        try:\n            return self.wait.until(EC.presence_of_element_located(locator))\n        except TimeoutException:\n            return None\n\n    def wait_for_elements_present(self, locator):\n        try:\n            return self.wait.until(EC.presence_of_all_elements_located(locator))\n        except TimeoutException:\n            return None\n" }, { "alpha_fraction": 0.7112488746643066, "alphanum_fraction": 0.7121346592903137, "avg_line_length": 39.28571319580078, "blob_id": "40b4b57bc1c7775447eaa71a81da3840cf9e0b2d", "content_id": "c2f819e723d83d4130658cf20d22a923c2e50109", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1129, "license_type": "no_license", "max_line_length": 97, "num_lines": 28, "path": "/pages/product_page.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom pages.base_page import BasePage\nfrom pages.helper import is_text_of_element_changed\n\n\nclass ProductPage(BasePage):\n    SIZE_SELECTOR_LOCATOR = By.CSS_SELECTOR, 'select[name=\"options[Size]\"]'\n    ADD_TO_CART_BUTTON_LOCATOR = By.NAME, \"add_cart_product\"\n\n    @property\n    def size_selector(self):\n        return Select(self.driver.find_element(*self.SIZE_SELECTOR_LOCATOR))\n\n    @property\n    def add_to_cart_button(self):\n        return self.driver.find_element(*self.ADD_TO_CART_BUTTON_LOCATOR)\n\n    def select_product_size(self, index=1):\n        self.size_selector.select_by_index(index)\n\n    def add_product_to_cart(self):\n        self.wait_for_element_present(self.ADD_TO_CART_BUTTON_LOCATOR)\n        if self.is_element_present(*self.SIZE_SELECTOR_LOCATOR):\n            self.select_product_size()\n        cart_counter_text = self.driver.find_element(*self.CART_COUNTER_LOCATOR).text\n        self.add_to_cart_button.click()\n        self.wait.until(is_text_of_element_changed(self.CART_COUNTER_LOCATOR, cart_counter_text))\n\n" }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 33, "blob_id": "0eb071ab0228e1cd33ebe11b2cc2ed920e84231f", "content_id": "f1fd75a8778d661c30e66cfb4cea35a9c52bcbd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 61, "num_lines": 8, "path": "/pages/helper.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "class is_text_of_element_changed(object):\n    def __init__(self, locator, text):\n        self.locator = locator\n        self.text = text\n\n    def __call__(self, driver):\n        actual_text = driver.find_element(*self.locator).text\n        return actual_text != self.text\n" }, { "alpha_fraction": 0.6869158744812012, "alphanum_fraction": 0.6876947283744812, "avg_line_length": 34.66666793823242, "blob_id": "1383d3cbccf395cc7e1606f96637b2b4a7682889", "content_id": "db02c6664368d67579671b9c4c5ae2a34ae6c6ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1284, "license_type": "no_license", "max_line_length": 78, "num_lines": 36, "path": "/pages/cart_page.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "import time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom pages.base_page import BasePage\n\n\nclass CartPage(BasePage):\n    PRODUCT_SHORTCUTS_LOCATOR = By.CSS_SELECTOR, 'ul.shortcuts li'\n    PRODUCT_SHORTCUT = By.CSS_SELECTOR, 'ul.shortcuts li a'\n    ORDER_SUMMARY_LOCATOR = By.ID, 'box-checkout-summary'\n    REMOVE_BUTTON_LOCATOR = By.CSS_SELECTOR, 'button[name=\"remove_cart_item\"]'\n\n    def open(self):\n        self.driver.get(\"http://localhost/litecart/en/checkout\")\n        return self\n\n    @property\n    def product_shortcut(self):\n        return self.driver.find_element(*self.PRODUCT_SHORTCUT)\n\n    @property\n    def remove_button(self):\n        return self.driver.find_element(*self.REMOVE_BUTTON_LOCATOR)\n\n    @property\n    def order_summary(self):\n        return self.driver.find_element(*self.ORDER_SUMMARY_LOCATOR)\n\n    def remove_product_from_cart(self):\n        if self.are_elements_present(*self.PRODUCT_SHORTCUTS_LOCATOR):\n            time.sleep(1)\n            self.product_shortcut.click()\n        if self.is_element_present(*self.REMOVE_BUTTON_LOCATOR):\n            order_summary = self.order_summary\n            self.remove_button.click()\n            self.wait.until(EC.staleness_of(order_summary))\n" }, { "alpha_fraction": 0.6201117038726807, "alphanum_fraction": 0.6312848925590515, "avg_line_length": 21.375, "blob_id": "d86a77c5b4ec58631f829d740e8b2fae99a45626", "content_id": "cbf8c3b290e34c2f459f91b45cd4c49a60ea333f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/tests/test_work_with_cart.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "import pytest\n\n\ndef test_add_and_delete_three_product(app):\n    for i in range(3):\n        app.add_product_to_cart()\n    for i in range(3):\n        app.delete_product_from_cart()\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.75, "avg_line_length": 11, "blob_id": "236eebbbd527fa346d6a7accbdca7586e7b50f22", "content_id": "7f4f841068443932adac1054de759054d01394ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 24, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/requirements.txt", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "pytest\nselenium==3.14.0\n" }, { "alpha_fraction": 0.6722689270973206, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 25.052631378173828, "blob_id": "d7ecb48bc0fbf554ee0411721c4f6f5dd96d2ca4", "content_id": "b172ea9cb9c16c8e4fbba736a6cb3a1fcbbe1e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/pages/start_page.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "import time\nfrom selenium.webdriver.common.by import By\nfrom pages.base_page import BasePage\n\n\nclass StartPage(BasePage):\n    PRODUCTS_LOCATOR = By.CSS_SELECTOR, 'ul.products li'\n\n    def open(self):\n        self.driver.get(\"http://localhost/litecart/en/\")\n        return self\n\n    @property\n    def products(self):\n        return self.driver.find_elements(*self.PRODUCTS_LOCATOR)\n\n    def open_first_product_page(self):\n        time.sleep(1)\n        self.products[0].click()\n" }, { "alpha_fraction": 0.6718376874923706, "alphanum_fraction": 0.6718376874923706, "avg_line_length": 30.037036895751953, "blob_id": "3895d810681ac7facb2fecd337c2f8864c41e967", "content_id": "04aba87a9f76bf2e1dc87112604c5d28e99e0568", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 838, "license_type": "no_license", "max_line_length": 52, "num_lines": 27, "path": "/app/application.py", "repo_name": "dariabird/st-python-selenium-po", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom pages.base_page import BasePage\nfrom pages.cart_page import CartPage\nfrom pages.start_page import StartPage\nfrom pages.product_page import ProductPage\n\n\nclass Application:\n    def __init__(self):\n        self.driver = webdriver.Chrome()\n        self.start_page = StartPage(self.driver)\n        self.product_page = ProductPage(self.driver)\n        self.base_page = BasePage(self.driver)\n        self.cart_page = CartPage(self.driver)\n\n    def quit(self):\n        self.driver.quit()\n\n    def add_product_to_cart(self):\n        self.start_page.open()\n        self.start_page.open_first_product_page()\n        self.product_page.add_product_to_cart()\n        # self.base_page.go_home()\n\n    def delete_product_from_cart(self):\n        self.cart_page.open()\n        self.cart_page.remove_product_from_cart()\n" } ]
8